Dataset columns:

    query            string      length 9 to 3.4k characters
    document         string      length 9 to 87.4k characters
    metadata         dict
    negatives        sequence    4 to 101 items
    negative_scores  sequence    4 to 101 items
    document_score   string      length 3 to 10 characters
    document_rank    string      102 distinct values
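The columns above can be inspected programmatically. A minimal sketch, assuming the rows are published as a Hugging Face dataset; the identifier "org/retrieval-triplets" is a placeholder, since the actual dataset name is not given here.

    # Hypothetical loading example; "org/retrieval-triplets" is a placeholder name.
    from datasets import load_dataset

    ds = load_dataset("org/retrieval-triplets", split="train")
    row = ds[0]
    print(row["query"])                    # natural-language or docstring-style query
    print(len(row["negatives"]))           # 4 to 101 negative documents per row
    print(row["document_score"], row["document_rank"])

One example row with this structure follows.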
query: Method which calculates the Offensive Ratio of a player, i.e. the total points scored per 100 possessions.
document:

def set_offensive_ratio(self):
    # Requires `from decimal import Decimal, InvalidOperation`; BCOLORS holds the
    # project's terminal colour constants.
    bx = self.get_standard_stats()
    team = self.get_team_stats()
    opp_team = self.get_opp_team_stats()
    if bx["minutes"] > 0 and (bx["t2p_int"] + bx["t3p_int"]) > 0:
        fgm = bx["t2p_conv"] + bx["t3p_conv"]
        fga = bx["t2p_int"] + bx["t3p_int"]
        team_fgm = team["t2p_conv"] + team["t3p_conv"]
        team_fga = team["t2p_int"] + team["t3p_int"]
        team_points = team["t2p_conv"] * 2 + team["t3p_conv"] * 3 + team["tl_conv"]
        points = bx["t2p_conv"] * 2 + bx["t3p_conv"] * 3 + bx["tl_conv"]
        try:
            qAST = (Decimal(bx["minutes"] / (team["minutes"] / 5)) * (Decimal('1.14') * Decimal((team["assists"] - bx["assists"]) / team_fgm))) + \
                Decimal((((team["assists"] / team["minutes"]) * bx["minutes"] * 5 - bx["assists"]) / ((team_fgm / team["minutes"]) * bx["minutes"] * 5 - fgm)) * (1 - (bx["minutes"] / (team["minutes"] / 5))))
        except ZeroDivisionError:
            print(BCOLORS.WARNING + "Error: Division by zero" + BCOLORS.ENDC)
            qAST = 1
        except InvalidOperation:
            print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
            qAST = 1
        fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx["tl_conv"]) / (2 * fga)) * qAST)
        try:
            ast_part = Decimal('0.5') * Decimal(((team_points - team["tl_conv"]) - (points - bx["tl_conv"])) / (2 * (team_fga - fga))) * bx["assists"]
        except ZeroDivisionError:
            print(BCOLORS.WARNING + "Error: Division by zero" + BCOLORS.ENDC)
            ast_part = 0
        if bx["tl_int"] > 0:
            ft_part = Decimal(1 - (1 - (bx["tl_conv"] / bx["tl_int"]))**2) * Decimal('0.4') * bx["tl_int"]
        else:
            ft_part = 0
        team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team["tl_conv"] / team["tl_int"]))**2) * team["tl_int"] * Decimal('0.4'))
        try:
            team_orb_percentage = Decimal(team["reb_of"] / (team["reb_of"] + ((opp_team["reb_def"] + opp_team["reb_of"]) - opp_team["reb_of"])))
        except ZeroDivisionError:
            print(BCOLORS.FAIL + "Error: Division by zero" + BCOLORS.ENDC)
            team_orb_percentage = 0
        except InvalidOperation:
            print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
            team_orb_percentage = 0
        team_play_percentage = Decimal(team_scoring_poss / (team_fga + team["tl_int"] * Decimal('0.4') + team["turnovers"]))
        try:
            team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))
        except InvalidOperation:
            print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
            team_orb_weight = 0
        orb_part = bx["reb_of"] * team_orb_weight * team_play_percentage
        fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)
        if bx["tl_conv"] > 0:
            ft_x_poss = Decimal((1 - (bx["tl_conv"] / bx["tl_int"]))**2) * Decimal('0.4') * bx["tl_int"]
        else:
            ft_x_poss = Decimal(1 - (bx["tl_conv"] / 1)**2) * Decimal('0.4') * bx["tl_int"]
        try:
            sc_poss = (fg_part + ast_part + ft_part) * (1 - (team["reb_of"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part
        except InvalidOperation:
            print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
            sc_poss = 0
        tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx["turnovers"]
        pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx["t3p_conv"]) * (1 - Decimal('0.5') * Decimal((points - bx["tl_conv"]) / (2 * fga)) * qAST)
        try:
            pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team["t3p_conv"] - bx["t3p_conv"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team["tl_conv"]) - (points - bx["tl_conv"])) / (2 * (team_fga - fga))) * bx["assists"]
        except:
            pprod_ast_part = 0
        pprod_orb_part = bx["reb_of"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team["tl_conv"] / team["tl_int"])**2) * Decimal('0.4') * team["tl_int"]))
        try:
            pprod = (pprod_fg_part + pprod_ast_part + bx["tl_conv"]) * (1 - (team["reb_of"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part
        except InvalidOperation:
            print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
            pprod = 0
        try:
            result = 100 * (pprod / tot_poss)
        except InvalidOperation:
            print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
            result = 0
        # print("fgm: " + str(fgm))
        # print("fga: " + str(fga))
        # print("team_fgm: " + str(team_fgm))
        # print("team_fga: " + str(team_fga))
        # print("team_points: " + str(team_points))
        # print("points: " + str(points))
        # print("qAST: " + str(qAST))
        # print("fg_part: " + str(fg_part))
        # print("ast_part: " + str(ast_part))
        # print("ft_part: " + str(ft_part))
        # print("team_scoring_poss: " + str(team_scoring_poss))
        # print("team_orb_percentage: " + str(team_orb_percentage))
        # print("team_play_percentage: " + str(team_play_percentage))
        # print("team_orb_weight: " + str(team_orb_weight))
        # print("orb_part: " + str(orb_part))
        # print("fg_x_poss: " + str(fg_x_poss))
        # print("ft_x_poss: " + str(ft_x_poss))
        # print("sc_poss: " + str(sc_poss))
        # print("tot_poss: " + str(tot_poss))
        # print("pprod_fg_part: " + str(pprod_fg_part))
        # print("pprod_ast_part: " + str(pprod_ast_part))
        # print("pprod_orb_part: " + str(pprod_orb_part))
        # print("pprod: " + str(pprod))
        # print("result: " + str(result) + "\n")
    else:
        result = 0.00
    self.ortg = "%.2f" % round(result, 2)
    if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:
        """For a single game the result can be negative or unreasonably large,
        so in that case the ORTG is recomputed with the team formula."""
        print(BCOLORS.OKBLUE + "Negative ORTG or ORTG above 1000 for the player => recalculating with the team formula" + BCOLORS.ENDC)
        bx = self.get_standard_stats()
        result = round((bx["t2p_conv"] * 2 + bx["t3p_conv"] * 3 + bx["tl_conv"]) / self.get_team_possessions(), 2)
        self.ortg = "%.2f" % result
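The method above appears to follow Dean Oliver's individual offensive rating: it estimates the player's scoring possessions, total possessions and points produced, then scales points produced to 100 possessions. A minimal sketch of that final scaling step only, with hypothetical pre-computed inputs (the helper name and numbers below are illustrative, not taken from the code):

    from decimal import Decimal

    def points_per_100_possessions(pprod: Decimal, tot_poss: Decimal) -> Decimal:
        # Final step of the calculation: points produced per 100 individual possessions.
        if tot_poss == 0:
            return Decimal(0)
        return 100 * (pprod / tot_poss)

    # e.g. 21.4 points produced over 18.7 total possessions -> ORTG of roughly 114.4
    print(points_per_100_possessions(Decimal("21.4"), Decimal("18.7")))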
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def offensive_rating(data_frame, mode):\n off_rat = dict()\n average_points = calculate_average_points(data_frame, mode)\n for k, possessions in possessions_home_away(data_frame, mode).items():\n try:\n off_rat[k] = format(float(average_points[k]) * 100 / float(possessions), '.2f')\n except ZeroDivisionError:\n off_rat[k] = 0.0\n return off_rat", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return 
float(own_moves - opp_moves)", "def rate_club(user, club):\n if not user.is_authenticated():\n return None\n if not club.posel_set.exists():\n return None\n return sum(x[1] for x in rank_in_club(user, club)) / club.posel_set.count()", "def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if 
opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 
0.0", "def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def calc_win_lose_ratio(self):\n total = len(self.train_y)\n survived = 0\n for i in self.train_y:\n if i > 0:\n survived += 1\n\n self.survival_sum = [survived, total-survived]", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. 
opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def get_improved_score_factor(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 
99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = 
int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 
60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == \"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n 
overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if 
player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def 
cash_ratio(self):\n return self.cash / self.current_liabilities", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def get_fool_ratio(self, test_acc, attack_accs):\n return [round(100*((test_acc - attack_acc) / test_acc), 2) for attack_acc in attack_accs]", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def precision(self, user_list):\n hit = 0\n all_recom = 0\n print('Calculate precision: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n all_recom += len(recom_item)\n print('\\nprecision is: ', hit / (all_recom * 1.0))\n return hit / (all_recom * 1.0)", "def open_positions_score(game, player):\n moves = game.get_legal_moves()\n side_coef = 1 if player == game.active_player else -1\n \n if len(moves) == 0:\n result = float(\"-inf\")\n else:\n result = len(moves)\n \n \n return float(result*side_coef)", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation", "def pct(self):\n\t\treturn self.bottle.pct()", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def determineAmountToCall(self, player):\n\t\treturn sum(self.currentBet) - sum(player.betAmount)", "def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total", "def odds_ratio(target_pct, peer_pct):\n 
odds_ratio = 0.0\n if peer_pct == 0.0:\n return None\n elif target_pct == 0.0:\n odds_ratio = 0.0\n elif target_pct == peer_pct:\n odds_ratio = 1.0\n elif peer_pct > 0.0 and target_pct < 1.0 and peer_pct < 1.0:\n odds_ratio = (target_pct/(1-target_pct))/(peer_pct/(1-peer_pct))\n return round(odds_ratio, 3)", "def get_points(self):\n self.round_points = 0\n for die in self.dice:\n if die == 1:\n self.round_points += 100\n elif die == 5:\n self.round_points += 50\n return self.round_points", "def getProduction(self, playerID):\n prod=0\n for p in self.__camps:\n if( p.getOwner() == playerID ):\n prod = prod + p.getGrowthrate()\n return prod", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def custom_score_5(game, player):\n \"\"\"custom_score_5 heuristic function defines chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def get_online_price_diff_percent_method(self):\n try:\n if self.overclockerskz and self.overclockerskz.online_price:\n return int((self.get_online_price_diff_method() / self.overclockerskz.online_price) * 100)\n else:\n return 0\n except (TypeError, ValueError):\n return 0", "def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result", "def calculate_profit(self):", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n 
match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def points_per_dollar(self):\n if float(self.draftkings_salary) == 0.0:\n return 0.0\n\n return float(self.predicted_draftkings_points) / float(self.draftkings_salary)", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def custom_score(game, player):\n \"\"\" custom_score heuristic function idea is to implement aggressive heuristic function \n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) # Calculate length of myPlayer moves\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player))) # Calculate length of opposite player moves same as custom score 2\n return float(length_my_player_moves - 1.5*length_opp_payer_moves)", "def get_percentage_practices(measure_table):\n with open(OUTPUT_DIR / \"practice_count.json\") as f:\n num_practices = json.load(f)[\"num_practices\"]\n\n num_practices_in_study = get_number_practices(measure_table)\n\n return np.round((num_practices_in_study / num_practices) * 100, 2)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def penalty(self):\n return 0", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n\n if game.move_count < 15:\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)\n\n delta = 0\n\n moves = game.get_legal_moves()\n initial_moves_count = len(moves)\n indexes = np.random.permutation(initial_moves_count)\n\n for i in range(0, min(4, initial_moves_count)):\n first_level = True\n simulation = game.copy()\n\n while True:\n moves = simulation.get_legal_moves()\n moves_count = len(moves)\n if moves_count == 0:\n if simulation.is_winner(player):\n delta = delta + 1\n else:\n delta = delta - 1\n break\n if first_level:\n selected_move = indexes[i]\n first_level = False\n else:\n selected_move = random.randint(0, moves_count - 1)\n\n simulation.apply_move(moves[selected_move])\n\n return float(own_moves + delta) #float(own_moves - opp_moves + 5 * delta)\n\n #return float(own_moves - opp_moves + free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n #own_moves = len(game.get_legal_moves(player))\n\n #if game.move_count < 
23:\n # opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n # return float(own_moves - opp_moves)\n\n return float(free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))", "def evaluate(self, player):\n evaluation = 0\n\n # Takes into account the number of pieces for each side in play.\n self._evaluate_num_pieces(player)\n\n # Evaluates the position of each piece\n evaluation += self._evaluate_pieces_position(player)\n evaluation -= self._evaluate_pieces_position(player.other)\n\n # Random extra point used to randomize plays that are equal\n evaluation += random.randint(0, 1)\n\n return evaluation", "def ratio_local_prod(self):\n if self.current_energy_produced == 0.0:\n return 1.0\n else:\n return 1. - self.export_grid / self.current_energy_produced", "def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score", "def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def KPI(self, total=True):\n \n data = self.select_table('ChordLog')\n correct = data[data['PredictedLabel'] == data['ActualLabel']]\n\n # % correctly predicted in chord net\n human_level_performance = (len(correct) / len(data)) * 100\n \n # round value\n human_level_performance = round(human_level_performance, 4) \n \n return human_level_performance", "def get_sharpe_ratio(allocs, prices):\n\tport_val = get_portfolio_value(prices, allocs, start_val=1.0)\n\tsharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]\n\treturn -sharpe_ratio", "def mc_update_scores(scores, board, player):\n dim = board.get_dim()\n winner = board.check_win()\n other_player = provided.switch_player(player)\n \n if winner == provided.DRAW:\n ratio = {player: 0, other_player: 0, 1: 0}\n elif winner == player:\n ratio = {player: 0 + SCORE_CURRENT, other_player: 0 - SCORE_OTHER, provided.EMPTY: 0}\n elif winner == other_player:\n ratio = {player: 0 - SCORE_CURRENT, other_player: 0 + SCORE_OTHER, provided.EMPTY: 0}\t\n \n for valx in range(dim):\n for valy in range(dim): \n scores[valx][valy] += ratio[board.square(valx, valy)] \n return scores", "def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in 
self.air_quality_measures)\n + self.outdoor_air_ventilation\n )", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def golden_ratio():\n print((1+math.sqrt(5))/2)", "def state_score_naive(self, game_state, player, weights):\n # walls score\n other_players = [p for p in game_state.players if p != player]\n my_walls = player.num_walls\n their_walls = max([p.num_walls for p in other_players])\n walls_diff = (my_walls - their_walls)\n # path length score\n my_path = len(game_state.get_shortest_path_player(player))\n their_path = min([len(game_state.get_shortest_path_player(p)) for p in other_players])\n paths_diff = their_path - my_path\n \n return weights[0]*walls_diff + weights[1]*paths_diff", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def rate_board(board, player):\n approx_player_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == player\n )\n approx_opponent_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == -player\n )\n return approx_player_moves - approx_opponent_moves", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def evaluate(self, mode=0):\r\n winner = self.determine_winner()\r\n if winner:\r\n return winner * self.WIN_SCORE\r\n\r\n if mode == 1:\r\n return self.centre_priority_evaluate()\r\n elif mode == 2:\r\n return 0.5 * (self.centre_priority_evaluate() + self.piece_evaluate())\r\n else:\r\n return self.piece_evaluate()", "def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 
0.0", "def get_proficiency_percentage(self):\n choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES]\n if '' in choice_values:\n choice_values.remove('') # Remove the empty proficiency choice\n choice_values.sort() # Ensure values are in the correct order\n\n value = choice_values.index(self.proficiency) + 1\n factor = 100 / len(choice_values)\n percentage = round(value * factor)\n\n return percentage", "def current_ratio(self):\n return self.current_assets / self.current_liabilities", "def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)", "def centre_priority_evaluate(self):\r\n evaluation = 0\r\n for player in range(2):\r\n player_sign = player * 2 - 1\r\n for i in range(4):\r\n score = i + 1\r\n evaluation += player_sign * score * count_bits(self.bitboard_king[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n evaluation += player_sign * score * count_bits(self.bitboard_pawns[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n return evaluation", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)" ]
[ "0.6951562", "0.6515817", "0.644771", "0.6344681", "0.62165296", "0.62093073", "0.61908257", "0.6157803", "0.6073757", "0.60568166", "0.6025391", "0.60042846", "0.5987803", "0.5981942", "0.5976337", "0.5951976", "0.5948813", "0.5936161", "0.593527", "0.593107", "0.5930244", "0.59181416", "0.59173024", "0.5916344", "0.5905475", "0.59005296", "0.589831", "0.5896911", "0.58819103", "0.58808035", "0.5869075", "0.58617043", "0.5845676", "0.5843281", "0.5843244", "0.5832698", "0.5825173", "0.5813156", "0.5804885", "0.579463", "0.57938963", "0.57904273", "0.57849365", "0.578347", "0.5776807", "0.5758458", "0.57579446", "0.57551193", "0.57548463", "0.57520443", "0.57492435", "0.572721", "0.57178944", "0.5714464", "0.5709707", "0.57083154", "0.5707349", "0.57063556", "0.56882703", "0.56873655", "0.5682273", "0.5678438", "0.56778", "0.56762904", "0.56762743", "0.5670208", "0.5658948", "0.5652207", "0.56518054", "0.56507206", "0.5650687", "0.56497794", "0.5639028", "0.5636266", "0.5633702", "0.5629081", "0.5628909", "0.562829", "0.5623302", "0.5622033", "0.56217074", "0.561321", "0.56125003", "0.5603133", "0.5599133", "0.5594772", "0.559337", "0.5593013", "0.55888104", "0.5588438", "0.5584812", "0.5583648", "0.5583001", "0.5575012", "0.5562968", "0.5561216", "0.5558128", "0.55474603", "0.5543088", "0.5540791" ]
0.6635203
1
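For orientation: each row in this dump closes with a list of similarity scores (one per candidate snippet in negatives), an overall document score, and a rank, as in the three values just above. The short sketch below shows one way to line the two lists up once a row has been parsed into a Python dict; the variable name row, the key names, and the toy values (taken from snippets and scores visible in this dump) are assumptions for illustration, not an official loader.

# hypothetical parsed row; key names and values are illustrative only
row = {
    "negatives": ["def custom_score(game, player): ...", "def penalty(self): return 0"],
    "negative_scores": ["0.6951562", "0.6157803"],
}
scores = [float(s) for s in row["negative_scores"]]          # scores are stored as strings
ranked = sorted(zip(scores, row["negatives"]), key=lambda p: p[0], reverse=True)
top_score, top_snippet = ranked[0]                           # best-scoring candidate snippet
print(round(top_score, 4), top_snippet.splitlines()[0])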
Linear interpolation for converting between physics and engineering units.
def __init__(self, coef, f1=unit_function, f2=unit_function):
    super(self.__class__, self).__init__(f1, f2)
    self.p = np.poly1d(coef)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate_linear(self, transect):\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply linear interpolation\n self.u_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=u[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)\n # Apply linear interpolation\n self.v_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=v[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)", "def linear(e0, e1, t0, t1, e):\n alpha = max(0, min(1, (e - e0) / (e1 - e0))) # what fraction of the way through are we\n t = alpha * t1 + (1 - alpha) * t0 # interpolate accordingly\n return t", "def linear_interpolation(left, right, alpha):\n\n return left + alpha * (right - left)", "def interpolate(self):\n interp = (\n self._get_ticks() - self._last_update\n ) / self._tick_step / self.dilation\n if interp > 1.0:\n interp = 1.0\n return interp", "def linear_interp(x,y,xi) :\n \n f = interp1d(x,y,kind='linear')\n yi = f(xi)\n \n return yi", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def reinterp(self, lamb):\n _wavelength = self._get_filter_in_units_of(lamb)\n _lamb = _drop_units(lamb)\n try:\n _unit = str(lamb.unit)\n except Exception:\n _unit = self.wavelength_unit\n ifT = np.interp(_lamb, _wavelength, self.transmit, left=0., right=0.)\n return self.__class__(_lamb, ifT, name=self.name, dtype=self.dtype,\n unit=_unit)", "def linear_evolve(self,nt=1):\n for l in range(nt):\n y_temp = np.empty(self.y.shape[0])\n for i in range(self.y.shape[0]):\n \n # idx left to the departure point\n j = int(np.floor((self.x[i]-self.u[i]*self.dt)/self.dx))\n # idx right to the departure point\n k = j+1\n print i, j, k\n # linear interpolation\n alpha = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n y_temp[i] = (1-alpha)*self.y[j] + alpha*self.y[k]\n # copy array to current time\n self.y = np.copy(y_temp)\n stop\n #return current varibale\n return self.y", "def linear_interpolate(x, x0, y0, x1, y1):\n try:\n return (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n except ZeroDivisionError:\n return 0.0", "def smooth_loads(self):\r\n self.fqn = interp1d(Turbine.t, self.qn, kind='cubic')", "def _raw_phys_to_eng(self, physics_value):\n y = [val - physics_value for val in self.y]\n new_pp = PchipInterpolator(self.x, y)\n roots = new_pp.roots()\n if len(roots) == 1:\n x = roots[0]\n return x\n else:\n raise UniqueSolutionException(\"The function does not have any solution.\")", "def to_linear(self):\n return inv(quad_hybrid).dot(self.circular)", "def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt", "def _LinearInterpolate(x0, target, x1, y0, y1):\n if x0 == x1:\n return (y0 + y1) / 2\n return (y1 - y0) * (target - x0) / (x1 - x0) + y0", "def _convert_slope(old_param, new_param, equivs, input_units, output_units):\n\n # remove denominator to get just flux\n old_top = old_param * input_units['x']\n new_top = new_param * input_units['x']\n\n try:\n converted_top = 
(old_top.value * old_top.unit).to(\n new_top.unit).value\n except u.core.UnitConversionError:\n converted_top = (old_top.value * old_top.unit).to(\n new_top.unit, equivalencies=equivs).value\n\n try:\n quantity = converted_top * input_units['x'].to(output_units['x'])\n except u.core.UnitConversionError: # pragma: no cover\n # no use case for this clause yet\n quantity = converted_top * input_units['x'].to(output_units['x'],\n equivalencies=equivs)\n try:\n return quantity.value\n except AttributeError:\n return quantity", "def mel_to_linear(frequency):\n return 700.0 * (np.exp(frequency / 1127.01048) - 1.0)", "def linear_int(x, y, mode=\"interp1d\"):\n if mode == \"interp1d\":\n fit = interp1d(x, y, fill_value=\"extrapolate\")\n else:\n params = n_ord_interp(x, y)\n fit = np.poly1d(params)\n\n x = np.arange(0, 2400)\n\n return fit(x)", "def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y", "def linearize(self, params, unknowns, resids):\n\n m = self.slope\n J = {}\n\n J['y', 'x'] = m\n return J", "def __lineartrans(self):\n do = self.domain\n self.transpoints = copy(self.pts)\n def t(x):\n return (x - do[0])/(do[1]-do[0])\n for i in range(len(self.transpoints)):\n self.transpoints[i,0] = t(self.transpoints[i,0])", "def coord_interp(parameter, interval):\r\n epoch = _np.linspace(1800, 12600 , int(10800/interval)+1) # 3h validity interval within 4h\r\n time = _np.array([epoch**deg for deg in range(len(parameter)-1,-1,-1)])\r\n return _np.matmul(parameter,time)", "def Linear(v):\n bias = Symbol('h_{}'.format(v), REAL)\n\n min_, max_ = linear_energy_ranges[v]\n\n theta.assertions.add(LE(bias, limitReal(max_)))\n theta.assertions.add(GE(bias, limitReal(min_)))\n\n return bias", "def linear_momentum(self):\r\n return self.mass * self.vel", "def linear(self, X):\n return X", "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def linear_interpolate(self, last_goal, goal):\n # We interpolate to reach the commanded desired position in self.ramp_ratio % of the time we have this goal\n delta_x_per_step = (goal - last_goal) / self.interpolation_steps\n self.linear = np.array(\n [(last_goal + i * delta_x_per_step) for i in range(1, int(self.interpolation_steps) + 1)])", "def linear(min_iterations, i, start = start_temp, final = final_temp):\n\n\ttemperature = start - i * (start - final) / min_iterations\n\n\treturn temperature", "def linear_interpolator(moving):\n \n if isinstance(moving, medipy.base.Image) :\n MovingImageType = medipy.itk.itk_image_type(moving)\n else :\n MovingImageType = moving\n \n return itk.LinearInterpolateImageFunction[MovingImageType, itk.D].New()", "def _lerp(self, start_value, end_value):\n # @todo: can probably replace this with np.interp(self.step_lerp_pcts, [0, 1], [start_value, end_value])\n return (1.0-self.step_lerp_pcts)*start_value + self.step_lerp_pcts*end_value", "def interpolate(self, xs):\n tck = splrep(self._xs, self._ys)\n new_ys = splev(xs, tck, der=0)\n return new_ys", "def reinterp(self, lamb):\n mean, samples = self._get_mean_and_samples_attribute('reinterp')\n mean_val = mean(lamb)\n samp_val = [sk(mean_val.wavelength) for sk in samples]\n samp_transmissions = [sk.transmit for sk in samp_val]\n\n return self.__class__(mean_val.wavelength, mean_val.transmit,\n samp_transmissions, name=self.name,\n dtype=mean_val.dtype,\n unit=mean_val.wavelength_unit)", "def converts_into_linear(Rr, Rl, L):\n a = Rr/2\n b = Rl/2\n c = Rr/L\n d = -Rl/L\n return [a, b, c, d]", 
"def interpolate(self, expression_strings):\n interpolated_solution = fenics.interpolate(\n fenics.Expression(expression_strings, element = self.element), \n self.function_space.leaf_node())\n \n self.solution.leaf_node().vector()[:] = interpolated_solution.leaf_node().vector()", "def interpolate_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:\n return vectorops.interpolate(a,b,u)", "def interpolate_linear(self, known_coords, known_values, interp_coords, groupname):\n\t\t#First need to reshape known_coords and known_values\n\t\tn_params = self.signal[groupname]['dimension']\n\t\tknown_coords = np.reshape( known_coords, (-1,n_params) )\n\t\tknown_values = np.reshape( known_values, (-1) )\t\t\n\t\treturn griddata(known_coords, known_values, interp_coords, method='linear')", "def interpolate(self, lon, lat, egy=None):\n raise NotImplementedError(\"MapBase.interpolate()\")", "def mel_to_linear(\n self, mel_amp: np.ndarray, threshold: float = 1e-10\n ) -> np.ndarray:\n return np.maximum(threshold, np.dot(self._inv_mel_basis, mel_amp))", "def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x", "def interpolate(self, interp):\n x = np.linspace(0, 29, len(self.ya))\n f_ya = interpolate.interp1d(x, self.ya)\n f_yv = interpolate.interp1d(x, self.yv)\n f_pa = interpolate.interp1d(x, np.reshape(self.pa, [-1]))\n f_pv = interpolate.interp1d(x, np.reshape(self.pv, [-1]))\n\n x_interp = np.linspace(0, 29, len(self.ya)*interp)\n self.ya = list(f_ya(x_interp))\n self.yv = list(f_yv(x_interp))\n self.pa = list(f_pa(x_interp))\n self.pv = list(f_pv(x_interp))", "def Interpolator(X, Y, TimeleftIndex, TimeRightIndex,YValue):\n Y1 = Y[TimeleftIndex]\n Y2 = Y[TimeRightIndex]\n X2 = X[TimeRightIndex]\n X1 = X[TimeleftIndex]\n slope = (Y2 - Y1) / (X2 - X1)\n if slope != 0:\n X0 = (YValue - Y1) / slope + X1\n return X0\n else:\n return 0", "def lam2E(l):\n E=12398.4/(l*u['ang'])\n return E", "def TE_TM_pol_to_lin(field):\n\n pass", "def __hinterpolate(self):\n \n # Temp. 
Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def linear_source(tau, a):\n\n return 
a[0]+a[1]*tau", "def interpolation(self) -> int:\n return self._interpolation", "def test_isentropic_pressure_addition_args_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 69.197 * units.percent\n assert_almost_equal(isentprs[1][1], truerh, 3)", "def interpolate(x1, x2, u, N):\n \n # finding the magnitude of each component\n a1 = np.matmul(x1, u)\n a2 = np.matmul(x2, u)\n\n ims = [np.matmul(u, t * a1 + (1 - t) * a2) \\\n for t in np.linspace(0, 1, N)]\n\n return np.stack(ims, 0)", "def Stokes_from_linear(self):\n (Ex,Ey) = self.linear\n self.logger.debug(\"Stokes_from_linear: (Ex, Ey) = %s\", (Ex,Ey))\n (Exc,Eyc) = self.linear.conj()\n self.logger.debug(\"Stokes_from_linear: (Ex*,Ey*) = %s\", (Exc,Eyc))\n (Sxx,Syy) = abs(self.linear*self.linear.conj())\n self.logger.debug(\"Stokes_from_linear: Sxx, Syy = %s\", (Sxx,Syy))\n Sxy = Ex*Eyc\n Syx = Ey*Exc\n self.logger.debug(\"Stokes_from_linear: Sxy, Syx = %s\", (Sxy,Syx))\n self.I = float(Sxx+Syy)\n self.Q = float(Sxx-Syy)\n self.U = float((Sxy+Syx).real)\n self.V = float(((0-1j)*(Sxy-Syx)).real)\n self.Stokes = self.I,self.Q,self.U,self.V\n return self.Stokes", "def linear_interpolate_value_at_time(t0, v0, t1, v1, t):\n return v0 + linear_interpolate_value_change(t0, v0, t1, v1, t - t0)", "def interpolate(self, interpolation=\"nearest\", **kwargs):\n return podpac.interpolators.Interpolate(source=self, interpolation=interpolation, **kwargs)", "def interpolate_fire_line(self):\n fire_line = self.get_fire_line(line_only=True)\n LINEX, LINEY = self.get_particles_props('x', 'y', array=fire_line)\n coef = np.polyfit(LINEY, LINEX, deg=min(10, len(LINEX)))\n fit = np.poly1d(coef)\n return fit(self.grid.YCELL[0])", "def lmap(v: float, x: Interval, y: Interval) -> float:\n return y[0] + (v - x[0]) * (y[1] - y[0]) / (x[1] - x[0])", "def interpolate ( hsl1, hsl2, amt ):\n if isinstance( hsl1, Colz ):\n h1 = hsl1.h\n s1 = hsl1.s\n l1 = hsl1.l\n a1 = hsl1.a\n if isinstance( hsl1, list ):\n h1 = hsl1[0]\n s1 = hsl1[1]\n l1 = hsl1[2]\n if len(hsl1) > 3:\n a1 = hsl1[3]\n\n if isinstance( h1, int ):\n h1 = h1 / 360.0\n if isinstance( s1, int ):\n s1 = s1 / 100.0\n if isinstance( l1, int ):\n l1 = l1 / 100.0\n\n if isinstance( hsl2, Colz ):\n h2 = hsl2.h\n s2 = hsl2.s\n l2 = hsl2.l\n a2 = hsl2.a\n if isinstance( hsl2, list ):\n h2 = hsl2[0]\n s2 = hsl2[1]\n l2 = hsl2[2]\n if len(hsl2) > 3:\n a2 = hsl2[3]\n\n if isinstance( h2, int ):\n h2 = h2 / 360.0\n if isinstance( s2, int ):\n s2 = s2 / 100.0\n if isinstance( l2, int ):\n l2 = l2 / 100.0\n\n h3 = Colz.hueLerp( h1, h2, amt )\n s3 = Colz.linearLerp( s1, s2, amt )\n l3 = Colz.linearLerp( l1, l2, amt )\n\n if 'a1' in locals() and 'a2' in locals():\n a3 = Colz.linearLerp( a1, a2, amt )\n else:\n a3 = 1.0\n\n c_result = Colz()\n c_result.setHsla( h3, s3, l3, a3 )\n return c_result", "def linear_interpolate(src_code, dst_code, step=5):\n assert (len(src_code.shape) == 2 and len(dst_code.shape) == 2 and\n src_code.shape[0] == 1 and dst_code.shape[0] == 1 and\n src_code.shape[1] == dst_code.shape[1])\n\n linspace = np.linspace(0.0, 1.0, step)[:, np.newaxis].astype(np.float32)\n return src_code + linspace * (dst_code - src_code)", "def 
interpolate(self, y, x=None, kind='cubic', num=None, lobatto=True,\n use_mp=False, dps=None):\n if x is None:\n x = np.linspace(float(self.a), float(self.b), len(y))\n f = interp1d(x, y, kind=kind)\n self.approximate(lambda x: float(f(float(x))),\n num=num, lobatto=lobatto, use_mp=use_mp, dps=dps)", "def analyticalLinearSol(self, t):\n return self.c*t + self.I", "def interpolate_temperature(temperature):\n return min(1.0, max(0.0, (10 + temperature) / 45))", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def _lerp(self, lo: float, hi: float, t: float):\r\n return lo + t * (hi - lo)", "def interpolation(self):\n return self._interpolation", "def interpolate(self, distance, normalized=...): # -> BaseGeometry:\n ...", "def inv_lerp(x, low, high):\n return (x - low) / (high - low)", "def speed_interpolation(val):\n if val == 0.5:\n return 1.0\n elif val < 0.5:\n return low_interp(val)\n else:\n return hi_interp(val)", "def linear_helix_strain(x, dof):\n base = np.zeros([6, dof])\n base[1, 0] = sin(x/10) # y-bending\n base[2, 1] = x * cos(x/10) # z-bending\n return base", "def interpolated(self, Any, Any_1, p_float): # real signature unknown; restored from __doc__\n pass", "def test_linear_interpolation_range(self):\n\n for x in [[1.0, 2.0, 4.0], [-20, -19, 0], numpy.arange(200) + 1000]:\n for y in [[5.0, 9.0], [100, 200, 10000]]:\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that linearly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 100)\n etas = numpy.linspace(y[0], y[-1], 100)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def linpol(self):\n return self._linpol", "def lerp(self, t):\n pass", "def nlerp(targettime, time, q):\n i_interp_int, t_matrix = compute_t(targettime, time)\n q_interp = q[i_interp_int,:] * (1 - t_matrix) \n q_interp += q[np.clip(i_interp_int + 1,0,len(time)-1),:] * t_matrix\n return norm(q_interp)", "def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values", "def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n 
timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp", "def fLinear(Vc1,Vc2,Vc3,Vk,Vw,Va,Vf,Pc1,Pc2,Pc3,Pk,Pw,Pa,Pf):\n#\n# 1. Normalise volumetric components:\n#\t-----------------------------------\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vk)+abs(Vw)+abs(Va)+abs(Vf)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVa=abs(Va)/Sum\n\tVf=abs(Vf)/Sum\n#\n#\t2. Compute liear response function:\n#\t-----------------------------------\n\tLrf=Vc1*Pc1+Vc2*Pc2+Vc3*Pc3+Vk*Pk+Vw*Pw+Va*Pa+Vf*Pf\n#\n# 3. 
Output result:\n#\t-----------------\n\treturn Lrf", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def linear(minVal, maxVal, newMin, newMax, value):\n coef = ((float(value) - float(minVal)) * 100) / (float(maxVal) - float(minVal))\n newVal = float(newMin) + ((coef * (float(newMax) - float(newMin))) / 100)\n return newVal", "def match_wl(wl, spec, ref_wl, method=\"scipy\", kind=\"linear\"):\n starttime = time.time()\n if method == \"scipy\":\n #print(kind + \" scipy interpolation\")\n linear_interp = interp1d(wl, spec, kind=kind)\n new_spec = linear_interp(ref_wl)\n elif method == \"numpy\":\n if kind.lower() is not \"linear\":\n print(\"Warning: Cannot do \" + kind + \" interpolation with numpy, switching to linear\" )\n #print(\"Linear numpy interpolation\")\n new_spec = np.interp(ref_wl, wl, spec) # 1-d peicewise linear interpolat\n else:\n print(\"Method was given as \" + method)\n raise(\"Not correct interpolation method specified\")\n #print(\"Interpolation Time = \" + str(time.time() - starttime) + \" seconds\")\n\n return new_spec # test inperpolations ", "def lerp(self, t):\n a = self.a0 + t * self.da\n return self.c + Vector((self.r * cos(a), self.r * sin(a)))", "def value(self, r: float, θ: float, φ: float) -> complex:\n result = 0\n for comp in self.components:\n result += np.interp([r], self.x, comp)[0]\n\n return result", "def trilinear_interpolate(point, atom_index, emap, emap_max, emap_min):\n point1 = []\n point0 = []\n dif = []\n for p in point:\n if round(p) == p:\n p += 1E-10\n point0.append(math.floor(p))\n point1.append(math.ceil(p))\n dif.append((p - point0[-1]) / (point1[-1] - point0[-1]))\n\n i000 = energy_map_index(point0, emap_max, emap_min) # (0, 0, 0)\n i100 = energy_map_index([point1[0], point0[1], point0[2]], emap_max, emap_min) # (1, 0, 0)\n i001 = energy_map_index([point0[0], point0[1], point1[2]], emap_max, emap_min) # (0, 0, 1)\n i101 = energy_map_index([point1[0], point0[1], point1[2]], emap_max, emap_min) # (1, 0, 1)\n i010 = energy_map_index([point0[0], point1[1], point0[2]], emap_max, emap_min) # (0, 1, 0)\n i110 = energy_map_index([point1[0], point1[1], point0[2]], emap_max, emap_min) # (1, 1, 0)\n i011 = energy_map_index([point0[0], point1[1], point1[2]], emap_max, emap_min) # (0, 1, 1)\n i111 = energy_map_index(point1, emap_max, emap_min) # (1, 1, 1)\n\n c00 = emap[i000][atom_index] * (1 - dif[0]) + emap[i100][atom_index] * dif[0]\n c01 = emap[i001][atom_index] * (1 - dif[0]) + emap[i101][atom_index] * dif[0]\n c10 = emap[i010][atom_index] * (1 - dif[0]) + emap[i110][atom_index] * dif[0]\n c11 = emap[i011][atom_index] * (1 - dif[0]) + emap[i111][atom_index] * dif[0]\n\n c0 = c00 * (1 - dif[1]) + c10 * dif[1]\n c1 = c01 * (1 - dif[1]) + c11 * dif[1]\n\n c = c0 * (1 - dif[2]) + c1 * dif[2]\n\n return c", "def interpolateLinear( t):\n k = np.searchsorted( keytime, t, side='right') - 1\n u = (t - keytime[k]) / (keytime[k + 1] - keytime[k])\n curframe = (1.0-u)*keyframe[k] + u*keyframe[k+1]\n\n return curframe", "def linearize(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n J = {}\n\n J['y', 'x'] = 2.0*a*x + b\n return J", "def GetInterpolation(self, *args, **kwargs):\n pass", "def solve_linear_harmonic_oscillator(t, initial_ampl, initial_velocity, resonance_freq, damping = 0.0, drive_ampl = 0.0, drive_ang_freq = 0.0):\n\n\n x = initial_ampl\n p = initial_velocity\n y0 = 
[initial_ampl, initial_velocity]\n y1 = odeint(dy, y0, t, args=(damping, resonance_freq, drive_ampl, drive_ang_freq)) # under damped\n return y1[:,0], y1[:,1]", "def linear_to_mel(frequency):\n return 1127.01048 * np.log(1.0 + frequency / 700.0)", "def interpol(self,x,y,x1):\n \n N = len(x)\n i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1)\n xl = x[i-1]\n xr = x[i]\n yl = y[i-1]\n yr = y[i]\n y1 = yl + (yr-yl)/(xr-xl) * (x1-xl)\n above = x1 > x[-1]\n below = x1 < x[0]\n y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1)\n y1 = np.where(below,y[0],y1)\n \n return y1, i", "def control_law( self, inputs ):\n return np.interp( inputs, self.__x, self.__y )", "def LAT(self):\n # The maximum update amount for these element\n LateralFraction_DELTA = self.dt * (self.LateralFraction_LIMITS[1] -\n self.LateralFraction_LIMITS[0]) / (\n 2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n LateralFraction_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.LateralFraction += LateralFraction_DIRECTION * LateralFraction_DELTA\n self.LateralFraction = np.clip(self.LateralFraction,\n self.LateralFraction_LIMITS[0],\n self.LateralFraction_LIMITS[1])", "def interpolateseasons(self):\n\n remainder = self.season - self.startseason\n f1 = 1.0 - remainder\n self.data = (self.startdata * f1) + (self.stopdata * remainder)", "def __init__(self, ts, ys):\n super(ForwardEulerOutput, self).__init__(np.min(ts), np.max(ts))\n self.interp = interp1d(ts, ys, kind='linear', copy=True)", "def _linear_interpolation(\n prevFrame : \"np.ndarray\",\n cFrame : \"np.ndarray\",\n fID : \"int\",\n smoothingFrames : \"int\"\n ) -> \"np.ndarray\":\n prevWeight = 1-((fID+1)/smoothingFrames)\n finalWeight = (fID+1)/smoothingFrames\n transitionFrame = prevWeight * prevFrame + finalWeight*cFrame\n return transitionFrame.astype(np.uint8)", "def _set_angular_velocity(self):\n nb_angular_velocities = 0\n sum_angular_velocities = 0\n for sl_id in range(self.nb_sl):\n w_list = self.sl_list[sl_id].angular_velocities\n nb_angular_velocities += len(w_list)\n sum_angular_velocities += np.sum(w_list)\n self.angular_velocity = sum_angular_velocities / nb_angular_velocities", "def _interpolate_relative_potential(self, r):\n \n # interpolate\n #spline = interpolate.UnivariateSpline(self._relative_potential_r,\n # self._relative_potential_psi, k = 1)\n \n # linear interpolation is more reliable assuming number of points\n # is large enough\n spline = interpolate.interp1d(self._relative_potential_r, self._relative_potential_psi)\n \n return 10.0**spline(np.log10(r))", "def solve_linear_displacement(engine, source_k, s):\n code = CodeSegment(engine)\n code.assign(x=Literal(engine.q), y='q')\n code.assign(x=Literal(numpy.zeros_like(engine.q)), y=s)\n code.decompose(x='q', layout='layout')\n for d in range(engine.pm.ndim):\n def tf(k, d=d):\n k2 = sum(ki ** 2 for ki in k)\n mask = k2 == 0\n k2[mask] = 1.0\n return 1j * k[d] / k2 * ~mask\n code.assign(x='source_k', y='disp1_k')\n code.transfer(complex='disp1_k', tf=tf)\n code.c2r(complex='disp1_k', real='disp1')\n code.readout(mesh='disp1', value='s1', x='q', layout='layout')\n code.assign_component(attribute=s, value='s1', dim=d)\n return code", "def interpolate(self, *point, **kwargs):\n\n # Assume alpha enhancement of 0.4 if not given.\n if len(point) == 3:\n point = [] + list(point) + [0.4]\n warnings.warn(\n \"Assuming [alpha/Fe] = 0.4 composition 
unless \"\n \"otherwise specified.\", StandardCompositionAssumed)\n elif len(point) == 4:\n point = list(point)\n warnings.warn(\n \"Fourth stellar param is [alpha/Fe] = {}\".format(point[3]))\n\n return super(self.__class__, self).interpolate(*point, **kwargs)", "def linear(x2, y2, N=100):\n\n m = y2 / x2\n x = np.linspace(0, x2, N)\n y = m*x\n\n # The time of travel\n T = np.sqrt(2*(1+m**2)/g/m * x2)\n print('T(linear) = {:.3f}'.format(T))\n return x, y, T", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def y(df,x):\r\n x_p=np.array(df['Vertices'])\r\n y_p=np.array(df['DIxPRE 252'])\r\n cs = scipy.interpolate.splrep(x_p,y_p)\r\n return scipy.interpolate.splev(x,cs)", "def interpol(x, X, Y):\n \n for idx, xx in enumerate(X):\n if x <= xx:\n break\n \n x2 = xx \n y2 = Y[idx]\n x1 = X[idx-1] \n y1 = Y[idx-1] \n y = (y2-y1)/(x2-x1)*(x-x1) + y1\n \n return y", "def localized_E(E1, i, j, x, y):\n oldval = x[i, j]\n newval = oldval * -1 # flip\n # local computations\n E2 = E1 - (h * oldval) + (h * newval)\n E2 = E2 + (eta * y[i, j] * oldval) - (eta * y[i, j] * newval)\n adjacent = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n neighbors = [x[i + di, j + dj] for di, dj in adjacent\n if is_valid(i + di, j + dj, x.shape)]\n E2 = E2 + beta * sum(a * oldval for a in neighbors)\n E2 = E2 - beta * sum(a * newval for a in neighbors)\n return oldval, newval, E1, E2", "def _t_at_interface(self, polarization, n_1, n_2):\n if polarization == 's':\n return 2*n_1/(n_1 + n_2)\n elif polarization == 'p':\n return 2*n_1/(n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")", "def _linear_dispersion_coeff(beta, q):\n return np.sqrt(beta) / (2 * np.sin(np.pi * q))" ]
[ "0.63465023", "0.62840116", "0.6036108", "0.59930384", "0.59634036", "0.59620404", "0.5953784", "0.59198254", "0.5882442", "0.5880557", "0.5876995", "0.5843551", "0.5801997", "0.57321805", "0.5688701", "0.5683657", "0.5601913", "0.55995375", "0.55786633", "0.5578463", "0.55348766", "0.55223244", "0.5517989", "0.5504811", "0.5504322", "0.5486917", "0.54713875", "0.54655313", "0.5460785", "0.546076", "0.5423822", "0.53893375", "0.53859645", "0.53670543", "0.535898", "0.53539306", "0.5352216", "0.5351444", "0.5348427", "0.53482974", "0.5325294", "0.5323807", "0.53218615", "0.53076524", "0.5269795", "0.52604496", "0.52574337", "0.5254407", "0.5241783", "0.5235491", "0.5229146", "0.5222261", "0.52180916", "0.5208359", "0.5202945", "0.51946497", "0.5188109", "0.5186631", "0.51854795", "0.5172422", "0.5163609", "0.5162645", "0.5139305", "0.51382923", "0.5120992", "0.5118842", "0.5118395", "0.510242", "0.5102223", "0.509852", "0.5097368", "0.50948626", "0.5090386", "0.50902385", "0.5070053", "0.5068713", "0.5066496", "0.5062672", "0.5058039", "0.50579786", "0.5054", "0.50538206", "0.50497967", "0.50437164", "0.5042036", "0.50364727", "0.5035736", "0.5034755", "0.50335234", "0.5032853", "0.50325024", "0.5030202", "0.50271976", "0.5022652", "0.50143194", "0.5012708", "0.50106573", "0.5008562", "0.50084704", "0.5006534", "0.5004968" ]
0.0
-1
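The surrounding records split a polynomial-based unit conversion into pieces: the document above wraps fitted coefficients in np.poly1d, the document that follows evaluates that polynomial, and their negatives include an inverse that solves for a root. Below is a minimal self-contained sketch of how those pieces could fit together; the class name PolyUnitConv, the method names, and the error message are assumptions for illustration, only the np.poly1d / .roots mechanics come from the snippets themselves.

import numpy as np

class PolyUnitConv:
    # hypothetical standalone version of the conversion split across the records
    def __init__(self, coef):
        # coefficients are given highest power first, as np.poly1d expects
        self.p = np.poly1d(coef)

    def eng_to_phys(self, eng_value):
        # forward direction: evaluate the fitted polynomial
        return self.p(eng_value)

    def phys_to_eng(self, physics_value):
        # inverse direction: shift the polynomial and require a unique root
        roots = (self.p - physics_value).roots
        if len(roots) != 1:
            raise ValueError("no unique engineering value for %r" % (physics_value,))
        return roots[0]

# usage with a linear calibration p(x) = 2x + 1
conv = PolyUnitConv([2.0, 1.0])
assert conv.eng_to_phys(3.0) == 7.0
assert abs(conv.phys_to_eng(7.0) - 3.0) < 1e-9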
Convert between engineering and physics units.
def _raw_eng_to_phys(self, eng_value):
    return self.p(eng_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def original_units(self):\n self.physical_units(distance=self.infer_original_units('km'),\n velocity=self.infer_original_units('km s^-1'),\n mass=self.infer_original_units('Msol'), persistent=False)", "def useUnits():", "def units(self, key):\n \n # Strip any operators\n _, key = get_operator(key)\n \n # Fill out aliases \n if key in component_from_alias:\n key = component_from_alias[key]\n elif key == 'E':\n key = 'electricField'\n elif key == 'B':\n key = 'magneticField' \n \n return pg_units(key)", "def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier", "def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)", "def get_units(self,):\n self.UNITS = {'pressure':'Pa',}\n return", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def convert_units(data, units):\n # Build the dictionary of units conversions\n convert = {'m' : [1.0, 0., 'm'], \n 'meter' : [1.0, 0., 'm'], \n 'deg C' : [1.0, 273.15, 'K'], \n 'Celsius' : [1.0, 273.15, 'K'], \n 'K' : [1.0, 0., 'K'],\n 'db' : [1.e4, 101325., 'Pa'], \n 'Pa' : [1.0, 0., 'Pa'],\n 'mg/m^3': [1.e-6, 0., 'kg/m^3'], \n 'S/m': [1.0, 0., 'S/m'],\n 'mS/m' : [1.e-3, 0., 'S/m'],\n 'psu': [1.0, 0., 'psu'], \n 'salinity': [1.0, 0., 'psu'], \n 'kg/m^3': [1.0, 0., 'kg/m^3'], \n 'kilogram meter-3': [1.0, 0., 'kg/m^3'], \n 'm/s': [1.0, 0., 'm/s'], \n 'mg/l': [1.e-3, 0., 'kg/m^3'],\n 'meter second-1' : [1.0, 0., 'm/s'],\n 'm.s-1' : [1.0, 0., 'm/s'],\n 'pH units' : [1.0, 0., 'pH units'],\n 'MPa' : [1.e6, 0., 'Pa'],\n '--' : [1.0, 0., '--'],\n 'mD' : [9.869233e-16, 0., 'm^2'],\n 'um' : [1.e-6, 0., 'm'],\n 'm/s 1e-9' : [1.e-9, 0., 'm/s'],\n 'm/s 1e-7' : [1.e-7, 0., 'm/s'],\n 'wt.%' : [10., 0., 'psu'],\n '10^-15 m^2' : [1.e-15, 0., 'm^2'],\n 'm^2' : [1., 0., 'm^2'],\n 'kg/m^2/year' : [3.168808781402895e-08, 0., 'kg/m^2/s'] \n } \n \n # Make sure the data are a numpy array and the units are a list\n if isinstance(data, float) or isinstance(data, int):\n data = np.array([data])\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(units, str) or isinstance(units, unicode):\n units = [units]\n if units == None:\n units = ['']\n \n # Make sure you can slice through the columns: must be two-dimensional\n sh = data.shape\n data = np.atleast_2d(data)\n \n # Allow conversion of a row of data if all of the same unit\n if len(units) == 1 and data.shape[1] > 1:\n data = data.transpose()\n \n # Create an emtpy array to hold the output\n out_data = np.zeros(data.shape)\n out_units = []\n \n # Convert the units\n for i in range(len(units)):\n 
try:\n out_data[:,i] = data[:,i] * convert[units[i]][0] + \\\n convert[units[i]][1]\n out_units += [convert[units[i]][2]]\n except KeyError:\n print('Do not know how to convert %s to mks units' % units[i])\n print('Continuing without converting these units...')\n out_data[:,i] = data[:,i]\n out_units += units[i]\n \n # Return the converted data in the original shape\n out_data = np.reshape(out_data, sh, 'C')\n return (out_data, out_units)", "def _raw_phys_to_eng(self, physics_value):\n roots = (self.p - physics_value).roots\n if len(roots) == 1:\n x = roots[0]\n return x\n else:\n raise ValueError(\"There doesn't exist a corresponding engineering value or \"\n \"they are not unique:\", roots)", "def units(self):\n pass", "def _override_units_system(self):\n try:\n f = open(self.filename+\".units\")\n except OSError:\n return\n\n name_mapping = {'pos': 'distance', 'vel': 'velocity'}\n units_dict = {}\n\n for line in f:\n if (not line.startswith(\"#\")):\n if \":\" not in line:\n raise OSError(\"Unknown format for units file %r\"%(self.filename+\".units\"))\n else:\n t, u = list(map(str.strip,line.split(\":\")))\n t = name_mapping.get(t,t)\n units_dict[t] = u\n\n self.set_units_system(**units_dict)", "def convert(self):\n return _libsbml.SBMLUnitsConverter_convert(self)", "def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == \"PHQ\":\n return arr*2.5e6/1000.\n return arr", "def convert_units(celsius_value, units):\n if units == 0:\n return celsius_value\n if units == 1:\n return celsius_value * 1.8 + 32\n return celsius_value + 273.15", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def get_units(self):\n return str(self._modeler.GetModelUnits())", "def convert_units(src_unit: Union[str, float], tgt_unit: Union[str, float]):\n return _parse_unit(src_unit) / _parse_unit(tgt_unit)", "def getDistanceUnits(self) -> Unit:\n ...", "def ke(self):\n self._obj['w'] = (self._obj['u'])**2 + (self._obj['v'])**2\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'({vel_units})^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'({vel_units})^2')\n return self._obj", "def 
convert_units(self, time_units=None, len_units=None):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert drawdown data\n self.drawdown.convert_units(time_units, len_units)\n # Convert associate data units\n for i in range(self.data_count()):\n if self.data[i].dtype == 1: # drawdown units\n data_units = len_units\n elif self.data[i].dtype == 2: # first derivative units\n data_units = len_units + \"/\" + time_units\n elif self.data[i].dtype == 3: # second derivative units\n data_units = len_units + \"/\" + time_units + \"2\"\n self.data[i].convert_units(time_units, data_units)\n self.len_units = len_units\n self.time_units = time_units\n # End Function", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def convert_to(self, units: str) -> None:\n if self.units == units:\n return\n\n if units not in Variable.VALID_UNIT_CONVERSIONS[self.units]:\n msg = f\"\"\"Not a valid unit conversion. Valid destination units:\n {Variable.VALID_UNIT_CONVERSIONS[self.units]}\"\"\"\n raise ValueError(msg)\n\n if self.units == \"celsius\" and units == \"fahrenheit\":\n self._celsius_to_fahrenheit()\n elif self.units == \"m/s\" and units == \"km/h\":\n self._mps_to_kph()\n elif self.units == \"m/s\" and units == \"mph\":\n self._mps_to_mph()\n else:\n raise ValueError(\"Not a valid unit conversion.\")", "def _mps_to_kph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"km/h\"\n self.value = ((self.value * 360) / 100).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def convert_energy(self, event):\n try:\n #Compare other unit to one unit(joules)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Btu(th)\": 1054.35, \"Btu(mean)\": 1055.87, \"calories(IT)\": 4.1868, \"calories(th)\": 4.184, \"calories(mean)\": 4.19002, \"calories(15C)\": 4.1858, \"calories(20C)\": 4.1819, \"calories(food)\": 4186.0, \"centigrade heat units\": 1900.4, \"electron volts\": 1.60219 * 10 ** -19, \"ergs\": 1.0 * 10 ** -7, \"foot-pound force\": 1.355818, \"foot poundals\": 0.04214, \"gigajoules\": 1.0 * 10 ** 9, \"horsepower hours\": 2684520.0, \"inch-pound force\": 0.112985, \"joules\": 1.0, \"kilocalories(IT)\": 4186.8, \"kilocalories(th)\": 4184.0, \"kilogram-force meters\": 9.80665, \"kilojoules\": 1000.0, \"kilowatt hours\": 3600000.0, \"megajoules\": 1.0 * 10 ** 6, \"newton meters\": 1.0, \"therms\": 105505585.257348, \"watt seconds\": 1.0, \"watt hours\" : 3600.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In 
case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)", "def convert_energy_2_internal_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[self.current_units[\"energy\"]]\n \n # special handling for nano meters\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n #if val == 0.0:\n # return 0.0\n #return (1.0/val)/cfact\n else:\n return val*cfact", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV", "def setunits(self, *args, **kwargs):\n return _coordsys.coordsys_setunits(self, *args, **kwargs)", "def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def conv(old='auto', new='auto'):\n if old == new:\n return 1.\n for unittype in [lenunits, angunits, timunits, masunits, eneunits]:\n if old in unittype and new in unittype:\n return unittype[new] / unittype[old]\n\n raise ValueError('Units \\'{}\\' and \\'{}\\' unrecognized or '\n 'not of same unit type'.format(old, new))", "def _raw_eng_to_phys(self, eng_value):\n return self.pp(eng_value)", "def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)", "def unit_of_measurement(self):\n if self.values.primary.units == \"C\":\n return TEMP_CELSIUS\n if self.values.primary.units == \"F\":\n return TEMP_FAHRENHEIT\n\n return self.values.primary.units", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_units\n\n if raw_units in [TEMP_CELSIUS, TEMP_FAHRENHEIT]:\n return self.hass.config.units.temperature_unit\n return raw_units", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def test_change_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n s.units = \"SI\"\n assert s.units == \"SI\"\n assert s.cv.units == \"kilojoule / kelvin / kilogram\"\n assert s.cp.units == \"kilojoule / kelvin / kilogram\"\n assert s.s.units == \"kilojoule / kelvin / kilogram\"\n assert s.h.units == \"kilojoule / kilogram\"\n assert s.T.units == \"degree_Celsius\"\n assert s.u.units == \"kilojoule / kilogram\"\n assert s.v.units == \"meter ** 3 / kilogram\"\n assert s.p.units == \"bar\"", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n 
self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def convert_H_kJmol(en_H):\n return en_H/kJmol_H", "def _mps_to_mph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"mph\"\n self.value = (self.value * 2.236936).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def getUnits(self):\n return _libsbml.Compartment_getUnits(self)", "def unit_of_measurement(self):\n return self._tasmota_entity.unit", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)", "def convert_force(self, event):\n try:\n #Compare other unit to one unit(newtons)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"dynes\": 0.00001, \"kilograms force\": 9.80665, \"kilonewtons\": 1000.0, \"kips\": 4448.222, \"meganewtons\": 1.0 * 10 ** 6, \"newtons\": 1.0, \"pounds force\": 4.448222, \"poundals\": 0.138255, \"sthene\": 1000.0, \"tonnes force\": 9806.65, \"tons force(UK)\": 9964.016418, \"tons force(US)\": 8896.443231}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def units(self):\n return self._units", "def units(self):\n return self._units", "def unit_of_measurement(self):\n return self._units", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * 
unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_speed(self, event):\n try:\n #Compare other unit to one unit(meters/second)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Mach number\": 340.2933, \"Nm/24hr\": 0.021435, \"centimeters/minute\": 0.000167, \"centimeters/second\": 0.01, \"feet/hour\": 8.5e-05, \"feet/minute\": 0.00508, \"feet/second\": 0.3048, \"inches/minute\": 0.000423, \"inches/second\": 0.0254, \"kilometers/hour\": 0.277778, \"kilometers/second\": 1000.0, \"knots\": 0.514444, \"meters/hour\": 0.000278, \"meters/minute\": 0.016667, \"meters/second\": 1.0, \"miles/hour\": 0.44704, \"miles/minute\": 26.8224, \"miles/second\": 1609.344, \"nautical miles/hour\": 0.514444, \"speed of light\": 299790000.0, \"speed of sound\": 343.0, \"yards/hour\": 0.000254, \"yards/minute\": 0.01524, \"yards/second\": 0.9144}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def to_axis_units(self, label, vals):\n if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar', 'P']:\n return vals / 1000\n elif label in ['T']:\n return vals\n else:\n raise ValueError(label)", "def convert_units(unt, origunits):\n if unt[0:3] == origunits[0:3] | unt[0:3] == 'ori':\n units = origunits\n convf = 1\n else:\n if 'fee' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'met':\n units = 'feet'\n convf = 3.2808399\n else:\n units = origunits\n convf = 1\n else:\n if 'met' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'fee':\n units = 'meters'\n convf = 0.3048\n else:\n units = origunits\n convf = 1\n else:\n if 'm/s' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'kno':\n units = 'meters/sec'\n convf = 0.51444444\n else:\n units = origunits\n convf = 1\n else:\n if 'kno' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'm/s':\n units = 'knots'\n convf = 1.9438445\n else:\n units = origunits\n convf = 1\n else:\n error('Unknown units')\n #\n return units, convf", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def _get_units_object(self, units):\n if isinstance(units, cellml_units):\n # We're done\n pass\n else:\n units = amara_parse_cellml(unicode(units))\n assert isinstance(units, cellml_units)\n return units", "def to_meters(d, d_unit):\n if d_unit == UOM_M:\n dm = d\n elif d_unit == UOM_KM:\n dm = d * 1000\n elif d_unit == UOM_FEET:\n dm = feet2m(d)\n elif d_unit == UOM_SM:\n dm = SM2m(d)\n elif d_unit == UOM_NM:\n dm = NM2m(d)\n return dm", "def E2V(E):\n# for energy in mev returns 
velocity in m/s\n return sqrt(E/5.227e-6)", "def unit_of_measurement(self):\n return self.var_units", "def convert_units(cube, units):\n try:\n cube.convert_units(units)\n except ValueError:\n if not _try_special_conversions(cube, units):\n raise\n\n return cube", "def units(self, x):\n u = ''.join([chr(d) for d in self[x]['units'][:]])\n if (u in ['n/a']) and (x in ['latitude', 'longitude']):\n u = 'radian' # assume radians\n return u", "def unit_of_measurement(self):\n unit = get_uom_from_status(self._device.status)\n if unit == HS_UNIT_LUX:\n return LIGHT_LUX\n elif unit == HS_UNIT_CELSIUS:\n return TEMP_CELSIUS\n elif unit == HS_UNIT_FAHRENHEIT:\n return TEMP_FAHRENHEIT\n elif unit == HS_UNIT_PERCENTAGE:\n return PERCENTAGE\n elif unit == HS_UNIT_A or unit == HS_UNIT_AMPERES:\n return ELECTRIC_CURRENT_AMPERE\n elif unit == HS_UNIT_KW:\n return POWER_KILO_WATT\n elif unit == HS_UNIT_KWH:\n return ENERGY_KILO_WATT_HOUR\n elif unit == HS_UNIT_V or unit == HS_UNIT_VOLTS:\n return ELECTRIC_POTENTIAL_VOLT\n elif unit == HS_UNIT_W or unit == HS_UNIT_WATTS:\n return POWER_WATT\n return None", "def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')", "def convert_length(self, event):\n try:\n #Compare other unit to one unit(meters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"angstroms\": 10 ** -10, \"au\": 149598550000.0, \"barleycorns\": 0.008467, \"cables\": 182.88, \"centimeters\": 0.01, \"chains\": 20.11684, \"decimeters\": 0.1, \"ells\": 0.875, \"ems\" : 0.004233, \"fathoms\": 1.8288, \"feet(UK & US)\": 0.3048, \"feet(US survey)\": 0.304801, \"furlongs\": 201.168, \"hands\": 0.1016, \"hectometers\": 100.0, \"inches\": 0.0254, \"kilometers\": 1000.0, \"light years\": 9460528405000000.0, \"meters\": 1.0, \"micrometers\": 0.000001, \"mil\": 0.0000254, \"miles(UK & US)\": 1609.344, \"miles(nautical, international)\": 1852.0, \"miles(nautical, UK)\": 1853.184, \"millimeters\": 0.001, \"nanometers\": 10 ** -9, \"parsecs\": 30856776000000000.0, \"picometers\": 10 ** -12, \"Scandinavian mile\": 10000.0, \"thou\": 0.0000254, \"yards\": 0.9144, \"links\": 0.2011684, \"pica\": 0.00423333, \"rods\": 5.0292, \"spans\": 0.2286}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _standardise_dtypes_and_units(cube: Cube) -> None:\n\n def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj:\n The object to be updated\n required_dtype:\n The dtype required\n\n Returns:\n The updated object\n \"\"\"\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n 
coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)", "def unit_of_measurement(self) -> Any:\n return TEMP_CELSIUS", "def test_measurment(self):\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"km\"), 6.214)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"m\"), 10.936)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"cm\"), 0.328)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"mm\"), 0.394)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"mi\"), 16.093)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"yd\"), 9.144)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"ft\"), 304.8)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"in\"), 254)", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_unit_of_measurement\n if raw_units in (TEMP_FAHRENHEIT, TEMP_CELSIUS):\n return self.hass.config.units.temperature_unit\n return raw_units", "def _dwd_kelvin_to_celsius(self, chn):\n if not self._is_solar_channel(chn) and \\\n (self[chn].info['units'] in ['K', 'degree Kelvin', 'KELVIN'] or\n self[chn].unit == 'K'):\n self[chn].data -= CONVERSION\n self[chn].info['units'] = self[chn].unit = 'C'", "def unit_of_measurement(self):\n if self.api_unit in TEMPERATURE_UNITS:\n return self.hass.config.units.temperature_unit\n\n if self.api_unit in LENGTH_UNITS:\n return self.hass.config.units.length_unit\n\n if self.api_unit in PRESSURE_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return self.hass.config.units.pressure_unit\n return PRESSURE_HPA\n\n if self.api_unit in FUEL_CONSUMPTION_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return FUEL_CONSUMPTION_MPG\n return FUEL_CONSUMPTION_L_PER_100KM\n\n return self.api_unit", "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]", "def cbToEngUnits( BoardNum, Range, DataVal, EngUnits = 0.0 ):\n EngUnits = ctypes.c_float( EngUnits )\n CHK( cbw.cbToEngUnits( BoardNum, Range, DataVal, byref( EngUnits ) ) )\n return EngUnits.value", "def convert_energy_2_current_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[units]\n \n # special handling for nanometers\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n else:\n return val/cfact", "def convert_kcalmol_kJmol(en_kcalmol):\n return en_kcalmol/kJmol_kcalmol", "def unit_of_measurement(self) -> str:\n return MS", "def to(self, unit, equivalencies=[], freq=None):\n\n if not isinstance(unit, u.Unit):\n unit = u.Unit(unit)\n\n if unit == self.unit:\n # No copying\n return self\n\n if ((self.unit.is_equivalent(u.Jy / u.beam) and\n not any({u.Jy/u.beam, u.K}.issubset(set(eq)) for eq in equivalencies))):\n # the 'not any' above checks that there is not already a defined\n # Jy<->K equivalency. 
If there is, the code below is redundant\n # and will cause problems.\n\n if hasattr(self, 'beams'):\n factor = (self.jtok_factors(equivalencies=equivalencies) *\n (self.unit*u.beam).to(u.Jy))\n else:\n # replace \"beam\" with the actual beam\n if not hasattr(self, 'beam'):\n raise ValueError(\"To convert objects with Jy/beam units, \"\n \"the object needs to have a beam defined.\")\n brightness_unit = self.unit * u.beam\n\n # create a beam equivalency for brightness temperature\n if freq is None:\n try:\n freq = self.with_spectral_unit(u.Hz).spectral_axis\n except AttributeError:\n raise TypeError(\"Object of type {0} has no spectral \"\n \"information. `freq` must be provided for\"\n \" unit conversion from Jy/beam\"\n .format(type(self)))\n else:\n if not freq.unit.is_equivalent(u.Hz):\n raise u.UnitsError(\"freq must be given in equivalent \"\n \"frequency units.\")\n\n bmequiv = self.beam.jtok_equiv(freq)\n # backport to handle astropy < 3: the beam equivalency was only\n # modified to handle jy/beam in astropy 3\n if bmequiv[0] == u.Jy:\n bmequiv.append([u.Jy/u.beam, u.K, bmequiv[2], bmequiv[3]])\n\n factor = brightness_unit.to(unit,\n equivalencies=bmequiv + list(equivalencies))\n\n else:\n # scaling factor\n factor = self.unit.to(unit, equivalencies=equivalencies)\n\n converted_array = (self.quantity * factor).value\n\n # use private versions of variables, not the generated property\n # versions\n # Not entirely sure the use of __class__ here is kosher, but we do want\n # self.__class__, not super()\n new = self.__class__(value=converted_array, unit=unit, copy=True,\n wcs=self._wcs, meta=self._meta, mask=self._mask,\n header=self._header)\n\n return new", "def unit_of_measurement(self):\n return self.values.primary.units", "def make_power_physical_units(power, dt):\n \n return power*dt", "def convert_pressure(self, event):\n try:\n #Compare other unit to one unit(pascals)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"atm\": 101325.0, \"bars\": 100000.0, \"centimeters mercury\": 1333.22, \"centimeters water\": 98.0665, \"feet of water\": 2989.06692, \"hectopascals\": 100.0, \"inches of mercury\": 3386.388, \"inches of water\": 249.08891, \"kilogram-force/sq.centimeter\": 98066.5, \"kilogram-force/sq.meter\": 9.80665, \"kilonewtons/sq.meter\": 1000.0, \"kilonewtons/sq.millimeter\": 1000000000.0, \"kilopascals\": 1000.0, \"kips/sq.inch\": 6894760.0, \"meganewtons/sq.meter\": 1000000.0, \"meganewtons/sq.millimeter\": 1000000000000.0, \"meters of water\": 9806.65, \"millibars\": 100.0, \"millimeters of mercury\": 133.322, \"millimeters of water\": 9.80665, \"newtons/sq.centimeter\": 10000.0, \"newtons/sq.meter\": 1.0, \"newtons/sq.millimeter\": 1000000.0, \"pascals\": 1.0, \"poundals/sq.foot\": 1.44816, \"pounds-force/sq.foot\": 47.88, \"pounds-force/sq.inch\": 6894.757, \"tonnes-force/sq.cm\": 98066500.0, \"tonnes-force/sq.meter\": 9806.65, \"tons(UK)-force/sq.foot\": 107251.0, \"tons(UK)-force/sq.inch\": 15444280.0, \"tons(US)-force/sq.foot\": 95760.0, \"tons(US)-force/sq.inch\": 13789500.0, \"torr\": 133.322}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = 
\"Value is invalid.\"\n self.print_text(printer)", "def solve(self):\n wort_gravity = self.property('start_gravity').to('sg') +\\\n (self.total_points().to('points') / self.property('wort_volume').to('gal') / 1000.0)\n self.property('wort_gravity', Quantity(wort_gravity, 'sg'))", "def native_unit_of_measurement(self) -> str:\n return f\"{CURRENCY_CENT}/{UnitOfVolume.LITERS}\"", "def unit_of_measurement(self):\n set_req = self.gateway.const.SetReq\n unit_map = {\n set_req.V_TEMP: (TEMP_CELSIUS\n if self.gateway.metric else TEMP_FAHRENHEIT),\n set_req.V_HUM: '%',\n set_req.V_DIMMER: '%',\n set_req.V_LIGHT_LEVEL: '%',\n set_req.V_WEIGHT: 'kg',\n set_req.V_DISTANCE: 'm',\n set_req.V_IMPEDANCE: 'ohm',\n set_req.V_WATT: 'W',\n set_req.V_KWH: 'kWh',\n set_req.V_FLOW: 'm',\n set_req.V_VOLUME: 'm3',\n set_req.V_VOLTAGE: 'V',\n set_req.V_CURRENT: 'A',\n }\n if float(self.gateway.protocol_version) >= 1.5:\n if set_req.V_UNIT_PREFIX in self._values:\n return self._values[\n set_req.V_UNIT_PREFIX]\n unit_map.update({set_req.V_PERCENTAGE: '%'})\n if float(self.gateway.protocol_version) >= 2.0:\n unit_map.update({\n set_req.V_ORP: 'mV',\n set_req.V_EC: 'μS/cm',\n set_req.V_VAR: 'var',\n set_req.V_VA: 'VA',\n })\n return unit_map.get(self.value_type)", "def _get_units(self):\n #assert self.ser.isOpen()\n\n self.serial_connection.write('UNI' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n unit = self.MEASUREMENT_UNITS[self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)]\n\n self.serial_connection.write(self.CR + self.LF)\n\n return unit", "def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')", "def resolve_units(obj, _):\n return obj.units.decode()", "def convert_units(self, time_units=None, len_units=None, pump_units=None,\n same=False):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Check new pumping rate units\n in_pump = self.pump_units\n if pump_units is None:\n pump_units = in_pump\n if same:\n pump_units = \"%s3/%s\" % (len_units, time_units)\n flag = _units.validate_units(pump_units)\n if flag == -1:\n raise ValueError('Bad pumping rate units input {}'.format(len_units))\n\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, 
len_units)\n # Convert pumping rate data\n self.pumprate.convert_units(time_units, pump_units)\n # Convert well data units\n for i in range(self.well_count()):\n self.wells[i].convert_units(time_units, len_units)\n # Set input units\n self.len_units = len_units\n self.time_units = time_units\n self.pump_units = pump_units\n # End Function", "def unit_of_measurement(self) -> str | None:\n # Highest priority, for registered entities: unit set by user,with fallback to\n # unit suggested by integration or secondary fallback to unit conversion rules\n if self._sensor_option_unit_of_measurement is not UNDEFINED:\n return self._sensor_option_unit_of_measurement\n\n # Second priority, for non registered entities: unit suggested by integration\n if not self.registry_entry and (\n suggested_unit_of_measurement := self.suggested_unit_of_measurement\n ):\n return suggested_unit_of_measurement\n\n # Third priority: Legacy temperature conversion, which applies\n # to both registered and non registered entities\n native_unit_of_measurement = self.native_unit_of_measurement\n\n if (\n self.device_class == SensorDeviceClass.TEMPERATURE\n and native_unit_of_measurement\n in {UnitOfTemperature.CELSIUS, UnitOfTemperature.FAHRENHEIT}\n ):\n return self.hass.config.units.temperature_unit\n\n # Fourth priority: Native unit\n return native_unit_of_measurement", "def set_units(self, units):\n self.units = units", "def convert_kcalmol_eV(en_kcalmol):\n return en_kcalmol*kcalmol_eV", "def unit_of_measurement(self):\n return self.sensor_type[\"unit\"]", "def convert_eV_kcalmol(en_eV):\n return en_eV/kcalmol_eV", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def setup_md_units(md_base_units: Dict[str, Union[str, float]]):\n # Initialize basic unit system\n md_base_units = {u: _parse_unit(md_base_units[u]) for u in md_base_units}\n\n # Set up unit dictionary\n units = Units(md_base_units)\n\n # Derived units (MD internal -> ASE internal)\n units[\"time\"] = units[\"length\"] * np.sqrt(units[\"mass\"] / units[\"energy\"])\n units[\"force\"] = units[\"energy\"] / units[\"length\"]\n units[\"stress\"] = units[\"energy\"] / units[\"length\"] ** 3\n units[\"pressure\"] = units[\"stress\"]\n\n # Conversion of length units\n units[\"A\"] = aseunits.Angstrom / units[\"length\"]\n units[\"Ang\"] = units[\"A\"]\n units[\"Angs\"] = units[\"A\"]\n units[\"Angstrom\"] = units[\"A\"]\n units[\"nm\"] = aseunits.nm / units[\"length\"]\n units[\"a0\"] = aseunits.Bohr / units[\"length\"]\n units[\"Bohr\"] = units[\"a0\"]\n\n # Conversion of energy units\n units[\"kcal\"] = aseunits.kcal / units[\"energy\"]\n units[\"kJ\"] = aseunits.kJ / units[\"energy\"]\n units[\"eV\"] = aseunits.eV / units[\"energy\"]\n units[\"Hartree\"] = aseunits.Hartree / units[\"energy\"]\n units[\"Ha\"] = units[\"Hartree\"]\n\n # Time units\n units[\"fs\"] = aseunits.fs / units[\"time\"]\n units[\"s\"] = aseunits.s / units[\"time\"]\n units[\"aut\"] = aseunits._aut * aseunits.s / units[\"time\"]\n\n # Pressure units\n units[\"Pascal\"] = aseunits.Pascal / units[\"pressure\"]\n units[\"bar\"] = 1e5 * units[\"Pascal\"]\n\n # Mol\n units[\"mol\"] = aseunits.mol\n\n # Mass\n units[\"Dalton\"] = 1.0 / units[\"mass\"]\n units[\"amu\"] = aseunits._amu / 
units[\"mass\"]\n\n # Charge distributions\n units[\"Debye\"] = aseunits.Debye / (units[\"charge\"] * units[\"length\"])\n units[\"C\"] = aseunits.C / units[\"charge\"]\n\n # Constants (internal frame)\n units[\"kB\"] = aseunits.kB / units[\"energy\"] # Always uses Kelvin\n units[\"hbar\"] = (\n aseunits._hbar * (aseunits.J * aseunits.s) / (units[\"energy\"] * units[\"time\"])\n ) # hbar is given in J*s by ASE\n units[\"ke\"] = (\n units[\"a0\"] * units[\"Ha\"] / units[\"charge\"] ** 2\n ) # Coulomb constant is 1 in atomic units\n\n # For spectra\n units[\"hbar2icm\"] = units[\"hbar\"] * 100.0 * aseunits._c * aseunits._aut\n\n return units", "def to_unit(self, unit):\n unit = _find_unit(unit)\n self.value = _convert_value(self.value, self.unit, unit)\n self.unit = unit", "def convert_kJmol_kcalmol(en_kJmol):\n return en_kJmol*kJmol_kcalmol", "def unit_of_measurement(self):\n return None", "def set_units_system(self, velocity=None, distance=None, mass=None, temperature=None):\n import configparser\n\n from .. import config_parser\n\n # if the units system doesn't exist (if this is a new snapshot), create\n # one\n if len(self._file_units_system) < 3:\n warnings.warn(\"Previous unit system incomplete -- using defaults\")\n self._file_units_system = [\n units.Unit(x) for x in ('G', '1 kpc', '1e10 Msol')]\n\n else:\n # we want to change the base units -- so convert to original\n # units first and then set all arrays to new unit system\n self.original_units()\n\n\n # if any are missing, work them out from what we already have:\n\n if velocity is None:\n velocity = self.infer_original_units('km s^-1')\n\n if distance is None:\n distance = self.infer_original_units('kpc')\n\n if mass is None:\n mass = self.infer_original_units('Msol')\n\n if temperature is None:\n temperature = self.infer_original_units('K')\n\n new_units = []\n for x in [velocity, distance, mass, temperature]:\n if x is not None:\n new_units.append(units.Unit(x))\n\n\n self._file_units_system = new_units\n\n # set new units for all known arrays\n for arr_name in list(self.keys()):\n arr = self[arr_name]\n # if the array has units, then use the current units, else\n # check if a default dimension for this array exists in\n # the configuration\n if arr.units != units.NoUnit():\n ref_unit = arr.units\n else:\n try:\n ref_unit = config_parser.get(\n 'default-array-dimensions', arr_name)\n except configparser.NoOptionError:\n # give up -- no applicable dimension found\n continue\n\n arr.set_units_like(ref_unit)", "def _get_units(self, name):\n meta = self._abs2meta\n\n if name in meta:\n return meta[name]['units']\n\n proms = self._prom2abs\n\n if name in proms['output']:\n abs_name = proms['output'][name][0]\n return meta[abs_name]['units']\n\n elif name in proms['input']:\n if len(proms['input'][name]) > 1:\n # The promoted name maps to multiple absolute names, require absolute name.\n msg = \"Can't get units for the promoted name '%s' because it refers to \" + \\\n \"multiple inputs: %s. Access the units using an absolute path name.\"\n raise RuntimeError(msg % (name, str(proms['input'][name])))\n\n abs_name = proms['input'][name][0]\n return meta[abs_name]['units']\n\n raise KeyError('Variable name \"{}\" not found.'.format(name))", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp" ]
[ "0.6702368", "0.661202", "0.6604063", "0.6344034", "0.61844754", "0.6114347", "0.60904294", "0.6029163", "0.6023127", "0.59929097", "0.5970022", "0.59694815", "0.5943942", "0.5935189", "0.59149885", "0.59114206", "0.5895953", "0.5884657", "0.58838505", "0.5863761", "0.5856587", "0.5852781", "0.5845475", "0.58259255", "0.58205277", "0.58083826", "0.578711", "0.57794905", "0.5750528", "0.57405627", "0.56955665", "0.5688327", "0.56879526", "0.56875783", "0.56855935", "0.5681583", "0.5663293", "0.5660534", "0.56505233", "0.5649003", "0.5642654", "0.5637654", "0.56375027", "0.5633675", "0.56258166", "0.5614941", "0.5614941", "0.56058294", "0.5601029", "0.5594353", "0.5592946", "0.5591142", "0.55889", "0.55785215", "0.5575195", "0.5570567", "0.55671924", "0.5555584", "0.55422807", "0.5533645", "0.55318916", "0.5521691", "0.552161", "0.55213684", "0.55171", "0.5511836", "0.54957026", "0.5495448", "0.54885286", "0.54782766", "0.54773396", "0.547664", "0.54721856", "0.54712665", "0.54646575", "0.5464598", "0.5464068", "0.54606146", "0.5458746", "0.54554635", "0.54549843", "0.54514265", "0.54483813", "0.54427975", "0.54402554", "0.542907", "0.5425745", "0.5423836", "0.5412843", "0.53985626", "0.5391975", "0.53902304", "0.5380302", "0.53654736", "0.53650784", "0.53505236", "0.5349555", "0.5340431", "0.5339175", "0.5337363" ]
0.57462955
29
Convert between physics and engineering units.
def _raw_phys_to_eng(self, physics_value):
    roots = (self.p - physics_value).roots
    if len(roots) == 1:
        x = roots[0]
        return x
    else:
        raise ValueError("There doesn't exist a corresponding engineering value or "
                         "they are not unique:", roots)
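A minimal usage sketch for the conversion above, assuming self.p behaves like a numpy.poly1d (the coefficients and variable names below are hypothetical illustrations, not taken from the dataset): the inverse mapping solves p(x) = physics_value by taking the roots of the shifted polynomial.

import numpy as np

# Hypothetical linear calibration: physics = 2 * eng + 1
p = np.poly1d([2.0, 1.0])

physics_value = 7.0
roots = (p - physics_value).roots   # solve p(x) - physics_value = 0
assert len(roots) == 1              # a degree-1 polynomial has a unique inverse
eng_value = roots[0]                # -> 3.0
assert np.isclose(p(eng_value), physics_value)

For higher-order calibration polynomials several roots can satisfy the equation, which is why the method raises ValueError when the inverse is not unique.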
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def original_units(self):\n self.physical_units(distance=self.infer_original_units('km'),\n velocity=self.infer_original_units('km s^-1'),\n mass=self.infer_original_units('Msol'), persistent=False)", "def useUnits():", "def units(self, key):\n \n # Strip any operators\n _, key = get_operator(key)\n \n # Fill out aliases \n if key in component_from_alias:\n key = component_from_alias[key]\n elif key == 'E':\n key = 'electricField'\n elif key == 'B':\n key = 'magneticField' \n \n return pg_units(key)", "def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier", "def convert_energy_2_internal_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[self.current_units[\"energy\"]]\n \n # special handling for nano meters\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n #if val == 0.0:\n # return 0.0\n #return (1.0/val)/cfact\n else:\n return val*cfact", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def convert_energy(self, event):\n try:\n #Compare other unit to one unit(joules)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Btu(th)\": 1054.35, \"Btu(mean)\": 1055.87, \"calories(IT)\": 4.1868, \"calories(th)\": 4.184, \"calories(mean)\": 4.19002, \"calories(15C)\": 4.1858, \"calories(20C)\": 4.1819, \"calories(food)\": 4186.0, \"centigrade heat units\": 1900.4, \"electron volts\": 1.60219 * 10 ** -19, \"ergs\": 1.0 * 10 ** -7, \"foot-pound force\": 1.355818, \"foot poundals\": 0.04214, \"gigajoules\": 1.0 * 10 ** 9, \"horsepower hours\": 2684520.0, \"inch-pound force\": 0.112985, \"joules\": 1.0, \"kilocalories(IT)\": 4186.8, \"kilocalories(th)\": 4184.0, \"kilogram-force meters\": 9.80665, \"kilojoules\": 1000.0, \"kilowatt hours\": 3600000.0, \"megajoules\": 1.0 * 10 ** 6, \"newton meters\": 1.0, \"therms\": 105505585.257348, \"watt seconds\": 1.0, \"watt hours\" : 3600.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_eV_kJmol(en_eV):\n 
return en_eV/kJmol_eV", "def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)", "def convert_units(data, units):\n # Build the dictionary of units conversions\n convert = {'m' : [1.0, 0., 'm'], \n 'meter' : [1.0, 0., 'm'], \n 'deg C' : [1.0, 273.15, 'K'], \n 'Celsius' : [1.0, 273.15, 'K'], \n 'K' : [1.0, 0., 'K'],\n 'db' : [1.e4, 101325., 'Pa'], \n 'Pa' : [1.0, 0., 'Pa'],\n 'mg/m^3': [1.e-6, 0., 'kg/m^3'], \n 'S/m': [1.0, 0., 'S/m'],\n 'mS/m' : [1.e-3, 0., 'S/m'],\n 'psu': [1.0, 0., 'psu'], \n 'salinity': [1.0, 0., 'psu'], \n 'kg/m^3': [1.0, 0., 'kg/m^3'], \n 'kilogram meter-3': [1.0, 0., 'kg/m^3'], \n 'm/s': [1.0, 0., 'm/s'], \n 'mg/l': [1.e-3, 0., 'kg/m^3'],\n 'meter second-1' : [1.0, 0., 'm/s'],\n 'm.s-1' : [1.0, 0., 'm/s'],\n 'pH units' : [1.0, 0., 'pH units'],\n 'MPa' : [1.e6, 0., 'Pa'],\n '--' : [1.0, 0., '--'],\n 'mD' : [9.869233e-16, 0., 'm^2'],\n 'um' : [1.e-6, 0., 'm'],\n 'm/s 1e-9' : [1.e-9, 0., 'm/s'],\n 'm/s 1e-7' : [1.e-7, 0., 'm/s'],\n 'wt.%' : [10., 0., 'psu'],\n '10^-15 m^2' : [1.e-15, 0., 'm^2'],\n 'm^2' : [1., 0., 'm^2'],\n 'kg/m^2/year' : [3.168808781402895e-08, 0., 'kg/m^2/s'] \n } \n \n # Make sure the data are a numpy array and the units are a list\n if isinstance(data, float) or isinstance(data, int):\n data = np.array([data])\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(units, str) or isinstance(units, unicode):\n units = [units]\n if units == None:\n units = ['']\n \n # Make sure you can slice through the columns: must be two-dimensional\n sh = data.shape\n data = np.atleast_2d(data)\n \n # Allow conversion of a row of data if all of the same unit\n if len(units) == 1 and data.shape[1] > 1:\n data = data.transpose()\n \n # Create an emtpy array to hold the output\n out_data = np.zeros(data.shape)\n out_units = []\n \n # Convert the units\n for i in range(len(units)):\n try:\n out_data[:,i] = data[:,i] * convert[units[i]][0] + \\\n convert[units[i]][1]\n out_units += [convert[units[i]][2]]\n except KeyError:\n print('Do not know how to convert %s to mks units' % units[i])\n print('Continuing without converting these units...')\n out_data[:,i] = data[:,i]\n out_units += units[i]\n \n # Return the converted data in the original shape\n out_data = np.reshape(out_data, sh, 'C')\n return (out_data, out_units)", "def convert_units(celsius_value, units):\n if units == 0:\n return celsius_value\n if units == 1:\n return celsius_value * 1.8 + 32\n return celsius_value + 273.15", "def get_units(self):\n return str(self._modeler.GetModelUnits())", "def get_units(self,):\n self.UNITS = {'pressure':'Pa',}\n return", "def convert(self):\n return _libsbml.SBMLUnitsConverter_convert(self)", "def convert_to(self, units: str) -> None:\n if self.units == units:\n return\n\n if units not in Variable.VALID_UNIT_CONVERSIONS[self.units]:\n msg = f\"\"\"Not a valid unit conversion. 
Valid destination units:\n {Variable.VALID_UNIT_CONVERSIONS[self.units]}\"\"\"\n raise ValueError(msg)\n\n if self.units == \"celsius\" and units == \"fahrenheit\":\n self._celsius_to_fahrenheit()\n elif self.units == \"m/s\" and units == \"km/h\":\n self._mps_to_kph()\n elif self.units == \"m/s\" and units == \"mph\":\n self._mps_to_mph()\n else:\n raise ValueError(\"Not a valid unit conversion.\")", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def ke(self):\n self._obj['w'] = (self._obj['u'])**2 + (self._obj['v'])**2\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'({vel_units})^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'({vel_units})^2')\n return self._obj", "def convert_units(src_unit: Union[str, float], tgt_unit: Union[str, float]):\n return _parse_unit(src_unit) / _parse_unit(tgt_unit)", "def _override_units_system(self):\n try:\n f = open(self.filename+\".units\")\n except OSError:\n return\n\n name_mapping = {'pos': 'distance', 'vel': 'velocity'}\n units_dict = {}\n\n for line in f:\n if (not line.startswith(\"#\")):\n if \":\" not in line:\n raise OSError(\"Unknown format for units file %r\"%(self.filename+\".units\"))\n else:\n t, u = list(map(str.strip,line.split(\":\")))\n t = name_mapping.get(t,t)\n units_dict[t] = u\n\n self.set_units_system(**units_dict)", "def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)", "def units(self):\n pass", "def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == 
\"PHQ\":\n return arr*2.5e6/1000.\n return arr", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def convert_energy_2_current_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[units]\n \n # special handling for nanometers\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n else:\n return val/cfact", "def convert_units(self, time_units=None, len_units=None):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert drawdown data\n self.drawdown.convert_units(time_units, len_units)\n # Convert associate data units\n for i in range(self.data_count()):\n if self.data[i].dtype == 1: # drawdown units\n data_units = len_units\n elif self.data[i].dtype == 2: # first derivative units\n data_units = len_units + \"/\" + time_units\n elif self.data[i].dtype == 3: # second derivative units\n data_units = len_units + \"/\" + time_units + \"2\"\n self.data[i].convert_units(time_units, data_units)\n self.len_units = len_units\n self.time_units = time_units\n # End Function", "def convert_kcalmol_eV(en_kcalmol):\n return en_kcalmol*kcalmol_eV", "def getDistanceUnits(self) -> Unit:\n ...", "def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def conv(old='auto', new='auto'):\n if old == new:\n return 1.\n for unittype in [lenunits, angunits, timunits, masunits, eneunits]:\n if old in unittype and new in unittype:\n return unittype[new] / unittype[old]\n\n raise ValueError('Units \\'{}\\' and \\'{}\\' unrecognized or '\n 'not of same unit type'.format(old, new))", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def convert_H_kJmol(en_H):\n return en_H/kJmol_H", "def _raw_eng_to_phys(self, eng_value):\n return self.p(eng_value)", "def _mps_to_kph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"km/h\"\n self.value = ((self.value * 360) / 100).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def convert_eV_kcalmol(en_eV):\n return en_eV/kcalmol_eV", "def cbToEngUnits( 
BoardNum, Range, DataVal, EngUnits = 0.0 ):\n EngUnits = ctypes.c_float( EngUnits )\n CHK( cbw.cbToEngUnits( BoardNum, Range, DataVal, byref( EngUnits ) ) )\n return EngUnits.value", "def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)", "def _dwd_kelvin_to_celsius(self, chn):\n if not self._is_solar_channel(chn) and \\\n (self[chn].info['units'] in ['K', 'degree Kelvin', 'KELVIN'] or\n self[chn].unit == 'K'):\n self[chn].data -= CONVERSION\n self[chn].info['units'] = self[chn].unit = 'C'", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def unit_of_measurement(self):\n if self.values.primary.units == \"C\":\n return TEMP_CELSIUS\n if self.values.primary.units == \"F\":\n return TEMP_FAHRENHEIT\n\n return self.values.primary.units", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def convert_units(unt, origunits):\n if unt[0:3] == origunits[0:3] | unt[0:3] == 'ori':\n units = origunits\n convf = 1\n else:\n if 'fee' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'met':\n units = 'feet'\n convf = 3.2808399\n else:\n units = origunits\n convf = 1\n else:\n if 'met' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'fee':\n units = 'meters'\n convf = 0.3048\n else:\n units = origunits\n convf = 1\n else:\n if 'm/s' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'kno':\n units = 'meters/sec'\n convf = 0.51444444\n else:\n units = origunits\n convf = 1\n else:\n if 'kno' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'm/s':\n units = 'knots'\n convf = 1.9438445\n else:\n 
units = origunits\n convf = 1\n else:\n error('Unknown units')\n #\n return units, convf", "def convert_units(cube, units):\n try:\n cube.convert_units(units)\n except ValueError:\n if not _try_special_conversions(cube, units):\n raise\n\n return cube", "def convert_force(self, event):\n try:\n #Compare other unit to one unit(newtons)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"dynes\": 0.00001, \"kilograms force\": 9.80665, \"kilonewtons\": 1000.0, \"kips\": 4448.222, \"meganewtons\": 1.0 * 10 ** 6, \"newtons\": 1.0, \"pounds force\": 4.448222, \"poundals\": 0.138255, \"sthene\": 1000.0, \"tonnes force\": 9806.65, \"tons force(UK)\": 9964.016418, \"tons force(US)\": 8896.443231}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _raw_eng_to_phys(self, eng_value):\n return self.pp(eng_value)", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def convert_kcalmol_kJmol(en_kcalmol):\n return en_kcalmol/kJmol_kcalmol", "def test_change_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n s.units = \"SI\"\n assert s.units == \"SI\"\n assert s.cv.units == \"kilojoule / kelvin / kilogram\"\n assert s.cp.units == \"kilojoule / kelvin / kilogram\"\n assert s.s.units == \"kilojoule / kelvin / kilogram\"\n assert s.h.units == \"kilojoule / kilogram\"\n assert s.T.units == \"degree_Celsius\"\n assert s.u.units == \"kilojoule / kilogram\"\n assert s.v.units == \"meter ** 3 / kilogram\"\n assert s.p.units == \"bar\"", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_units\n\n if raw_units in [TEMP_CELSIUS, TEMP_FAHRENHEIT]:\n return self.hass.config.units.temperature_unit\n return raw_units", "def to(self, unit, equivalencies=[], freq=None):\n\n if not isinstance(unit, u.Unit):\n unit = u.Unit(unit)\n\n if unit == self.unit:\n # No copying\n return self\n\n if ((self.unit.is_equivalent(u.Jy / u.beam) and\n not any({u.Jy/u.beam, u.K}.issubset(set(eq)) for eq in equivalencies))):\n # the 'not any' above checks that there is not already a defined\n # Jy<->K equivalency. If there is, the code below is redundant\n # and will cause problems.\n\n if hasattr(self, 'beams'):\n factor = (self.jtok_factors(equivalencies=equivalencies) *\n (self.unit*u.beam).to(u.Jy))\n else:\n # replace \"beam\" with the actual beam\n if not hasattr(self, 'beam'):\n raise ValueError(\"To convert objects with Jy/beam units, \"\n \"the object needs to have a beam defined.\")\n brightness_unit = self.unit * u.beam\n\n # create a beam equivalency for brightness temperature\n if freq is None:\n try:\n freq = self.with_spectral_unit(u.Hz).spectral_axis\n except AttributeError:\n raise TypeError(\"Object of type {0} has no spectral \"\n \"information. 
`freq` must be provided for\"\n \" unit conversion from Jy/beam\"\n .format(type(self)))\n else:\n if not freq.unit.is_equivalent(u.Hz):\n raise u.UnitsError(\"freq must be given in equivalent \"\n \"frequency units.\")\n\n bmequiv = self.beam.jtok_equiv(freq)\n # backport to handle astropy < 3: the beam equivalency was only\n # modified to handle jy/beam in astropy 3\n if bmequiv[0] == u.Jy:\n bmequiv.append([u.Jy/u.beam, u.K, bmequiv[2], bmequiv[3]])\n\n factor = brightness_unit.to(unit,\n equivalencies=bmequiv + list(equivalencies))\n\n else:\n # scaling factor\n factor = self.unit.to(unit, equivalencies=equivalencies)\n\n converted_array = (self.quantity * factor).value\n\n # use private versions of variables, not the generated property\n # versions\n # Not entirely sure the use of __class__ here is kosher, but we do want\n # self.__class__, not super()\n new = self.__class__(value=converted_array, unit=unit, copy=True,\n wcs=self._wcs, meta=self._meta, mask=self._mask,\n header=self._header)\n\n return new", "def setunits(self, *args, **kwargs):\n return _coordsys.coordsys_setunits(self, *args, **kwargs)", "def convert_speed(self, event):\n try:\n #Compare other unit to one unit(meters/second)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Mach number\": 340.2933, \"Nm/24hr\": 0.021435, \"centimeters/minute\": 0.000167, \"centimeters/second\": 0.01, \"feet/hour\": 8.5e-05, \"feet/minute\": 0.00508, \"feet/second\": 0.3048, \"inches/minute\": 0.000423, \"inches/second\": 0.0254, \"kilometers/hour\": 0.277778, \"kilometers/second\": 1000.0, \"knots\": 0.514444, \"meters/hour\": 0.000278, \"meters/minute\": 0.016667, \"meters/second\": 1.0, \"miles/hour\": 0.44704, \"miles/minute\": 26.8224, \"miles/second\": 1609.344, \"nautical miles/hour\": 0.514444, \"speed of light\": 299790000.0, \"speed of sound\": 343.0, \"yards/hour\": 0.000254, \"yards/minute\": 0.01524, \"yards/second\": 0.9144}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _get_units_object(self, units):\n if isinstance(units, cellml_units):\n # We're done\n pass\n else:\n units = amara_parse_cellml(unicode(units))\n assert isinstance(units, cellml_units)\n return units", "def unit_of_measurement(self) -> str | None:\n # Highest priority, for registered entities: unit set by user,with fallback to\n # unit suggested by integration or secondary fallback to unit conversion rules\n if self._sensor_option_unit_of_measurement is not UNDEFINED:\n return self._sensor_option_unit_of_measurement\n\n # Second priority, for non registered entities: unit suggested by integration\n if not self.registry_entry and (\n suggested_unit_of_measurement := self.suggested_unit_of_measurement\n ):\n return suggested_unit_of_measurement\n\n # Third priority: Legacy temperature conversion, which applies\n # to both registered and non registered entities\n native_unit_of_measurement = self.native_unit_of_measurement\n\n if (\n self.device_class == SensorDeviceClass.TEMPERATURE\n and native_unit_of_measurement\n in {UnitOfTemperature.CELSIUS, 
UnitOfTemperature.FAHRENHEIT}\n ):\n return self.hass.config.units.temperature_unit\n\n # Fourth priority: Native unit\n return native_unit_of_measurement", "def unit_of_measurement(self):\n unit = get_uom_from_status(self._device.status)\n if unit == HS_UNIT_LUX:\n return LIGHT_LUX\n elif unit == HS_UNIT_CELSIUS:\n return TEMP_CELSIUS\n elif unit == HS_UNIT_FAHRENHEIT:\n return TEMP_FAHRENHEIT\n elif unit == HS_UNIT_PERCENTAGE:\n return PERCENTAGE\n elif unit == HS_UNIT_A or unit == HS_UNIT_AMPERES:\n return ELECTRIC_CURRENT_AMPERE\n elif unit == HS_UNIT_KW:\n return POWER_KILO_WATT\n elif unit == HS_UNIT_KWH:\n return ENERGY_KILO_WATT_HOUR\n elif unit == HS_UNIT_V or unit == HS_UNIT_VOLTS:\n return ELECTRIC_POTENTIAL_VOLT\n elif unit == HS_UNIT_W or unit == HS_UNIT_WATTS:\n return POWER_WATT\n return None", "def unit_of_measurement(self):\n return self._tasmota_entity.unit", "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]", "def getUnits(self):\n return _libsbml.Compartment_getUnits(self)", "def to_axis_units(self, label, vals):\n if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar', 'P']:\n return vals / 1000\n elif label in ['T']:\n return vals\n else:\n raise ValueError(label)", "def cu_energy(self,val,units=\"1/cm\"):\n if units in self.units[\"energy\"]:\n x = conversion_facs_energy[units]\n i_val = x*val\n \n cu = self.current_units[\"energy\"] \n if cu != \"1/fs\":\n y = conversion_facs_energy[units] \n return i_val/y\n \n return i_val", "def test_measurment(self):\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"km\"), 6.214)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"m\"), 10.936)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"cm\"), 0.328)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"mm\"), 0.394)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"mi\"), 16.093)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"yd\"), 9.144)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"ft\"), 304.8)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"in\"), 254)", "def convert_kJmol_kcalmol(en_kJmol):\n return en_kJmol*kJmol_kcalmol", "def unit_of_measurement(self):\n return self._units", "def _mps_to_mph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"mph\"\n self.value = (self.value * 2.236936).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def units(self):\n return self._units", "def units(self):\n return self._units", "def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def parse_engineering( string, unit = \"\" ):\n if not string.endswith(unit):\n raise ValueError(\"string '%s' is missing the unit '%s'\" % (string, unit))\n if unit:\n string = string[:-len(unit)]\n\n m = re.match(r\"\\s*([\\+\\-]?[.0-9]+)\\s*([a-zA-Z]*)\\s*\", string)\n if not m:\n raise ValueError(\"string '%s' cannot be parsed\" % string)\n x = m.group(1)\n mod = m.group(2)\n conv = {'a':1e-18, 'f':1e-15, 'p':1e-12, 'n':1e-9, 
'u':1e-6,\n 'm':1e-3 , 'c':1e-2 , 'd':1e-1 , '':1.0 , 'k':1e3 ,\n 'M':1e6 , 'G':1e9 , 'T':1e12 , 'P':1e15, 'E':1e18}\n return float(x) * conv[mod]", "def solve(self):\n wort_gravity = self.property('start_gravity').to('sg') +\\\n (self.total_points().to('points') / self.property('wort_volume').to('gal') / 1000.0)\n self.property('wort_gravity', Quantity(wort_gravity, 'sg'))", "def convert_pressure(self, event):\n try:\n #Compare other unit to one unit(pascals)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"atm\": 101325.0, \"bars\": 100000.0, \"centimeters mercury\": 1333.22, \"centimeters water\": 98.0665, \"feet of water\": 2989.06692, \"hectopascals\": 100.0, \"inches of mercury\": 3386.388, \"inches of water\": 249.08891, \"kilogram-force/sq.centimeter\": 98066.5, \"kilogram-force/sq.meter\": 9.80665, \"kilonewtons/sq.meter\": 1000.0, \"kilonewtons/sq.millimeter\": 1000000000.0, \"kilopascals\": 1000.0, \"kips/sq.inch\": 6894760.0, \"meganewtons/sq.meter\": 1000000.0, \"meganewtons/sq.millimeter\": 1000000000000.0, \"meters of water\": 9806.65, \"millibars\": 100.0, \"millimeters of mercury\": 133.322, \"millimeters of water\": 9.80665, \"newtons/sq.centimeter\": 10000.0, \"newtons/sq.meter\": 1.0, \"newtons/sq.millimeter\": 1000000.0, \"pascals\": 1.0, \"poundals/sq.foot\": 1.44816, \"pounds-force/sq.foot\": 47.88, \"pounds-force/sq.inch\": 6894.757, \"tonnes-force/sq.cm\": 98066500.0, \"tonnes-force/sq.meter\": 9806.65, \"tons(UK)-force/sq.foot\": 107251.0, \"tons(UK)-force/sq.inch\": 15444280.0, \"tons(US)-force/sq.foot\": 95760.0, \"tons(US)-force/sq.inch\": 13789500.0, \"torr\": 133.322}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n 
enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_unit_of_measurement\n if raw_units in (TEMP_FAHRENHEIT, TEMP_CELSIUS):\n return self.hass.config.units.temperature_unit\n return raw_units", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def unit_of_measurement(self):\n return self.values.primary.units", "def native_unit_of_measurement(self) -> str:\n return f\"{CURRENCY_CENT}/{UnitOfVolume.LITERS}\"", "def unit_of_measurement(self):\n set_req = self.gateway.const.SetReq\n unit_map = {\n set_req.V_TEMP: (TEMP_CELSIUS\n if self.gateway.metric else TEMP_FAHRENHEIT),\n set_req.V_HUM: '%',\n set_req.V_DIMMER: '%',\n set_req.V_LIGHT_LEVEL: '%',\n set_req.V_WEIGHT: 'kg',\n set_req.V_DISTANCE: 'm',\n set_req.V_IMPEDANCE: 'ohm',\n set_req.V_WATT: 'W',\n set_req.V_KWH: 'kWh',\n set_req.V_FLOW: 'm',\n set_req.V_VOLUME: 'm3',\n set_req.V_VOLTAGE: 'V',\n set_req.V_CURRENT: 'A',\n }\n if float(self.gateway.protocol_version) >= 1.5:\n if set_req.V_UNIT_PREFIX in self._values:\n return self._values[\n set_req.V_UNIT_PREFIX]\n unit_map.update({set_req.V_PERCENTAGE: '%'})\n if float(self.gateway.protocol_version) >= 2.0:\n unit_map.update({\n set_req.V_ORP: 'mV',\n set_req.V_EC: 'μS/cm',\n set_req.V_VAR: 'var',\n set_req.V_VA: 'VA',\n })\n return unit_map.get(self.value_type)", "def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def _get_units(self):\n #assert self.ser.isOpen()\n\n self.serial_connection.write('UNI' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n unit = self.MEASUREMENT_UNITS[self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)]\n\n self.serial_connection.write(self.CR + self.LF)\n\n return unit", "def convert_H_kcalmol(en_H):\n return en_H/kcalmol_H", "def _standardise_dtypes_and_units(cube: Cube) -> None:\n\n def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj:\n The object to be updated\n required_dtype:\n The dtype required\n\n Returns:\n The updated object\n \"\"\"\n if obj.dtype != 
required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)", "def unit_of_measurement(self):\n if self.api_unit in TEMPERATURE_UNITS:\n return self.hass.config.units.temperature_unit\n\n if self.api_unit in LENGTH_UNITS:\n return self.hass.config.units.length_unit\n\n if self.api_unit in PRESSURE_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return self.hass.config.units.pressure_unit\n return PRESSURE_HPA\n\n if self.api_unit in FUEL_CONSUMPTION_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return FUEL_CONSUMPTION_MPG\n return FUEL_CONSUMPTION_L_PER_100KM\n\n return self.api_unit", "def convert_length(self, event):\n try:\n #Compare other unit to one unit(meters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"angstroms\": 10 ** -10, \"au\": 149598550000.0, \"barleycorns\": 0.008467, \"cables\": 182.88, \"centimeters\": 0.01, \"chains\": 20.11684, \"decimeters\": 0.1, \"ells\": 0.875, \"ems\" : 0.004233, \"fathoms\": 1.8288, \"feet(UK & US)\": 0.3048, \"feet(US survey)\": 0.304801, \"furlongs\": 201.168, \"hands\": 0.1016, \"hectometers\": 100.0, \"inches\": 0.0254, \"kilometers\": 1000.0, \"light years\": 9460528405000000.0, \"meters\": 1.0, \"micrometers\": 0.000001, \"mil\": 0.0000254, \"miles(UK & US)\": 1609.344, \"miles(nautical, international)\": 1852.0, \"miles(nautical, UK)\": 1853.184, \"millimeters\": 0.001, \"nanometers\": 10 ** -9, \"parsecs\": 30856776000000000.0, \"picometers\": 10 ** -12, \"Scandinavian mile\": 10000.0, \"thou\": 0.0000254, \"yards\": 0.9144, \"links\": 0.2011684, \"pica\": 0.00423333, \"rods\": 5.0292, \"spans\": 0.2286}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_units(self, time_units=None, len_units=None, pump_units=None,\n same=False):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Check new pumping rate units\n in_pump = self.pump_units\n if pump_units is None:\n pump_units = in_pump\n if same:\n pump_units = \"%s3/%s\" % (len_units, time_units)\n flag = _units.validate_units(pump_units)\n if flag == -1:\n raise ValueError('Bad 
pumping rate units input {}'.format(len_units))\n\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert pumping rate data\n self.pumprate.convert_units(time_units, pump_units)\n # Convert well data units\n for i in range(self.well_count()):\n self.wells[i].convert_units(time_units, len_units)\n # Set input units\n self.len_units = len_units\n self.time_units = time_units\n self.pump_units = pump_units\n # End Function", "def unit_of_measurement(self):\n return self.var_units", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')", "def unit_of_measurement(self) -> Any:\n return TEMP_CELSIUS", "def geten(self):\n lat = self.getlatlon()[0]\n return (0.5*self._sm*(self._vr**2 + self._vt**2 + self._vp**2) +\n forces.wgs84_pot(self._r, lat)*self._sm)\n # G*self._mm*self._sm/self._r)", "def _uni_to_diff(self, v, omega):\n\n# print(\"--MuleBot._uni_to_diff({:.3f}, {:.3f})\".format(v, omega))\n loggerMB.debug(\"--MuleBot._uni_to_diff({:.3f}, {:.3f})\".format(v, omega))\n\n # v = translation velocity (m/s)\n # omega = angular velocity (rad/s)\n\n # For some reason, it is necessary to multiply the angle by -1.\n # TODO: Probably have to put this back in.\n omega *= -1.0\n\n inches_per_meter = 39.3701\n circumference_in = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n circumference_m = circumference_in / inches_per_meter\n radians_per_circumference = 2.0\n # R = roll?(meters/radian)\n R = circumference_m / radians_per_circumference\n\n # Get info in inches\n Lin = MuleBot.WHEEL_BASE_LENGTH\n # Convert inches to meters\n Lm = Lin / inches_per_meter\n\n # All measurements are now metric.\n v_l = ( (2.0 * v) - (omega * Lm) ) / (2.0 * R)\n v_r = ( (2.0 * v) + (omega * Lm) ) / (2.0 * R)\n loggerMB.debug(\"--MuleBot._uni_to_diff v_l, v_r: {:.3f}, {:.3f}\".format(v_l, v_r))\n\n rpm_l = self.rps_to_rpm(v_l)\n rpm_r = self.rps_to_rpm(v_r)\n# print(\"--MuleBot._uni_to_diff rpm_l, rpm_r: {:.3f}, {:.3f}\".format(rpm_l, rpm_r))\n loggerMB.debug(\"--MuleBot._uni_to_diff rpm_l, rpm_r: {:.3f}, {:.3f}\".format(rpm_l, rpm_r))\n\n return v_l, v_r", "def convert(self):\n return _libsbml.SBMLInferUnitsConverter_convert(self)", "def to_meters(d, d_unit):\n if d_unit == UOM_M:\n dm = d\n elif d_unit == UOM_KM:\n dm = d * 1000\n elif d_unit == UOM_FEET:\n dm = feet2m(d)\n elif d_unit == UOM_SM:\n dm = SM2m(d)\n elif d_unit == UOM_NM:\n dm = NM2m(d)\n return dm", "def test_unit_conversion(self):\n self.cube_uv_down.convert_units(\"kW m-2\")\n scale_factor = 1.0\n expected = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=0.1\n )\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def convert_H_eV(en_H):\n return en_H/eV_H", "def _get_units(self, name):\n meta = self._abs2meta\n\n if name in meta:\n return meta[name]['units']\n\n proms = self._prom2abs\n\n if name in proms['output']:\n abs_name = proms['output'][name][0]\n return meta[abs_name]['units']\n\n elif name in proms['input']:\n if len(proms['input'][name]) > 1:\n # The promoted name maps to multiple absolute names, require absolute name.\n msg = \"Can't get units for the 
promoted name '%s' because it refers to \" + \\\n \"multiple inputs: %s. Access the units using an absolute path name.\"\n raise RuntimeError(msg % (name, str(proms['input'][name])))\n\n abs_name = proms['input'][name][0]\n return meta[abs_name]['units']\n\n raise KeyError('Variable name \"{}\" not found.'.format(name))", "def convert_fuelconsumption(self, event):\n try:\n #Compare other unit to one unit(liters/100 kilometer)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n if current_value != 0:\n unit_comp = {\"car(2014 US Average)\": 9.260417, \"gallon(UK)/100 miles\": 2.824809, \"gallon(US)/100 miles\": 2.352146, \"kilometer/liter\": 100.0 / (current_value ** 2), \"liters/100 kilometer\": 1.0, \"liters/meter\": 100000.0, \"miles/gallon(UK)\": 282.480936 / (current_value ** 2), \"miles/gallon(US)\": 235.214583 / (current_value ** 2)}\n else: #In case current_value == 0, it will error coz number division by zero.\n unit_comp = {\"car(2014 US Average)\": 1.0, \"gallon(UK)/100 miles\": 1.0, \"gallon(US)/100 miles\": 1.0, \"kilometer/liter\": 1.0, \"liters/100 kilometer\": 1.0, \"liters/meter\": 1.0, \"miles/gallon(UK)\": 1.0, \"miles/gallon(US)\": 1.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)" ]
[ "0.67884886", "0.65156615", "0.6370421", "0.62429684", "0.60196847", "0.59708744", "0.59549516", "0.59494585", "0.59487826", "0.59480655", "0.591996", "0.5908836", "0.590394", "0.5892939", "0.58907676", "0.58825135", "0.58816415", "0.5879758", "0.5878579", "0.5834652", "0.58009183", "0.579033", "0.57901025", "0.5783464", "0.5773485", "0.5744693", "0.57161945", "0.57076705", "0.5697574", "0.5696437", "0.56928194", "0.5675286", "0.5655954", "0.5650785", "0.5647491", "0.5645676", "0.5641953", "0.5639426", "0.5638304", "0.5626676", "0.56151414", "0.5610815", "0.5610746", "0.56017226", "0.55971813", "0.5592975", "0.55918", "0.55850744", "0.55806005", "0.5574443", "0.5571404", "0.5568221", "0.5563917", "0.55595124", "0.55365306", "0.5517177", "0.5515644", "0.5515515", "0.5503816", "0.5488333", "0.54800034", "0.54791677", "0.54677075", "0.5467437", "0.546374", "0.54593176", "0.54522955", "0.54510224", "0.54510224", "0.5442614", "0.5436857", "0.5428591", "0.54252756", "0.54036176", "0.53952324", "0.53919107", "0.53915", "0.5388711", "0.53873014", "0.53855306", "0.5380819", "0.5380178", "0.5379003", "0.53786266", "0.5377345", "0.53772956", "0.5375169", "0.5368997", "0.53680503", "0.536593", "0.5365505", "0.5363762", "0.53589594", "0.5358799", "0.5346344", "0.53370255", "0.5327439", "0.5324261", "0.5322217", "0.53111446" ]
0.60435265
4
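As a side note on how this row's scalar fields fit together, the stored document_rank appears to count how many negatives outscored the positive document. A minimal sketch in Python, assuming that 0-based ranking rule (inferred from this row only, not documented behaviour) and truncating negative_scores for brevity:

    document_score = 0.60435265
    negative_scores = [0.67884886, 0.65156615, 0.6370421, 0.62429684, 0.60196847]  # truncated

    # Count the negatives the retriever scored above the positive document
    # (assumed rank rule, checked only against this row).
    document_rank = sum(score > document_score for score in negative_scores)
    print(document_rank)  # 4, matching the stored document_rank for this row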
PChip interpolation for converting between physics and engineering units.
def __init__(self, x, y, f1=unit_function, f2=unit_function):
    super(self.__class__, self).__init__(f1, f2)
    self.x = x
    self.y = y
    self.pp = PchipInterpolator(x, y)
    diff = np.diff(y)
    if not (np.all(diff > 0) or np.all(diff < 0)):
        raise ValueError("Given coefficients must be monotonically "
                         "increasing or decreasing.")
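For context, a minimal usage sketch of the shape-preserving conversion this __init__ sets up; the calibration points and the inverse lookup below are illustrative assumptions, not values taken from the dataset:

    import numpy as np
    from scipy.interpolate import PchipInterpolator

    # Hypothetical monotonic calibration: engineering units -> physics units.
    eng = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    phys = np.array([0.0, 0.9, 1.7, 2.4, 3.0])

    pp = PchipInterpolator(eng, phys)   # forward map, shape-preserving cubic
    print(pp(2.5))                      # physics value at an engineering setting

    # Inverse lookup by root finding, as in the _raw_phys_to_eng negative below:
    # shift the curve by the target value and take its root (unique because the
    # calibration data are monotonic).
    target = 1.3
    print(PchipInterpolator(eng, phys - target).roots())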
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raw_phys_to_eng(self, physics_value):\n y = [val - physics_value for val in self.y]\n new_pp = PchipInterpolator(self.x, y)\n roots = new_pp.roots()\n if len(roots) == 1:\n x = roots[0]\n return x\n else:\n raise UniqueSolutionException(\"The function does not have any solution.\")", "def test_isentropic_pressure_p_increase_rh_out():\n lev = [85000., 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n rh = np.ones((4, 5, 5))\n rh[0, :] = 20.\n rh[1, :] = 40.\n rh[2, :] = 80.\n rh[3, :] = 100.\n relh = rh * units.percent\n isentlev = 296. * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. * units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def test_isentropic_pressure_p_increase():\n lev = [85000, 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 1000. * units.hPa\n assert_almost_equal(isentprs[0], trueprs, 3)", "def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)", "def pchip(*args):\n from . import pyinterp\n xs, ys, kws = util.parseargs(*args)\n return pyinterp.Pchip(xs, ys, **kws)", "def get_interpolator(x: np.array, y: np.array):\n return intp.PchipInterpolator(x, y)", "def interpolate(self, interpolation=\"nearest\", **kwargs):\n return podpac.interpolators.Interpolate(source=self, interpolation=interpolation, **kwargs)", "def interpolation(self):\n return self._interpolation", "def pchip(x, y, u):\n h = []\n h0 = x[0]\n for h1 in x[1:]:\n h.append(h1 - h0)\n h0 = h1\n\n delta = []\n for (j, f) in enumerate(h):\n delta.append((y[j + 1] - y[j]) / f)\n\n d = []\n d.append(pchipend(h[0], h[1], delta[0], delta[1]))\n for k in range(1, len(x) - 1):\n d.append(pchipslopes(h[k - 1], h[k], delta[k - 1], delta[k]))\n\n d.append(pchipend(h[-1], h[-2], delta[-1], delta[-2]))\n\n # evaluate function\n pchipy = []\n segmentlx = x[0]\n segmently = y[0]\n for (i, e) in enumerate(delta):\n segmentrx = x[i + 1]\n segmentry = y[i + 1]\n leftindex = u.index(segmentlx)\n rightindex = u.index(segmentrx)\n c = (3 * e - 2 * d[i] - d[i + 1]) / h[i]\n b = (d[i] - 2 * e + d[i + 1]) / (h[i] ** 2)\n dfloat = d[i]\n for j in u[leftindex:rightindex]:\n j = j - u[leftindex]\n pchipy.append(segmently + j * (dfloat + j * (c + j * b)))\n segmentlx = segmentrx\n segmently = segmentry\n\n # append the last point\n pchipy.append(y[-1])\n return pchipy", "def interpolation(self) -> int:\n return self._interpolation", "def _pchip_coeffs_i(X, Y, i):\n\n # Pre-assign sizes for PCHIP variables.\n h = [0.0, 0.0, 0.0]\n δ = [0.0, 0.0, 0.0]\n d = [0.0, 0.0]\n\n # Check whether x is adjacent to the start or end of this X\n at_start = (i == 0) or np.isnan(X[i - 1] + Y[i - 1])\n at_end = (i == len(X) - 2) or np.isnan(X[i + 2] + Y[i + 2])\n\n if at_start and at_end:\n\n # if np.isnan(X[i + 1]) or np.isnan(Y[i + 1]):\n # # Only one valid data point. 
Leave the interpolant as NaN.\n # d[0], c, b = np.nan, np.nan, np.nan\n\n # else:\n\n # ||| X[0] <= x <= X[1] ||| Revert to Linear Interpolation\n # If actually only one non-NaN data point, then d[0] will be NaN, so\n # interpolant will evaluate to NaN.\n d[0] = (Y[i + 1] - Y[i]) / (X[i + 1] - X[i])\n C3, C2 = 0.0, 0.0\n\n else:\n if at_start:\n # ||| X[0] <= x <= X[1] < X[2] --->\n h[1] = X[i + 1] - X[i]\n h[2] = X[i + 2] - X[i + 1]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Noncentered, shape-preserving, three-point formula:\n d[0] = ((2.0 * h[1] + h[2]) * δ[1] - h[1] * δ[2]) / (h[1] + h[2])\n if np.sign(d[0]) != np.sign(δ[1]):\n d[0] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[2])) and (\n np.abs(d[0]) > np.abs(3.0 * δ[1])\n ):\n d[0] = 3.0 * δ[1]\n\n # Standard PCHIP formula\n if np.sign(δ[1]) * np.sign(δ[2]) > 0.0:\n w1 = 2.0 * h[2] + h[1]\n w2 = h[2] + 2.0 * h[1]\n d[1] = (w1 + w2) / (w1 / δ[1] + w2 / δ[2])\n else:\n d[1] = 0.0\n\n elif at_end:\n # <--- X[i-1] < X[i] < x <= X[i+1] |||\n h[0] = X[i] - X[i - 1]\n h[1] = X[i + 1] - X[i]\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n\n # Standard PCHIP formula\n if np.sign(δ[0]) * np.sign(δ[1]) > 0.0:\n w1 = 2.0 * h[1] + h[0]\n w2 = h[1] + 2.0 * h[0]\n d[0] = (w1 + w2) / (w1 / δ[0] + w2 / δ[1])\n else:\n d[0] = 0.0\n\n # Noncentered, shape-preserving, three-point formula:\n d[1] = ((h[0] + 2.0 * h[1]) * δ[1] - h[1] * δ[0]) / (h[0] + h[1])\n if np.sign(d[1]) != np.sign(δ[1]):\n d[1] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[0])) and (\n np.abs(d[1]) > np.abs(3 * δ[1])\n ):\n\n d[1] = 3.0 * δ[1]\n\n else:\n # <--- X[i-1] < X[i] < x <= X[i+1] < X[i+2] --->\n h[0] = X[i] - X[i - 1] # Way faster to do this\n h[1] = X[i + 1] - X[i] # than\n h[2] = X[i + 2] - X[i + 1] # diff(X(i-1:i+3))\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Standard PCHIP formula\n for j in range(2):\n if np.sign(δ[j]) * np.sign(δ[j + 1]) > 0.0:\n w1 = 2.0 * h[j + 1] + h[j]\n w2 = h[j + 1] + 2.0 * h[j]\n d[j] = (w1 + w2) / (w1 / δ[j] + w2 / δ[j + 1])\n else:\n d[j] = 0.0\n\n # Polynomial coefficients for this piece\n dzzdx = (δ[1] - d[0]) / h[1]\n dzdxdx = (d[1] - δ[1]) / h[1]\n C3 = (dzdxdx - dzzdx) / h[1] # coeff of the 3rd degree term (x^3)\n C2 = 2 * dzzdx - dzdxdx # coeff of 2nd degree term (x^2)\n\n # The following code evaluates the `d`'th deriviative of the cubic\n # interpolant at `x`.\n # s = x - X[i]\n # if d == 0:\n # y = Y[i] + s * (d[0] + s * (C2 + s * C3))\n # elif d == 1: # first derivative\n # y = d[0] + s * (2 * C2 + 3 * s * C3)\n # elif d == 2: # second derivative\n # y = 2 * C2 + 6 * s * C3\n # elif d == 3: # third derivative\n # y = 6 * C3\n # else:\n # y = 0.0\n # return y\n\n # Faster to return tuple than build an np.array just to deconstruct it later\n return C3, C2, d[0], Y[i]", "def convert_pressure(self, event):\n try:\n #Compare other unit to one unit(pascals)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"atm\": 101325.0, \"bars\": 100000.0, \"centimeters mercury\": 1333.22, \"centimeters water\": 98.0665, \"feet of water\": 2989.06692, \"hectopascals\": 100.0, \"inches of mercury\": 3386.388, \"inches of water\": 249.08891, \"kilogram-force/sq.centimeter\": 98066.5, \"kilogram-force/sq.meter\": 9.80665, \"kilonewtons/sq.meter\": 1000.0, \"kilonewtons/sq.millimeter\": 1000000000.0, \"kilopascals\": 1000.0, \"kips/sq.inch\": 6894760.0, \"meganewtons/sq.meter\": 
1000000.0, \"meganewtons/sq.millimeter\": 1000000000000.0, \"meters of water\": 9806.65, \"millibars\": 100.0, \"millimeters of mercury\": 133.322, \"millimeters of water\": 9.80665, \"newtons/sq.centimeter\": 10000.0, \"newtons/sq.meter\": 1.0, \"newtons/sq.millimeter\": 1000000.0, \"pascals\": 1.0, \"poundals/sq.foot\": 1.44816, \"pounds-force/sq.foot\": 47.88, \"pounds-force/sq.inch\": 6894.757, \"tonnes-force/sq.cm\": 98066500.0, \"tonnes-force/sq.meter\": 9806.65, \"tons(UK)-force/sq.foot\": 107251.0, \"tons(UK)-force/sq.inch\": 15444280.0, \"tons(US)-force/sq.foot\": 95760.0, \"tons(US)-force/sq.inch\": 13789500.0, \"torr\": 133.322}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def _get_interpolation(self) :\n \n return self._interpolation", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def test_isentropic_pressure_4d():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((3, 4, 5, 5))\n tmp[:, 0, :] = 296.\n tmp[:, 1, :] = 292.\n tmp[:, 2, :] = 290\n tmp[:, 3, :] = 288.\n tmpk = tmp * units.kelvin\n rh = np.ones((3, 4, 5, 5))\n rh[:, 0, :] = 100.\n rh[:, 1, :] = 80.\n rh[:, 2, :] = 40.\n rh[:, 3, :] = 20.\n relh = rh * units.percent\n isentlev = [296., 297., 300.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh, vertical_dim=1)\n trueprs = 1000. 
* units.hPa\n trueprs2 = 936.213 * units.hPa\n trueprs3 = 879.50375588 * units.hPa\n truerh = 69.19706 * units.percent\n assert isentprs[0].shape == (3, 3, 5, 5)\n assert_almost_equal(isentprs[0][:, 0, :], trueprs, 3)\n assert_almost_equal(isentprs[0][:, 1, :], trueprs2, 3)\n assert_almost_equal(isentprs[0][:, 2, :], trueprs3, 3)\n assert_almost_equal(isentprs[1][:, 1, ], truerh, 3)", "def test_isentropic_pressure_addition_args_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 69.197 * units.percent\n assert_almost_equal(isentprs[1][1], truerh, 3)", "def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n 
upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def interpolation(self):\n return self._image.interpolation", "def test_isentropic_pressure_additional_args():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. * units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def test_isentropic_pressure():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmp[:, :, -1] = np.nan\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = np.ones((1, 5, 5)) * (1000. 
* units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)", "def getEichFromEQ(self, ep, verbose=False):\n #assuming plasma is centered in machine here\n zMin = ep.g['ZmAxis'] - 0.25\n zMax = ep.g['ZmAxis'] + 0.25\n zWall = np.linspace(zMin, zMax, 1000)\n zLCFS = ep.g['lcfs'][:,1]\n #this prevents us from getting locations not at midplane\n idx = np.where(np.logical_and(zLCFS>zMin,zLCFS<zMax))\n Rmax = ep.g['lcfs'][:,0][idx].max()\n Rmin = ep.g['lcfs'][:,0][idx].min()\n # geometric quantities\n Rgeo = (Rmax + Rmin) / 2.0\n a = (Rmax - Rmin) / 2.0\n aspect = a/Rgeo\n\n #Regression 15\n C = 1.35\n Cp = -0.02\n Cr = 0.04\n Cb = -0.92\n Ca = 0.42\n # Evaluate Bp at outboard midplane\n Z_omp_sol = 0.0\n Bp = abs(ep.BpFunc.ev(Rmax,Z_omp_sol))\n #Evaluate lq\n self.lqEich = C * self.Psol**Cp * Rgeo**Cr * Bp**Cb * aspect**Ca # in mm\n Bt = abs(ep.BtFunc.ev(ep.g['RmAxis'],ep.g['ZmAxis']))\n if verbose==True:\n print(\"Poloidal Field at midplane: {:f}\".format(Bp))\n print(\"Toroidal Field at axis: {:f}\".format(Bt))\n print(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n log.info(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n return", "def computeProp(self):\n self.chem = {}\n for key in self.config.C:\n if key in ['P', 'T', 'Z', 'DZ']:\n continue\n self.chem[key] = chemistry.ConstituentProperties(key)\n\n # nAtm = len(self.gas[self.config.C['P']])\n self.property = []\n for op in self.config.LP:\n self.property.append([])\n zOffset = 0.0\n iOffset = 0\n psep = 1.0E6\n for i, zv in enumerate(self.gas[self.config.C['Z']]): # find the nearest z value at p_ref\n P = self.gas[self.config.C['P']][i]\n if abs(P - self.config.p_ref) < psep:\n psep = abs(P - self.config.p_ref)\n iOffset = i\n zOffset = self.gas[self.config.C['Z']][iOffset]\n z_at_p_ref = self.config.Req\n\n for i, zv in enumerate(self.gas[self.config.C['Z']]):\n T = self.gas[self.config.C['T']][i]\n P = self.gas[self.config.C['P']][i]\n self.property[self.config.LP['P']].append(P)\n self.property[self.config.LP['Z']].append(zv)\n rr = z_at_p_ref + zv - zOffset\n # note that this is the \"actual\"z along equator referenced to planet center (aka radius)\n self.property[self.config.LP['R']].append(rr)\n # ##set mean amu\n amulyr = 0.0\n for key in self.chem:\n amulyr += self.chem[key].amu * self.gas[self.config.C[key]][i]\n self.property[self.config.LP['AMU']].append(amulyr)\n # ##set GM pre-calc (normalized further down) and get lapse rate\n if not i:\n self.property[self.config.LP['GM']].append(0.0)\n self.property[self.config.LP['LAPSE']].append(0.0)\n self.property[self.config.LP['LAPSEP']].append(0.0)\n else:\n rho = (amulyr * P) / (chemistry.R * T)\n dr = abs(zv - self.gas[self.config.C['Z']][i - 1])\n dV = 4.0 * np.pi * (rr**2) * dr\n dM = 1.0e11 * rho * dV\n GdM = self.property[self.config.LP['GM']][i - 1] + chemistry.GravConst * dM\n # in km3/s2\n # mass added as you make way into atmosphere by radius 
r (times G)\n self.property[self.config.LP['GM']].append(GdM)\n dT = abs(T - self.gas[self.config.C['T']][i - 1])\n dP = abs(P - self.gas[self.config.C['P']][i - 1])\n self.property[self.config.LP['LAPSE']].append(dT / dr)\n self.property[self.config.LP['LAPSEP']].append(dT / dP)\n # ##set refractivity and index of refraction\n refrlyr = 0.0\n for key in self.chem:\n refrlyr += self.chem[key].refractivity(T=T) * self.gas[self.config.C[key]][i]\n refrlyr = refrlyr * P * (293.0 / T)\n self.property[self.config.LP['REFR']].append(refrlyr)\n nlyr = refrlyr / 1.0E6 + 1.0\n self.property[self.config.LP['N']].append(nlyr)\n\n # ##Now need to normalize GM to planet and calculate scale height (H)\n GMnorm = self.property[self.config.LP['GM']][iOffset] # G*(Mass added by p_ref)\n for i, mv in enumerate(self.property[self.config.LP['GM']]):\n gm = self.config.GM_ref - (mv - GMnorm)\n self.property[self.config.LP['GM']][i] = gm\n little_g = gm / self.property[self.config.LP['R']][i]**2\n m_bar = self.property[self.config.LP['AMU']][i]\n T = self.gas[self.config.C['T']][i]\n self.property[self.config.LP['H']].append((chemistry.R * T) /\n (little_g * m_bar) / 1000.0)\n self.property[self.config.LP['g']].append(little_g)\n self.property = np.array(self.property)", "def get_pressure(self): # This function implements the equations needed to convert the digital data into mbars\n self.digital_pressure_data()\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n temperature, dT=self.get_temperature()\n OFF = ((C_2 * (2**16)) + ((C_4 * dT)/2**7))\n SENS = (C_1 * (2**15)) + ((C_3 * dT)/(2**8))\n pressure=(((self.presadc*(SENS/(2**21)))-OFF)/(2**15))/100\n return pressure, temperature", "def GetInterpolation(self, *args, **kwargs):\n pass", "def terrain_multiplier(self):\n #Hardcode table of terrain multipliers\n self.terrain_table = pd.DataFrame({\n 'height': [0.00, 3.00, 5.00, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 75.0, 100., 150., 200.],\n '1': [0.99, 0.99, 1.05, 1.12, 1.16, 1.19, 1.22, 1.24, 1.25, 1.27, 1.29, 1.31, 1.32],\n '2': [0.91, 0.91, 0.91, 1.00, 1.05, 1.08, 1.12, 1.16, 1.18, 1.22, 1.24, 1.27, 1.29],\n '3': [0.83, 0.83, 0.83, 0.83, 0.89, 0.94, 1.00, 1.04, 1.07, 1.12, 1.16, 1.21, 1.24],\n '4': [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.80, 0.85, 0.90, 0.98, 1.03, 1.11, 1.16]}) #T4.1 AS1170.2\n self.terrain_table.set_index('height',inplace=True)\n\n terrain_stacked = self.terrain_table.stack().reset_index().values\n\n #2d interpolation of Table 4.1 AS1170.2.\n #Terrain Categories may be halves (e.g Category 1.5)\n #Heights may be any value\n #https://stackoverflow.com/questions/56291133/interpolation-of-a-pandas-dataframe\n self.M_z_cat = griddata(terrain_stacked[:,0:2],\n terrain_stacked[:,2],\n [(self.height, self.terrain_category)],\n method='linear')[0]", "def pressure(self, alt):\n alt_profile = self.altitude_profile(alt)\n T, number_density = alt_profile[1], alt_profile[8]\n\n # using eqn(42) of COESA76\n pressure = number_density * k * T\n return pressure", "def evalWaveSol(new_interps, patch_dict, intp_deg=1, interp_key='times'):\n try:\n len(new_interps)\n except TypeError:\n new_interps = [new_interps]\n K = patch_dict['K']\n vv = patch_dict['v']\n # Interpolate eigen coefficients\n new_ecs = np.empty((len(new_interps),K),dtype=float)\n for i in range(K):\n # Interpolating one by one seems right, right?\n if intp_deg==0:\n # Find nearest time for each time\n # IS THERE A WAY TO DO THIS NOT ONE BY ONE???\n for i_idx, i in enumerate(new_interps):\n idx = 
np.abs(patch_dict[interp_key]-i).argmin()\n new_ecs[i_idx,i] = patch_dict['ec'][idx,i]\n \n elif intp_deg==1: # Default\n f = interpolate.interp1d(patch_dict[interp_key],patch_dict['ec'][:,i],kind='linear',\n bounds_error=False,fill_value=np.nan)\n new_ecs[:,i] = f(new_interps)\n \n elif intp_deg==3:\n f = interpolate.interp1d(patch_dict[interp_key],patch_dict['ec'][:,i],kind='cubic',\n bounds_error=False,fill_value=np.nan)\n new_ecs[:,i] = f(new_interps)\n \n else:\n tck = interpolate.splrep(patch_dict[interp_key],patch_dict['ec'][:,i],k=interp_deg)\n new_ecs[:,i] = interpolate.splev(new_interps,tck)\n \n # Construct x values for that period of time\n denoised_xs = np.dot(new_ecs,vv[:K]) + patch_dict['mean_xs']\n \n return denoised_xs", "def __getPressureCalibrationCoefficients(self):\n src13 = self.read_byte_data(self.address, 0x13)\n src14 = self.read_byte_data(self.address, 0x14)\n src15 = self.read_byte_data(self.address, 0x15)\n src16 = self.read_byte_data(self.address, 0x16)\n src17 = self.read_byte_data(self.address, 0x17)\n src18 = self.read_byte_data(self.address, 0x18)\n src19 = self.read_byte_data(self.address, 0x19)\n src1A = self.read_byte_data(self.address, 0x1A)\n src1B = self.read_byte_data(self.address, 0x1B)\n src1C = self.read_byte_data(self.address, 0x1C)\n src1D = self.read_byte_data(self.address, 0x1D)\n src1E = self.read_byte_data(self.address, 0x1E)\n src1F = self.read_byte_data(self.address, 0x1F)\n src20 = self.read_byte_data(self.address, 0x20)\n src21 = self.read_byte_data(self.address, 0x21)\n c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)\n c00 = getTwosComplement(c00, 20)\n c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17\n c10 = getTwosComplement(c10, 20)\n c20 = (src1C << 8) | src1D\n c20 = getTwosComplement(c20, 16)\n c30 = (src20 << 8) | src21\n c30 = getTwosComplement(c30, 16)\n c01 = (src18 << 8) | src19\n c01 = getTwosComplement(c01, 16)\n c11 = (src1A << 8) | src1B\n c11 = getTwosComplement(c11, 16)\n c21 = (src1E < 8) | src1F\n c21 = getTwosComplement(c21, 16)\n return c00, c10, c20, c30, c01, c11, c21", "def apply_interpolation(self, transect, interpolation_method=None):\n\n # Reset processed data\n if self.u_mps is not None:\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan\n\n # Determine interpolation methods to apply\n if interpolation_method is None:\n interpolation_method = self.interpolate\n else:\n self.interpolate = interpolation_method\n\n # Apply specified interpolation method\n\n if interpolation_method == 'None':\n # Sets invalid data to nan with no interpolation\n self.interpolate_none()\n\n elif interpolation_method == 'ExpandedT':\n # Set interpolate to none as the interpolation done is in the QComp\n self.interpolate_next()\n\n elif interpolation_method == 'Hold9':\n # Interpolates using SonTek method of holding last valid for up to 9 samples\n self.interpolate_hold_9()\n\n elif interpolation_method == 'HoldLast':\n # Interpolates by holding last valid indefinitely\n self.interpolate_hold_last()\n\n elif interpolation_method == 'Linear':\n # Interpolates using linear interpolation\n self.interpolate_linear(transect)\n\n elif interpolation_method == 'Smooth':\n # Interpolates using smooth interpolation\n self.interpolate_smooth(transect)\n\n elif interpolation_method == 'TRDI':\n # TRDI interpolation is done in discharge.\n # For TRDI the interpolation is done 
on discharge not on velocities\n self.interpolate_none()", "def interpolate(self):\n interp = (\n self._get_ticks() - self._last_update\n ) / self._tick_step / self.dilation\n if interp > 1.0:\n interp = 1.0\n return interp", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def _t_at_interface(self, polarization, n_1, n_2):\n if polarization == 's':\n return 2*n_1/(n_1 + n_2)\n elif polarization == 'p':\n return 2*n_1/(n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def _approx_wep(wair,entr,pres):\n pvmax = pres * (1-wair) / (1-wair + _EPSW*wair)\n if pvmax >= _PTPE:\n # Saturation would start at _TTP; use saturated heat capacity at _TTP\n a_t = (pres - _PTPE)/(pres - _PTPE + _EPSW*_PTPE)\n s_t = (wair*_CDRY*numpy.log(_TTP/_TCELS) - (1-wair)*_LILTP/_TTP\n - wair*_RDRY*numpy.log((pres-_PTPE)/_PATM)\n + wair*_RWAT*_EPSW*_PTPE/(pres-_PTPE)*_AVI)\n c_t = (wair*_CDRY + wair*(1-a_t)/a_t*_CVAP + (1-wair/a_t)*_CICE\n + wair*_RWAT*(1-a_t)*(_EPSW*a_t + 1-a_t)/_EPSW/a_t**2 * _AVI**2)\n temp = _TTP * numpy.exp(-(s_t-entr)/c_t)\n else:\n # Get approximate saturation temperature\n v = numpy.log(pres*(1-wair)/(_PTPE*(_EPSW*wair + 1-wair)))/_BVI\n r = _AVI/_BVI\n x = maths4.lamb2(v,r)\n tsat = _TTP/x\n ssat = (wair * (_CDRY*numpy.log(tsat/_TCELS)\n - _RDRY*numpy.log((pres-pvmax)/_PATM))\n + (1-wair) * (_CVAP*numpy.log(tsat/_TTP) + _LLVTP/_TTP\n - _RWAT*numpy.log(pvmax/_PTPE)))\n \n if entr >= ssat:\n ceff = wair*_CDRY + (1-wair)*_CVAP\n temp = _TTP * numpy.exp((entr-ssat)/ceff)\n else:\n csat = (wair*_CDRY + (1-wair)*_CVAP\n + (1-wair)*_RWAT*pres/(pres-pvmax)\n * ((_AVI+_BVI)*_TTP/tsat - _BVI)**2)\n temp = tsat * numpy.exp(-(ssat-entr)/csat)\n pvap = _PTPE * numpy.exp((_AVI+_BVI)*(1 - _TTP/temp)\n - _BVI*numpy.log(temp/_TTP))\n airf = (pres - pvap) / (pres - pvap + _EPSW*pvap)\n dhum = pres/(_RDRY*temp) / (airf + (1-airf)/_EPSW)\n return airf, temp, dhum", "def _raw_eng_to_phys(self, eng_value):\n return self.pp(eng_value)", "def pressure(self):\r\n self._read_temperature()\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped\r\n var1 = float(self._t_fine) / 2.0 - 64000.0\r\n var2 = var1 * var1 * self._pressure_calib[5] / 32768.0\r\n var2 = var2 + var1 * self._pressure_calib[4] * 2.0\r\n var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0\r\n var3 = self._pressure_calib[2] * var1 * var1 / 524288.0\r\n var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0\r\n var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]\r\n if var1 == 0:\r\n return 0\r\n if var1:\r\n pressure = 1048576.0 - adc\r\n pressure = ((pressure - var2 
/ 4096.0) * 6250.0) / var1\r\n var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0\r\n var2 = pressure * self._pressure_calib[7] / 32768.0\r\n pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0\r\n\r\n pressure /= 100\r\n if pressure < _BME280_PRESSURE_MIN_HPA:\r\n return _BME280_PRESSURE_MIN_HPA\r\n if pressure > _BME280_PRESSURE_MAX_HPA:\r\n return _BME280_PRESSURE_MAX_HPA\r\n return pressure\r\n else:\r\n return _BME280_PRESSURE_MIN_HPA", "def evalComponent(self, x, p):\n if p > 0 and p <= self.n:\n p = str(p)\n y = self[\"off\"] + self[\"lin\"] * x\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y\n else:\n raise(PE.PyAValError(\"No such component (no. \" + str(p) + \")\", where=\"MultiVoigt1d::evalComponent\",\n solution=\"Use value between 1 and \" + str(self.n)))", "def P(self):\n return self.generic_getter(get_pressure, \"p\", \"convert_pressure\")", "def get_cape(temp,pres,dewpt,hght,startp,startt,startdp,totalcape=False): \n\n # Check units\n # Init temp is startt in C, Init dew point is stwrtdp,\n # pressure levels are in hPa \n temp = temp - 273.15 # convert temperature to celsius\n dewpt = dewpt - 273.15 # convert dewpoint to celsius\n pres = pres/100 # convert pressure to hPa\n \n \n inds = np.where( (pres < startp) ) \n tmp = pres[inds]\n del pres\n #pres = tmp[::-1]\n pres = tmp[:]\n del tmp \n startp = startp/100\n \n tmp = temp[inds]\n del temp\n #temp = tmp[::-1]\n temp = tmp[:]\n del tmp \n\n tmp = dewpt[inds]\n del dewpt\n #dewpt = tmp[::-1]\n dewpt = tmp[:]\n del tmp \n\n tmp = hght[inds]\n del hght\n #hght = tmp[::-1]\n hght = tmp[:]\n del tmp \n\n \n # Get Sub-LCL traces \n presdry,tempdry,tempiso=dry_ascent(startp,startt-degCtoK,startdp-degCtoK) \n \n\n # make lcl variables explicit\n P_lcl=presdry[-1]\n T_lcl=tempdry[-1]\n\n # Now lift a wet parcel from the intersection point\n # preswet=linspace(P_lcl,100,101)\n preswet,tempwet=moist_ascent(P_lcl,T_lcl)\n\n # tparcel is the concatenation of tempdry and \n # tempwet, and so on.\n \n tparcel=np.concatenate((tempdry,tempwet[1:]))\n pparcel=np.concatenate((presdry,preswet[1:]))\n\n # Interpolating the environmental profile onto the \n # parcel pressure coordinate\n # tempenv=interp(preswet,pres[::-1],temp[::-1])\n ## NEW, for total column:\n tempenv=interp(pparcel,pres[::-1],temp[::-1])\n\n\n # now solve for the equlibrium levels above LCL\n # (all of them, including unstable ones)\n # eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])\n # NEW, for total column:\n # On second thought, we don't really want/need\n # any equilibrium levels below LCL\n # eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])\n # This is equivalent to the old statement :\n eqlev,stab=solve_eq(pparcel[pparcel<=P_lcl][::-1],\\\n (tparcel-tempenv)[pparcel<=P_lcl][::-1])\n\n aa = tparcel-tempenv\n\n # Sorting index by decreasing pressure\n I=np.argsort(eqlev)[::-1]\n eqlev=eqlev[I]; stab=stab[I]\n\n # temperatures at the equilibrium level\n # tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])\n ## NEW, for total column:\n tempeq=interp(eqlev,pparcel[::-1],tparcel[::-1])\n\n # This helps with debugging\n # for ii,eq in enumerate(eqlev):\n # print \"%5.2f %5.2f %2d\"%(eq,tempeq[ii],stab[ii])\n\n # need environmental temperature at LCL\n tenv_lcl=interp(P_lcl,pparcel[::-1],tempenv[::-1])\n\n isstab=np.where(stab==1.,True,False)\n unstab=np.where(stab==1.,False,True) \n\n if 
eqlev.shape[0]==0:\n # no unstable layers in entire profile\n # because the parcel never crosses the tenv\n P_lfc=float('NaN')\n P_el=float('NaN')\n elif T_lcl>tenv_lcl:\n # check LCL to see if this is unstable\n P_lfc=P_lcl\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n elif eqlev.shape[0]>1:\n # Parcel is stable at LCL so LFC is the \n # first unstable equilibrium level and \n # \"EQ\" level is the first stable equilibrium \n # level\n P_lfc=eqlev[unstab][0]\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n else:\n # catch a problem... if there is only\n # one eqlev and it's stable (this is \n # unphysical), then it could be a vertical\n # resolution thing. This is a kind of \n # \"null\" option\n try:\n\t P_el=eqlev[isstab][0]\n P_lfc=eqlev[isstab][0]\n except:\n\t P_el=eqlev[unstab][0]\n P_lfc=eqlev[unstab][0]\t\n\t\n if np.isnan(P_lfc):\n return P_lcl,P_lfc,P_el,0,0\n\n # need to handle case where dwpt is not available \n # above a certain level for any reason. Most simplest \n # thing to do is set it to a reasonably low value; \n # this should be a conservative approach!\n \n #dwpt=dewpt.copy().soften_mask()\n [inds] = np.where(np.isnan(dewpt))\n dwpt = dewpt\n dwpt[inds] = dwpt.min()\n \n # raise ValueError\n #if dwpt[(pres>=P_el).data*(pres<P_lfc).data].mask.any():\n # print \"WARNING: substituting dwpt.min() for masked values of DWPT in this sounding\"\n #dwpt[dwpt.mask]=dwpt.min()\n # dwptenv=interp(preswet,pres[::-1],dwpt[::-1])\n # NEW:\n\n dwptenv=interp(pparcel,pres[::-1],dwpt[::-1])\n\n\n \n #if hght[(pres>=P_el).data].mask.any():\n # raise NotImplementedError, \"TODO: Implement standard atmosphere to substitute missing heights\"\n # hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])\n # NEW:\n hghtenv=interp(pparcel,pres[::-1],hght[::-1])\n \n\n # Areas of POSITIVE Bouyancy\n # cond1=(tempwet>=tempenv)*(preswet<=P_lfc)*(preswet>P_el)\n # NEW:\n cond1=(tparcel>=tempenv)*(pparcel<=P_lfc)*(pparcel>P_el)\n # Areas of NEGATIVE Bouyancy\n # cond2=(tempwet<tempenv)*(preswet<=P_lcl)*(preswet>P_el)\n # NEW:\n if totalcape:\n cond2=(tparcel<tempenv)*(pparcel>P_el)\n else:\n cond2=(tparcel<tempenv)*(pparcel>P_lfc)\n # Do CAPE calculation\n # 1. Virtual temperature of parcel... 
remember it's saturated above LCL.\n # e_parcel=SatVap(tempwet)\n # Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)\n # e_env=SatVap(dwptenv)\n # Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)\n # NEW:\n e_parcel=SatVap(tparcel)\n Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)\n e_env=SatVap(dwptenv)\n Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)\n\n CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/Tv_env[cond1],hghtenv[cond1])\n CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/Tv_env[cond2],hghtenv[cond2])\n\n return P_lcl,P_lfc,P_el,CAPE,CIN", "def trilinear_interpolate(point, atom_index, emap, emap_max, emap_min):\n point1 = []\n point0 = []\n dif = []\n for p in point:\n if round(p) == p:\n p += 1E-10\n point0.append(math.floor(p))\n point1.append(math.ceil(p))\n dif.append((p - point0[-1]) / (point1[-1] - point0[-1]))\n\n i000 = energy_map_index(point0, emap_max, emap_min) # (0, 0, 0)\n i100 = energy_map_index([point1[0], point0[1], point0[2]], emap_max, emap_min) # (1, 0, 0)\n i001 = energy_map_index([point0[0], point0[1], point1[2]], emap_max, emap_min) # (0, 0, 1)\n i101 = energy_map_index([point1[0], point0[1], point1[2]], emap_max, emap_min) # (1, 0, 1)\n i010 = energy_map_index([point0[0], point1[1], point0[2]], emap_max, emap_min) # (0, 1, 0)\n i110 = energy_map_index([point1[0], point1[1], point0[2]], emap_max, emap_min) # (1, 1, 0)\n i011 = energy_map_index([point0[0], point1[1], point1[2]], emap_max, emap_min) # (0, 1, 1)\n i111 = energy_map_index(point1, emap_max, emap_min) # (1, 1, 1)\n\n c00 = emap[i000][atom_index] * (1 - dif[0]) + emap[i100][atom_index] * dif[0]\n c01 = emap[i001][atom_index] * (1 - dif[0]) + emap[i101][atom_index] * dif[0]\n c10 = emap[i010][atom_index] * (1 - dif[0]) + emap[i110][atom_index] * dif[0]\n c11 = emap[i011][atom_index] * (1 - dif[0]) + emap[i111][atom_index] * dif[0]\n\n c0 = c00 * (1 - dif[1]) + c10 * dif[1]\n c1 = c01 * (1 - dif[1]) + c11 * dif[1]\n\n c = c0 * (1 - dif[2]) + c1 * dif[2]\n\n return c", "def altitude_to_pressure(alt):\n for i in range(len(_heights)-1,0,-1):\n h0 = _heights[i]\n T0 = _basetemps[i]\n if alt > h0:\n if _isotherm[i]:\n rP = math.exp(-_g / _R / T0 * (alt - h0))\n else:\n l0 = _lapsert[i]\n rP = math.pow(1 + (alt - h0) * l0 / T0, -_g / _R / l0)\n return _basepress[i] * rP\n l0 = _lapsert[0]\n return _stdpres * math.pow(1 + alt * l0 / _stdtemp, -_g / _R / l0)", "def calc_Hcp_ij(self):\n\t\n\thp0_delayed = self.hp_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thp0 = self.hp_wavelet.get_Psi(self.xi[0])\n\thc0_delayed = self.hc_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thc0 = self.hc_wavelet.get_Psi(self.xi[0])\n\t\n\thp1_delayed = self.hp_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thp1 = self.hp_wavelet.get_Psi(self.xi[1])\n\thc1_delayed = self.hc_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thc1 = self.hc_wavelet.get_Psi(self.xi[1])\n\t\n\thp2_delayed = self.hp_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thp2 = self.hp_wavelet.get_Psi(self.xi[2])\n\thc2_delayed = self.hc_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thc2 = self.hc_wavelet.get_Psi(self.xi[2])\n\t\n\tself.Hpij[0,1] = hp1_delayed - hp0\n\tself.Hpij[1,0] = hp0_delayed - hp1\n\n\tself.Hpij[0,2] = hp2_delayed - hp0\n\tself.Hpij[2,0] = hp0_delayed - hp2\n\n\tself.Hpij[1,2] = hp2_delayed - hp1\n\tself.Hpij[2,1] = hp1_delayed - hp2\n\t\n\t# cross-polarization\n\tself.Hcij[0,1] = hc1_delayed - hc0\n\tself.Hcij[1,0] = hc0_delayed - hc1\n\n\tself.Hcij[0,2] = 
hc2_delayed - hc0\n\tself.Hcij[2,0] = hc0_delayed - hc2\n\n\tself.Hcij[1,2] = hc2_delayed - hc1\n\tself.Hcij[2,1] = hc1_delayed - hc2\n\t\n\treturn", "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi) \n \n\n # tmpnum=100000\n # locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n # locphi = self.rhotor**2\n # locphi_p = interpolate.interp1d(np.linspace(0,1,len(locphi)),locphi)\n # locphi = locphi_p(np.linspace(0,1,tmpnum))\n # psi = integrate.cumtrapz(1/locq,locphi)\n # psi = np.concatenate([[0], psi])\n # psi = psi/max(psi)\n # rhopsi = psi**0.5\n # self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)", "def plc_temp(coil_df):", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def partial_pressure(fraction=3, tem=283.15, pre=1.21325):\n pwater = np.exp(77.345 + 0.0057 * tem - 7235 / tem) / (tem ** 8.2) / 100000\n # partial pressure of H2O in air by relation, [Bar]\n p_hcl = fraction * 10 ** -5 * pre\n # firstly use 3ppm concentration to do estimation [Bar]\n return tem, pre, pwater, p_hcl", "def initialise_source(self, c, key):\n if key == 'p':\n return 50e5\n elif key == 'h':\n flow = c.to_flow()\n if c.source_id == 'out1':\n T = 200 + 273.15\n return h_mix_pT(flow, T)\n else:\n T = 250 + 273.15\n return h_mix_pT(flow, T)", "def _approx_pot(wair,temp,pres,ppot,airf,dhum):\n pvsat0 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/temp)\n + _BVI*numpy.log(_TTP/temp))\n pvmax0 = pres * (1-wair)/(_EPSW*wair + 1-wair)\n if pvmax0 > pvsat0:\n # Parcel starts saturated\n pv0 = pvsat0\n a0 = (pres-pv0) / (pres-pv0 + _EPSW*pv0)\n ceff0 = (wair*_CDRY + wair*(1-a0)/a0*_CVAP + (1-wair/a0)*_CICE\n + wair*_RWAT*(1-a0)/a0*pres/(pres-pv0)\n * ((_AVI+_BVI)*_TTP/temp - _BVI)**2)\n reff0 = wair*(_RDRY + _RWAT*(1-a0)/a0\n + _RWAT*(1-a0)/a0*pres/(pres-pv0) * ((_AVI+_BVI)*_TTP/temp - _BVI))\n pvmaxt = pvmax0 * (_TTP/temp)**(ceff0/reff0)\n ginv0 = ceff0/reff0\n r = (_AVI+_BVI)/(ginv0+_BVI) - 1\n v = numpy.log((_TTP/temp)**ginv0 * pvmax0/_PTPE)/(ginv0+_BVI)\n if pvmaxt > _PTPE or v <= r:\n # Parcel is always ice-saturated\n tpot = temp * (ppot/pres)**(reff0/ceff0)\n pv2 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/tpot)\n + _BVI*numpy.log(_TTP/tpot))\n apot = (ppot-pv2) / (ppot-pv2 + _EPSW*pv2)\n else:\n # Find where parcel de-saturates\n x = maths4.lamb2(v,r)\n ticl = _TTP/x\n picl = pres * (ticl/temp)**ginv\n if ppot < picl:\n # Parcel ends saturated\n tpot = temp * (ppot/pres)**(reff0/ceff0)\n pv2 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/tpot)\n + _BVI*numpy.log(_TTP/tpot))\n apot = (ppot-pv2) / (ppot-pv2 + _EPSW*pv2)\n else:\n # Parcel ends unsaturated\n p1 = picl\n t1 = ticl\n ceff1 = wair*_CDRY + (1-wair)*_CVAP\n reff1 = wair*_RDRY + (1-wair)*_RWAT\n tpot = t1 * (ppot/p1)**(reff1/ceff1)\n apot = wair\n else:\n # Parcel starts unsaturated\n ticl, picl, __ = iceair4a._approx_icl(wair,temp,pres,dhum)\n if ppot < picl:\n # Parcel ends saturated\n p1 = picl\n t1 = ticl\n pv1 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/t1)\n + _BVI*numpy.log(_TTP/t1))\n a1 = (p1-pv1) / (p1-pv1 + _EPSW*pv1)\n ceff1 = (wair*_CDRY + (1-wair)*_CVAP\n + (1-wair)*_RWAT*p1/(p1-pv1) * ((_AVI+_BVI)*_TTP/t1 - _BVI)**2)\n reff1 = (wair*_RDRY + (1-wair)*_RWAT\n + 
(1-wair)*_RWAT*p1/(p1-pv1) * ((_AVI+_BVI)*_TTP/t1 - _BVI))\n tpot = t1 * (ppot/p1)**(reff1/ceff1)\n pv2 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/tpot)\n + _BVI*numpy.log(_TTP/tpot))\n apot = (ppot-pv2) / (ppot-pv2 + _EPSW*pv2)\n else:\n # Parcel ends unsaturated\n ceff1 = wair*_CDRY + (1-wair)*_CVAP\n reff1 = wair*_RDRY + (1-wair)*_RWAT\n tpot = temp * (ppot/pres)**(reff1/ceff1)\n apot = wair\n dhpot = ppot/(_RDRY*tpot) / (apot + (1-apot)/_EPSW)\n return apot, tpot, dhpot", "def _raw_eng_to_phys(self, eng_value):\n return self.p(eng_value)", "def InterpolationFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def pressure(altitude):\n t = temperature(altitude) # R\n if altitude <= 36152:\n p = 2116*(t/518.6)**5.256 # psf\n else:\n p = 473.1*exp(1.73-0.000048*altitude) # psf\n return p", "def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def P2T(self):\n # Convert to RJ temperature\n #fac=planck.I2Ta(self.f*1e6,1).value\n fac = planck(self.f*1e6, 1)\n fac=fac/fac[0]\n self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))", "def speed_interpolation(val):\n if val == 0.5:\n return 1.0\n elif val < 0.5:\n return low_interp(val)\n else:\n return hi_interp(val)", "def __call__(self, vigt):\r\n return self.interpolant(vigt)", "def liqpressure(temp):\n tau = temp/_TTP\n pres = 1.\n for (a,b) in _C_PMELT:\n pres += a * (1 - tau**b)\n pres *= _PTPE\n return pres", "def raw_to_calibrated_pressure(self, rawpressure, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n\n adc_P = np.array(rawpressure, dtype='int64')\n dig_P1 = self.calib_vals['dig_P1'].astype('int64')\n dig_P2 = self.calib_vals['dig_P2'].astype('int64')\n dig_P3 = self.calib_vals['dig_P3'].astype('int64')\n dig_P4 = self.calib_vals['dig_P4'].astype('int64')\n dig_P5 = self.calib_vals['dig_P5'].astype('int64')\n dig_P6 = self.calib_vals['dig_P6'].astype('int64')\n dig_P7 = self.calib_vals['dig_P7'].astype('int64')\n dig_P8 = self.calib_vals['dig_P8'].astype('int64')\n dig_P9 = self.calib_vals['dig_P9'].astype('int64')\n\n var1 = t_fine - 128000\n var2 = var1 * var1 * dig_P6\n var2 += ((var1*dig_P5)<<17)\n var2 += ((dig_P4)<<35)\n var1 = ((var1 * var1 * dig_P3)>>8) + ((var1 * dig_P2)<<12)\n var1 = ((((1)<<47)+var1))*(dig_P1)>>33\n\n p = 1048576-adc_P\n p = (((p<<31)-var2)*3125)//var1\n var1 = (dig_P9 * (p>>13) * (p>>13)) >> 25\n var2 = (dig_P8 * p) >> 19\n p = ((p + var1 + var2) >> 8) + (dig_P7<<4)\n return p/256000.", "def cal_pt(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for pt routine)')\n\n self.pt =math.sqrt(self.px**2+self.py**2)", "def interpolatehour(self, hourslice, starttime, data):\n\n times = self.times[hourslice]\n # Hacky solution for boundary crossers\n if times[1] == 0:\n times[0] -= 24\n if times[2] == 0:\n times[0] -= 24\n times[1] -= 24\n elif times[3] == 0:\n times[3] += 24\n times[4] += 24\n elif times[4] == 0:\n times[4] += 24\n f = PchipInterpolator(times,\n data,\n extrapolate=False,\n axis=0)\n\n return f(starttime)", "def get_units(self,):\n self.UNITS = 
{'pressure':'Pa',}\n return", "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def interpolate_ephemeris(self):\n #Compute the offsets into the lookup tables\n startemiss, stopemiss = self.get_emissivity_offsets()\n hourslice, starttime = self.get_hour_offsets()\n latslice = self.get_lat_offsets()\n \n #Compute the start and stop dates\n startdata = self.extract_season(self.startseason,startemiss,\n hourslice, latslice)\n stopdata = self.extract_season(self.stopseason,startemiss,\n hourslice, latslice)\n # Interpolate Season\n seasons = [self.startseason, self.stopseason]\n season_f = compute_interpolation_function(seasons, [startdata, stopdata], 'linear')\n data = season_f(self.season)\n #Interpolate time\n self.data = self.interpolatehour(hourslice, starttime, data)", "def interpolate(self, image):\n return", "def test_isentropic_pressure_masked_column():\n lev = [100000., 95000.] * units.Pa\n tmp = np.ma.ones((len(lev), 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[:, :, -1] = np.ma.masked\n tmp = units.Quantity(tmp, units.kelvin)\n isentprs = isentropic_interpolation([296.] * units.kelvin, lev, tmp)\n trueprs = np.ones((1, 5, 5)) * (1000. * units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def bilinear_interpolation(self, pt1, pt2, pt3, pt4, unknown):\n\n # Write your code for bilinear interpolation here\n # May b you can reuse or call linear interpolatio method to compute this task\n \n X1,Y1, intensity1 = pt1\n X2,Y2, intensity2 = pt2\n X3,Y3, intensity3 = pt3\n X4,Y4, intensity4 = pt4\n newPointX1,newPointY1 = unknown\n\n newpt1=self.linear_interpolation((X1,intensity1),(X2,intensity2),newPointX1)\n newpt2=self.linear_interpolation((X3,intensity3),(X4,intensity4),newPointX1)\n newpt1=Y1,newpt1\n newpt2=Y4,newpt2\n intensity=self.linear_interpolation(newpt1,newpt2,newPointY1)\n \n \n\n return intensity", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def func_Ip_318(pp, pd):\n return pp/(np.pi*(pd/2)**2)", "def get_chamber_pressure(self):\n raise NotImplementedError", "def process_component(self, i, outwave, filters):\n cspec = self.basis_spec[i, :].copy()\n cphot = 0\n inwave = self.ssp.wavelengths\n\n if self.safe:\n cspec = np.interp(self.params['outwave'], vac2air(inwave), cspec/a)\n cphot = 10**(-0.4 * getSED(inwave, cspec/a, filters))\n return cspec, cphot\n\n # Dust attenuation\n tage = self.params['tage'][i]\n tesc = self.params.get('dust_tesc', 0.01)\n dust1 = self.params.get('dust1', 0.0)\n dust2 = self.params['dust2']\n a = (1 + self.params.get('zred', 0.0))\n dust = (tage < tesc) * dust1 + dust2\n att = self.params['dust_curve'][0](inwave, **self.params)\n cspec *= np.exp(-att*dust)\n\n if filters is not None:\n cphot = 10**(-0.4 * getSED(inwave*a, cspec / a, filters))\n\n # Wavelength scale. 
Broadening and redshifting and placing on output\n # wavelength grid\n if self.params.get('lsf', [None])[0] is not None:\n cspec = smoothspec(vac2air(inwave) * a, cspec / a,\n self.params['sigma_smooth'], **self.params)\n else:\n sigma = self.params.get('sigma_smooth', 0.0)\n cspec = self.ssp.smoothspec(inwave, cspec, sigma)\n cspec = np.interp(self.params['outwave'], vac2air(inwave * a), cspec/a)\n\n return cspec, cphot", "def _build_interpolator(self):\n # Extract the data from the interpolation dataset\n self.interp_data, names, units = xr_dataset_to_array(self.interp_ds, \n self.ztsp[0])\n \n # Record the variables and their units\n self.f_names = names[1:]\n self.f_units = units[1:]\n \n # Create the interpolator\n self.f = interp1d(self.interp_data[:,0], \n self.interp_data[:,1:].transpose())", "def value(self, r: float, θ: float, φ: float) -> complex:\n result = 0\n for comp in self.components:\n result += np.interp([r], self.x, comp)[0]\n\n return result", "def func_Ic_318(cp, cd):\n return cp/(np.pi*(cd/2)**2)", "def init_patch_clamp(self):\n\n self.pressure.nearing()\n\n # Auto pipette offset and holding at 0 V\n self.amplifier.meter_resist_enable(False)\n self.amplifier.auto_pipette_offset()\n self.amplifier.set_holding(0.)\n self.amplifier.set_holding_enable(True)\n\n # Begin metering\n self.amplifier.meter_resist_enable(True)\n # wait for stable measure\n time.sleep(4)\n # Get pipette resistance\n self.pipette_resistance = self.get_single_resistance_metering(res_type='float')\n if 5e6 > self.pipette_resistance:\n self.update_message('ERROR: Tip resistance is too low ({}).'\n ' Should be higher than 5 MOhm.'.format(self.get_single_resistance_metering('text')))\n self.amplifier.meter_resist_enable(False)\n return 0\n if 10e6 < self.pipette_resistance:\n self.update_message('ERROR: Tip resistance is too high ({}).'\n ' Should be lower than 10 MOhm.'.format(self.get_single_resistance_metering('text')))\n self.amplifier.meter_resist_enable(False)\n return 0\n else:\n self.update_message('Tip resistance is good: {}'.format(self.get_single_resistance_metering('text')))\n self.pipette_resistance_checked = True\n self.set_continuous_meter(True)\n return 1", "def test_pressure(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.pressure[0], 223599111111.10834)", "def _temperature(self, p_input:float) -> float:\n if self._unit_in == 'R':\n temp_K = p_input*5.0/9.0\n elif self._unit_in == 'F':\n temp_K = (p_input+459.67)/9.0*5.0\n elif self._unit_in == 'C':\n temp_K = p_input+273.15\n elif self._unit_in == 'K':\n temp_K = p_input\n \n if self._unit_out == 'R':\n return (temp_K*9.0/5.0)\n elif self._unit_out == 'F':\n return (temp_K*9.0/5.0-459.67) \n elif self._unit_out == 'C':\n return (temp_K-273.15)\n elif self._unit_out == 'K':\n return temp_K", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def gen_interpolation_coeff(self):\n\n L_counts = [0 for x in range(self.N)]\n ngram_freqs = [self.get_ngram_freq(n) for n in range(0,self.N + 1)]\n\n for ngram in self.freq_dist:\n\n c_at_n = [0 for x in range(self.N)]\n p_at_n = [0 
for x in range(self.N)]\n \n for n_len in range(1, self.N + 1):\n\n nom_ngram = ngram[len(ngram) - n_len:]\n denom_ngram = nom_ngram[:-1]\n\n nom_freq = ngram_freqs[n_len]\n denom_freq = ngram_freqs[n_len - 1 ]\n\n nom_count = nom_freq[nom_ngram] - 1\n denom_count = denom_freq[denom_ngram] - 1\n\n if n_len == 1:\n denom_count = sum(nom_freq.values()) - 1 \n \n p_at_n_len = 0\n if denom_count:\n p_at_n_len = nom_count / denom_count\n\n c_at_n[n_len - 1] = nom_count + 1\n p_at_n[n_len - 1] = p_at_n_len \n\n p_max = max(p_at_n)\n for i, p_item in enumerate(p_at_n):\n if p_max == p_item:\n L_counts[i] += c_at_n[i]\n\n total_L = sum(L_counts)\n return [L_count_item / total_L for L_count_item in L_counts]", "def hw_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n flow_dir = np.sign(i[0])\n\n return ((i[1] - o[1]) * flow_dir -\n (10.67 * abs(i[0]) ** 1.852 * self.L.val /\n (self.ks.val ** 1.852 * self.D.val ** 4.871)) *\n (9.81 * ((v_i + v_o) / 2) ** 0.852))", "def cue_integration_output(self, cue1, cue2):\r\n pi_left = cue1[0:8]\r\n\r\n pi_l = np.zeros((self.num_neuron, int(self.nt)))\r\n pi_l[:, int(self.ni):] = np.repeat(pi_left.reshape(self.num_neuron, 1), int(self.nt - self.ni), axis=1)\r\n\r\n pi_right = cue1[8:]\r\n pi_r = np.zeros((self.num_neuron, int(self.nt)))\r\n pi_r[:, int(self.ni):] = np.repeat(pi_right.reshape(self.num_neuron, 1), int(self.nt - self.ni), axis=1)\r\n\r\n v_left = cue2[0:8]\r\n v_l = np.zeros((self.num_neuron, int(self.nt)))\r\n v_l[:, int(self.ni):] = np.repeat(v_left.reshape(self.num_neuron, 1), int(self.nt - self.ni), axis=1)\r\n v_right = cue2[8:]\r\n v_r = np.zeros((self.num_neuron, int(self.nt)))\r\n v_r[:, int(self.ni):] = np.repeat(v_right.reshape(self.num_neuron, 1), int(self.nt - self.ni), axis=1)\r\n\r\n # generate the array for integration cells and the uniform inhibitory cell\r\n it_l = np.zeros((self.num_neuron, int(self.nt)))\r\n it_l[:, 0] = 0.1 * np.ones(self.num_neuron, )\r\n it_r = np.zeros((self.num_neuron, int(self.nt)))\r\n it_r[:, 0] = 0.1 * np.ones(self.num_neuron, )\r\n ul = np.zeros((1, int(self.nt)))\r\n ur = np.zeros((1, int(self.nt)))\r\n # iteration to the stable state\r\n for t in range(1, int(self.nt)):\r\n it_l[:, t] = it_l[:, t - 1] + (-it_l[:, t - 1] + np.max(\r\n [np.zeros((self.num_neuron,)),\r\n self.gammaE + np.dot(self.W_E_E, it_l[:, t - 1]) + self.W_E_I * ul[:, t - 1] + pi_l[:, t - 1]\r\n + v_l[:, t - 1]], axis=0)) * self.dt / self.tauE\r\n ul[:, t] = ul[:, t - 1] + (\r\n -ul[:, t - 1] + np.max([0, self.gammaI + self.W_I_E * np.sum(it_l[:, t - 1]) + self.W_I_I * ul[:, t - 1]],\r\n axis=0)) * self.dt / self.tauI\r\n\r\n it_r[:, t] = it_r[:, t - 1] + (-it_r[:, t - 1] + np.max(\r\n [np.zeros((self.num_neuron,)),\r\n self.gammaE + np.dot(self.W_E_E, it_r[:, t - 1]) + self.W_E_I * ur[:, t - 1] + pi_r[:, t - 1]\r\n + v_r[:, t - 1]], axis=0)) * self.dt / self.tauE\r\n ur[:, t] = ur[:, t - 1] + (\r\n -ur[:, t - 1] + np.max([0, self.gammaI + self.W_I_E * np.sum(it_r[:, t - 1]) + self.W_I_I * ur[:, t - 1]],\r\n axis=0)) * self.dt / self.tauI\r\n\r\n # get the final iteration as the output\r\n # update the neuron activation\r\n self.integration_neuron = np.hstack([it_l[:, -1], it_r[:, -1]])\r\n self.integration_neuron = noisy_sigmoid(self.integration_neuron, 5.0, 2.5, 0)\r\n self.inhibition_neuron = np.hstack([ul[-1], ur[-1]])\r\n\r\n return self.integration_neuron", "def 
coord_interp(parameter, interval):\r\n epoch = _np.linspace(1800, 12600 , int(10800/interval)+1) # 3h validity interval within 4h\r\n time = _np.array([epoch**deg for deg in range(len(parameter)-1,-1,-1)])\r\n return _np.matmul(parameter,time)", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def pressure_ashpa(self):\n hpa = None # Variable declaration\n raw_comp1 = None # Variable declaration\n raw_comp2 = None # Variable declaration\n raw_comp3 = None # Variable declaration\n raw_pressure = None # Variable declaration\n raw_temperature = None # Variable declaration\n value_d_p1 = None # Variable declaration\n value_d_p2 = None # Variable declaration\n value_d_p3 = None # Variable declaration\n value_d_p4 = None # Variable declaration\n value_d_p5 = None # Variable declaration\n value_d_p6 = None # Variable declaration\n value_d_p7 = None # Variable declaration\n value_d_p8 = None # Variable declaration\n value_d_p9 = None # Variable declaration\n value_lsb = None # Variable declaration\n value_msb = None # Variable declaration\n value_xlsb = None # Variable declaration\n\n value_msb = self.get_pressuremsb()\n value_lsb = self.get_pressurelsb()\n value_xlsb = self.get_pressurexlsb()\n value_d_p1 = self.get_digp1()\n value_d_p2 = self.get_digp2()\n value_d_p3 = self.get_digp3()\n value_d_p4 = self.get_digp4()\n value_d_p5 = self.get_digp5()\n value_d_p6 = self.get_digp6()\n value_d_p7 = self.get_digp7()\n value_d_p8 = self.get_digp8()\n value_d_p9 = self.get_digp9()\n raw_temperature = self.temperature_ascelsius()\n raw_temperature = (raw_temperature*5120.0)\n raw_pressure = ((value_msb << 12)+(value_lsb << 4)+(value_xlsb >> 4))\n raw_comp1 = ((raw_temperature/2)-64000.0)\n raw_comp2 = ((raw_comp1*raw_comp1*value_d_p6)/32768.0)\n raw_comp2 = (raw_comp2+(raw_comp1*value_d_p5*2.0))\n raw_comp2 = ((raw_comp2/4.0)+(value_d_p4*65536.0))\n raw_comp3 = (value_d_p3*raw_comp1*raw_comp1)\n raw_comp1 = (((raw_comp3/524288.0)+(value_d_p2*raw_comp1))/524288.0)\n raw_comp1 = ((1.0+(raw_comp1/32768.0))*value_d_p1)\n hpa = (1048576.0-raw_pressure)\n hpa = ((hpa-(raw_comp2/4096.0))*(6250.0/raw_comp1))\n raw_comp1 = ((value_d_p9*hpa*hpa)/2147483648.0)\n raw_comp2 = ((hpa*value_d_p8)/32768.0)\n hpa = (hpa+((raw_comp1+raw_comp2+value_d_p7)/16.0))\n hpa = (hpa/100.0)\n return hpa", "def interpolation_mode(self):\n return self._interpolation_mode", "def interpolate(self, interp):\n x = np.linspace(0, 29, len(self.ya))\n f_ya = interpolate.interp1d(x, self.ya)\n f_yv = interpolate.interp1d(x, self.yv)\n f_pa = interpolate.interp1d(x, np.reshape(self.pa, [-1]))\n f_pv = interpolate.interp1d(x, np.reshape(self.pv, [-1]))\n\n x_interp = np.linspace(0, 29, len(self.ya)*interp)\n self.ya = list(f_ya(x_interp))\n self.yv = list(f_yv(x_interp))\n self.pa = list(f_pa(x_interp))\n self.pv = list(f_pv(x_interp))", "def test_isentropic_pressure_data_bounds_error():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 350.] 
* units.kelvin\n with pytest.raises(ValueError):\n isentropic_interpolation(isentlev, lev, tmpk)", "def interpolation_pot(indata):\n x_values = indata[\"inter_points_x\"]\n y_values = indata[\"inter_points_y\"]\n x_range = np.linspace(indata[\"xMin\"], indata[\"xMax\"],\n int(indata[\"nPoint\"]))\n if indata[\"interpolationtype\"] == \"linear\":\n interpolation = sp.interp1d(x_values, y_values,\n kind=\"linear\")\n elif indata[\"interpolationtype\"] == \"cspline\":\n interpolation = sp.CubicSpline(x_values, y_values,\n bc_type=\"natural\")\n elif indata[\"interpolationtype\"] == \"polynomial\":\n interpolation = np.poly1d(np.polyfit(x_values, y_values,\n indata[\"nr_interpolation_points\"]\n - 1))\n int_potential = interpolation(x_range)\n\n fileio.write_int_pot(x_range, int_potential, indata[\"directory\"])\n return int_potential", "def get_value(p, t, q, c, v):\n\n gas = chemistry.ConstituentProperties(c)\n Psat_gas = gas.Psat(t)\n\n if c.upper() == 'H2S':\n if p < 43. and p * q * v > Psat_gas: # Pressure greater than saturation pressure\n return str(1.0)\n elif p < 43. and p * q * v < Psat_gas:\n return str(v)\n else:\n return str(0.8)\n else:\n return str(1.0)", "def cp(wair,pres,entr=None,temp=None,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,chkbnd=False,\n mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_s = temp\n h_ss = iceair_h(0,2,0,wair,pres,temp=temp,airf=airf,dhum=dhum)\n cp = h_s/h_ss\n return cp", "def calc_pucker_torsion(self):\n return self.calc_torsion(\"pucker\")", "def calc_supply_temp(tr, Q, m, cp, case):\n if m > 0:\n if case == \"DH\":\n ts = tr + Q / (m * cp)\n else:\n ts = tr - Q / (m * cp)\n else:\n ts = 0\n return ts", "def __call__(self, x, y):\n #- TODO: compare speed to solution at\n #- http://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python\n \n #- Find where we are in grid\n #- clip to 1 because we will use i and i-1\n #- clip to len(x)-1 to allow extrapolation beyond grid boundary\n ix = np.searchsorted(self.x, x).clip(1, len(self.x)-1)\n iy = np.searchsorted(self.y, y).clip(1, len(self.y)-1)\n \n #- Interpolation distances from points\n dx = (x - self.x[ix-1]) / (self.x[ix] - self.x[ix-1])\n dy = (y - self.y[iy-1]) / (self.y[iy] - self.y[iy-1])\n\n #- Interpolate, allowing x and/or y to be multi-dimensional\n #- NOTE: these are the slow steps, about equal time each\n \n #- Original code with what appears to be vestigial transposes\n # data1 = (self.data[ix-1,iy-1].T*(1-dx) + self.data[ix,iy-1].T*dx).T\n # data2 = (self.data[ix-1,iy].T*(1-dx) + self.data[ix,iy].T*dx).T\n # dataxy = (data1.T*(1-dy) + data2.T*dy).T\n\n #- Updated without transposes\n data1 = (self.data[ix-1,iy-1]*(1-dx) + self.data[ix,iy-1]*dx)\n data2 = (self.data[ix-1,iy]*(1-dx) + self.data[ix,iy]*dx)\n dataxy = (data1*(1-dy) + data2*dy)\n\n return dataxy", "async def get_pressure(self) -> float: # type: ignore\n ...", "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi)", "def _scalar_conversion(self, p_input:float) -> float:\n return 
(p_input/self.units[self._unit_in]*self.units[self._unit_out])", "def READ_PRESSURE_SENSOR():\n return 15.246", "def volt_to_pressure(volt):\n return volt/10", "def loading(self, pressure):\n kp = self.params[\"K\"] * pressure\n return self.params[\"n_m\"] * kp / (1.0 + kp)" ]
[ "0.6409062", "0.6191294", "0.6163329", "0.60624963", "0.59122026", "0.591134", "0.5853295", "0.57724625", "0.5744279", "0.57384187", "0.5700157", "0.5695096", "0.56918746", "0.56765395", "0.56729674", "0.56695503", "0.56321025", "0.5626728", "0.56115127", "0.5593795", "0.55772483", "0.553922", "0.5520023", "0.55067956", "0.5497766", "0.54809713", "0.54787207", "0.54773325", "0.5465778", "0.54634136", "0.5456678", "0.54453224", "0.5441972", "0.54109603", "0.5408819", "0.5393699", "0.5387029", "0.53775793", "0.53761727", "0.53734577", "0.5360629", "0.53513867", "0.5340837", "0.5338159", "0.532935", "0.5324512", "0.5316404", "0.5300795", "0.5300711", "0.52628034", "0.52576554", "0.5254172", "0.52536577", "0.52497935", "0.52492416", "0.5245379", "0.52400076", "0.5197074", "0.51874566", "0.5167734", "0.51665074", "0.5162196", "0.51594394", "0.51559937", "0.5155281", "0.514881", "0.5144962", "0.5132339", "0.5129981", "0.5128643", "0.5127628", "0.51196253", "0.5119281", "0.51115406", "0.5107583", "0.5106476", "0.5104578", "0.5087521", "0.50846577", "0.50827384", "0.5073465", "0.50678605", "0.50626016", "0.5057727", "0.50566417", "0.50499", "0.504496", "0.50424814", "0.5039033", "0.5028306", "0.5026532", "0.5023648", "0.50213164", "0.5017547", "0.5010019", "0.50053686", "0.49989182", "0.49983728", "0.49857703", "0.49853024", "0.49848372" ]
0.0
-1
Convert between engineering and physics units.
def _raw_eng_to_phys(self, eng_value): return self.pp(eng_value)
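As an illustration of the query above — converting between engineering and physics units — below is a minimal sketch of the kind of converter class a one-line _raw_eng_to_phys method could belong to. It assumes a polynomial mapping, mirroring the self.p(eng_value) evaluation and root-finding inverse that appear in the neighbouring snippets; the class name, constructor signature, and example coefficients here are hypothetical, not taken from the dataset.

    import numpy as np

    class PolyUnitConv:
        """Convert between engineering and physics units via a 1-D polynomial."""

        def __init__(self, coeffs):
            # np.poly1d evaluates the polynomial and exposes .roots for inversion.
            self.p = np.poly1d(coeffs)

        def eng_to_phys(self, eng_value):
            # Forward direction: evaluate the polynomial at the engineering value.
            return self.p(eng_value)

        def phys_to_eng(self, phys_value):
            # Inverse direction: solve p(x) = phys_value; only valid if the real root is unique.
            roots = (self.p - phys_value).roots
            real = [r.real for r in roots if abs(r.imag) < 1e-12]
            if len(real) != 1:
                raise ValueError("No unique engineering value for %r" % phys_value)
            return real[0]

    # Usage: a linear calibration phys = 2*eng + 1
    conv = PolyUnitConv([2.0, 1.0])
    assert abs(conv.eng_to_phys(3.0) - 7.0) < 1e-9
    assert abs(conv.phys_to_eng(7.0) - 3.0) < 1e-9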
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def original_units(self):\n self.physical_units(distance=self.infer_original_units('km'),\n velocity=self.infer_original_units('km s^-1'),\n mass=self.infer_original_units('Msol'), persistent=False)", "def useUnits():", "def units(self, key):\n \n # Strip any operators\n _, key = get_operator(key)\n \n # Fill out aliases \n if key in component_from_alias:\n key = component_from_alias[key]\n elif key == 'E':\n key = 'electricField'\n elif key == 'B':\n key = 'magneticField' \n \n return pg_units(key)", "def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier", "def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)", "def get_units(self,):\n self.UNITS = {'pressure':'Pa',}\n return", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def convert_units(data, units):\n # Build the dictionary of units conversions\n convert = {'m' : [1.0, 0., 'm'], \n 'meter' : [1.0, 0., 'm'], \n 'deg C' : [1.0, 273.15, 'K'], \n 'Celsius' : [1.0, 273.15, 'K'], \n 'K' : [1.0, 0., 'K'],\n 'db' : [1.e4, 101325., 'Pa'], \n 'Pa' : [1.0, 0., 'Pa'],\n 'mg/m^3': [1.e-6, 0., 'kg/m^3'], \n 'S/m': [1.0, 0., 'S/m'],\n 'mS/m' : [1.e-3, 0., 'S/m'],\n 'psu': [1.0, 0., 'psu'], \n 'salinity': [1.0, 0., 'psu'], \n 'kg/m^3': [1.0, 0., 'kg/m^3'], \n 'kilogram meter-3': [1.0, 0., 'kg/m^3'], \n 'm/s': [1.0, 0., 'm/s'], \n 'mg/l': [1.e-3, 0., 'kg/m^3'],\n 'meter second-1' : [1.0, 0., 'm/s'],\n 'm.s-1' : [1.0, 0., 'm/s'],\n 'pH units' : [1.0, 0., 'pH units'],\n 'MPa' : [1.e6, 0., 'Pa'],\n '--' : [1.0, 0., '--'],\n 'mD' : [9.869233e-16, 0., 'm^2'],\n 'um' : [1.e-6, 0., 'm'],\n 'm/s 1e-9' : [1.e-9, 0., 'm/s'],\n 'm/s 1e-7' : [1.e-7, 0., 'm/s'],\n 'wt.%' : [10., 0., 'psu'],\n '10^-15 m^2' : [1.e-15, 0., 'm^2'],\n 'm^2' : [1., 0., 'm^2'],\n 'kg/m^2/year' : [3.168808781402895e-08, 0., 'kg/m^2/s'] \n } \n \n # Make sure the data are a numpy array and the units are a list\n if isinstance(data, float) or isinstance(data, int):\n data = np.array([data])\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(units, str) or isinstance(units, unicode):\n units = [units]\n if units == None:\n units = ['']\n \n # Make sure you can slice through the columns: must be two-dimensional\n sh = data.shape\n data = np.atleast_2d(data)\n \n # Allow conversion of a row of data if all of the same unit\n if len(units) == 1 and data.shape[1] > 1:\n data = data.transpose()\n \n # Create an emtpy array to hold the output\n out_data = np.zeros(data.shape)\n out_units = []\n \n # Convert the units\n for i in range(len(units)):\n 
try:\n out_data[:,i] = data[:,i] * convert[units[i]][0] + \\\n convert[units[i]][1]\n out_units += [convert[units[i]][2]]\n except KeyError:\n print('Do not know how to convert %s to mks units' % units[i])\n print('Continuing without converting these units...')\n out_data[:,i] = data[:,i]\n out_units += units[i]\n \n # Return the converted data in the original shape\n out_data = np.reshape(out_data, sh, 'C')\n return (out_data, out_units)", "def _raw_phys_to_eng(self, physics_value):\n roots = (self.p - physics_value).roots\n if len(roots) == 1:\n x = roots[0]\n return x\n else:\n raise ValueError(\"There doesn't exist a corresponding engineering value or \"\n \"they are not unique:\", roots)", "def units(self):\n pass", "def _override_units_system(self):\n try:\n f = open(self.filename+\".units\")\n except OSError:\n return\n\n name_mapping = {'pos': 'distance', 'vel': 'velocity'}\n units_dict = {}\n\n for line in f:\n if (not line.startswith(\"#\")):\n if \":\" not in line:\n raise OSError(\"Unknown format for units file %r\"%(self.filename+\".units\"))\n else:\n t, u = list(map(str.strip,line.split(\":\")))\n t = name_mapping.get(t,t)\n units_dict[t] = u\n\n self.set_units_system(**units_dict)", "def convert(self):\n return _libsbml.SBMLUnitsConverter_convert(self)", "def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == \"PHQ\":\n return arr*2.5e6/1000.\n return arr", "def convert_units(celsius_value, units):\n if units == 0:\n return celsius_value\n if units == 1:\n return celsius_value * 1.8 + 32\n return celsius_value + 273.15", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def get_units(self):\n return str(self._modeler.GetModelUnits())", "def convert_units(src_unit: Union[str, float], tgt_unit: Union[str, float]):\n return _parse_unit(src_unit) / _parse_unit(tgt_unit)", "def getDistanceUnits(self) -> Unit:\n ...", "def ke(self):\n self._obj['w'] = (self._obj['u'])**2 + (self._obj['v'])**2\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'({vel_units})^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'({vel_units})^2')\n return self._obj", "def 
convert_units(self, time_units=None, len_units=None):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert drawdown data\n self.drawdown.convert_units(time_units, len_units)\n # Convert associate data units\n for i in range(self.data_count()):\n if self.data[i].dtype == 1: # drawdown units\n data_units = len_units\n elif self.data[i].dtype == 2: # first derivative units\n data_units = len_units + \"/\" + time_units\n elif self.data[i].dtype == 3: # second derivative units\n data_units = len_units + \"/\" + time_units + \"2\"\n self.data[i].convert_units(time_units, data_units)\n self.len_units = len_units\n self.time_units = time_units\n # End Function", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def convert_to(self, units: str) -> None:\n if self.units == units:\n return\n\n if units not in Variable.VALID_UNIT_CONVERSIONS[self.units]:\n msg = f\"\"\"Not a valid unit conversion. Valid destination units:\n {Variable.VALID_UNIT_CONVERSIONS[self.units]}\"\"\"\n raise ValueError(msg)\n\n if self.units == \"celsius\" and units == \"fahrenheit\":\n self._celsius_to_fahrenheit()\n elif self.units == \"m/s\" and units == \"km/h\":\n self._mps_to_kph()\n elif self.units == \"m/s\" and units == \"mph\":\n self._mps_to_mph()\n else:\n raise ValueError(\"Not a valid unit conversion.\")", "def _mps_to_kph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"km/h\"\n self.value = ((self.value * 360) / 100).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def convert_energy(self, event):\n try:\n #Compare other unit to one unit(joules)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Btu(th)\": 1054.35, \"Btu(mean)\": 1055.87, \"calories(IT)\": 4.1868, \"calories(th)\": 4.184, \"calories(mean)\": 4.19002, \"calories(15C)\": 4.1858, \"calories(20C)\": 4.1819, \"calories(food)\": 4186.0, \"centigrade heat units\": 1900.4, \"electron volts\": 1.60219 * 10 ** -19, \"ergs\": 1.0 * 10 ** -7, \"foot-pound force\": 1.355818, \"foot poundals\": 0.04214, \"gigajoules\": 1.0 * 10 ** 9, \"horsepower hours\": 2684520.0, \"inch-pound force\": 0.112985, \"joules\": 1.0, \"kilocalories(IT)\": 4186.8, \"kilocalories(th)\": 4184.0, \"kilogram-force meters\": 9.80665, \"kilojoules\": 1000.0, \"kilowatt hours\": 3600000.0, \"megajoules\": 1.0 * 10 ** 6, \"newton meters\": 1.0, \"therms\": 105505585.257348, \"watt seconds\": 1.0, \"watt hours\" : 3600.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In 
case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)", "def convert_energy_2_internal_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[self.current_units[\"energy\"]]\n \n # special handling for nano meters\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n #if val == 0.0:\n # return 0.0\n #return (1.0/val)/cfact\n else:\n return val*cfact", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV", "def setunits(self, *args, **kwargs):\n return _coordsys.coordsys_setunits(self, *args, **kwargs)", "def _raw_eng_to_phys(self, eng_value):\n return self.p(eng_value)", "def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def conv(old='auto', new='auto'):\n if old == new:\n return 1.\n for unittype in [lenunits, angunits, timunits, masunits, eneunits]:\n if old in unittype and new in unittype:\n return unittype[new] / unittype[old]\n\n raise ValueError('Units \\'{}\\' and \\'{}\\' unrecognized or '\n 'not of same unit type'.format(old, new))", "def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)", "def unit_of_measurement(self):\n if self.values.primary.units == \"C\":\n return TEMP_CELSIUS\n if self.values.primary.units == \"F\":\n return TEMP_FAHRENHEIT\n\n return self.values.primary.units", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_units\n\n if raw_units in [TEMP_CELSIUS, TEMP_FAHRENHEIT]:\n return self.hass.config.units.temperature_unit\n return raw_units", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def test_change_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n s.units = \"SI\"\n assert s.units == \"SI\"\n assert s.cv.units == \"kilojoule / kelvin / kilogram\"\n assert s.cp.units == \"kilojoule / kelvin / kilogram\"\n assert s.s.units == \"kilojoule / kelvin / kilogram\"\n assert s.h.units == \"kilojoule / kilogram\"\n assert s.T.units == \"degree_Celsius\"\n assert s.u.units == \"kilojoule / kilogram\"\n assert s.v.units == \"meter ** 3 / kilogram\"\n assert s.p.units == \"bar\"", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n 
self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def convert_H_kJmol(en_H):\n return en_H/kJmol_H", "def _mps_to_mph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"mph\"\n self.value = (self.value * 2.236936).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def getUnits(self):\n return _libsbml.Compartment_getUnits(self)", "def unit_of_measurement(self):\n return self._tasmota_entity.unit", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)", "def convert_force(self, event):\n try:\n #Compare other unit to one unit(newtons)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"dynes\": 0.00001, \"kilograms force\": 9.80665, \"kilonewtons\": 1000.0, \"kips\": 4448.222, \"meganewtons\": 1.0 * 10 ** 6, \"newtons\": 1.0, \"pounds force\": 4.448222, \"poundals\": 0.138255, \"sthene\": 1000.0, \"tonnes force\": 9806.65, \"tons force(UK)\": 9964.016418, \"tons force(US)\": 8896.443231}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def units(self):\n return self._units", "def units(self):\n return self._units", "def unit_of_measurement(self):\n return self._units", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * 
unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_speed(self, event):\n try:\n #Compare other unit to one unit(meters/second)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Mach number\": 340.2933, \"Nm/24hr\": 0.021435, \"centimeters/minute\": 0.000167, \"centimeters/second\": 0.01, \"feet/hour\": 8.5e-05, \"feet/minute\": 0.00508, \"feet/second\": 0.3048, \"inches/minute\": 0.000423, \"inches/second\": 0.0254, \"kilometers/hour\": 0.277778, \"kilometers/second\": 1000.0, \"knots\": 0.514444, \"meters/hour\": 0.000278, \"meters/minute\": 0.016667, \"meters/second\": 1.0, \"miles/hour\": 0.44704, \"miles/minute\": 26.8224, \"miles/second\": 1609.344, \"nautical miles/hour\": 0.514444, \"speed of light\": 299790000.0, \"speed of sound\": 343.0, \"yards/hour\": 0.000254, \"yards/minute\": 0.01524, \"yards/second\": 0.9144}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def to_axis_units(self, label, vals):\n if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar', 'P']:\n return vals / 1000\n elif label in ['T']:\n return vals\n else:\n raise ValueError(label)", "def convert_units(unt, origunits):\n if unt[0:3] == origunits[0:3] | unt[0:3] == 'ori':\n units = origunits\n convf = 1\n else:\n if 'fee' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'met':\n units = 'feet'\n convf = 3.2808399\n else:\n units = origunits\n convf = 1\n else:\n if 'met' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'fee':\n units = 'meters'\n convf = 0.3048\n else:\n units = origunits\n convf = 1\n else:\n if 'm/s' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'kno':\n units = 'meters/sec'\n convf = 0.51444444\n else:\n units = origunits\n convf = 1\n else:\n if 'kno' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'm/s':\n units = 'knots'\n convf = 1.9438445\n else:\n units = origunits\n convf = 1\n else:\n error('Unknown units')\n #\n return units, convf", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def _get_units_object(self, units):\n if isinstance(units, cellml_units):\n # We're done\n pass\n else:\n units = amara_parse_cellml(unicode(units))\n assert isinstance(units, cellml_units)\n return units", "def to_meters(d, d_unit):\n if d_unit == UOM_M:\n dm = d\n elif d_unit == UOM_KM:\n dm = d * 1000\n elif d_unit == UOM_FEET:\n dm = feet2m(d)\n elif d_unit == UOM_SM:\n dm = SM2m(d)\n elif d_unit == UOM_NM:\n dm = NM2m(d)\n return dm", "def E2V(E):\n# for energy in mev returns 
velocity in m/s\n return sqrt(E/5.227e-6)", "def unit_of_measurement(self):\n return self.var_units", "def convert_units(cube, units):\n try:\n cube.convert_units(units)\n except ValueError:\n if not _try_special_conversions(cube, units):\n raise\n\n return cube", "def units(self, x):\n u = ''.join([chr(d) for d in self[x]['units'][:]])\n if (u in ['n/a']) and (x in ['latitude', 'longitude']):\n u = 'radian' # assume radians\n return u", "def unit_of_measurement(self):\n unit = get_uom_from_status(self._device.status)\n if unit == HS_UNIT_LUX:\n return LIGHT_LUX\n elif unit == HS_UNIT_CELSIUS:\n return TEMP_CELSIUS\n elif unit == HS_UNIT_FAHRENHEIT:\n return TEMP_FAHRENHEIT\n elif unit == HS_UNIT_PERCENTAGE:\n return PERCENTAGE\n elif unit == HS_UNIT_A or unit == HS_UNIT_AMPERES:\n return ELECTRIC_CURRENT_AMPERE\n elif unit == HS_UNIT_KW:\n return POWER_KILO_WATT\n elif unit == HS_UNIT_KWH:\n return ENERGY_KILO_WATT_HOUR\n elif unit == HS_UNIT_V or unit == HS_UNIT_VOLTS:\n return ELECTRIC_POTENTIAL_VOLT\n elif unit == HS_UNIT_W or unit == HS_UNIT_WATTS:\n return POWER_WATT\n return None", "def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')", "def convert_length(self, event):\n try:\n #Compare other unit to one unit(meters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"angstroms\": 10 ** -10, \"au\": 149598550000.0, \"barleycorns\": 0.008467, \"cables\": 182.88, \"centimeters\": 0.01, \"chains\": 20.11684, \"decimeters\": 0.1, \"ells\": 0.875, \"ems\" : 0.004233, \"fathoms\": 1.8288, \"feet(UK & US)\": 0.3048, \"feet(US survey)\": 0.304801, \"furlongs\": 201.168, \"hands\": 0.1016, \"hectometers\": 100.0, \"inches\": 0.0254, \"kilometers\": 1000.0, \"light years\": 9460528405000000.0, \"meters\": 1.0, \"micrometers\": 0.000001, \"mil\": 0.0000254, \"miles(UK & US)\": 1609.344, \"miles(nautical, international)\": 1852.0, \"miles(nautical, UK)\": 1853.184, \"millimeters\": 0.001, \"nanometers\": 10 ** -9, \"parsecs\": 30856776000000000.0, \"picometers\": 10 ** -12, \"Scandinavian mile\": 10000.0, \"thou\": 0.0000254, \"yards\": 0.9144, \"links\": 0.2011684, \"pica\": 0.00423333, \"rods\": 5.0292, \"spans\": 0.2286}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _standardise_dtypes_and_units(cube: Cube) -> None:\n\n def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj:\n The object to be updated\n required_dtype:\n The dtype required\n\n Returns:\n The updated object\n \"\"\"\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n 
coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)", "def unit_of_measurement(self) -> Any:\n return TEMP_CELSIUS", "def test_measurment(self):\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"km\"), 6.214)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"m\"), 10.936)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"cm\"), 0.328)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"mm\"), 0.394)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"mi\"), 16.093)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"yd\"), 9.144)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"ft\"), 304.8)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"in\"), 254)", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_unit_of_measurement\n if raw_units in (TEMP_FAHRENHEIT, TEMP_CELSIUS):\n return self.hass.config.units.temperature_unit\n return raw_units", "def _dwd_kelvin_to_celsius(self, chn):\n if not self._is_solar_channel(chn) and \\\n (self[chn].info['units'] in ['K', 'degree Kelvin', 'KELVIN'] or\n self[chn].unit == 'K'):\n self[chn].data -= CONVERSION\n self[chn].info['units'] = self[chn].unit = 'C'", "def unit_of_measurement(self):\n if self.api_unit in TEMPERATURE_UNITS:\n return self.hass.config.units.temperature_unit\n\n if self.api_unit in LENGTH_UNITS:\n return self.hass.config.units.length_unit\n\n if self.api_unit in PRESSURE_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return self.hass.config.units.pressure_unit\n return PRESSURE_HPA\n\n if self.api_unit in FUEL_CONSUMPTION_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return FUEL_CONSUMPTION_MPG\n return FUEL_CONSUMPTION_L_PER_100KM\n\n return self.api_unit", "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]", "def cbToEngUnits( BoardNum, Range, DataVal, EngUnits = 0.0 ):\n EngUnits = ctypes.c_float( EngUnits )\n CHK( cbw.cbToEngUnits( BoardNum, Range, DataVal, byref( EngUnits ) ) )\n return EngUnits.value", "def convert_energy_2_current_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[units]\n \n # special handling for nanometers\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n else:\n return val/cfact", "def convert_kcalmol_kJmol(en_kcalmol):\n return en_kcalmol/kJmol_kcalmol", "def unit_of_measurement(self) -> str:\n return MS", "def to(self, unit, equivalencies=[], freq=None):\n\n if not isinstance(unit, u.Unit):\n unit = u.Unit(unit)\n\n if unit == self.unit:\n # No copying\n return self\n\n if ((self.unit.is_equivalent(u.Jy / u.beam) and\n not any({u.Jy/u.beam, u.K}.issubset(set(eq)) for eq in equivalencies))):\n # the 'not any' above checks that there is not already a defined\n # Jy<->K equivalency. 
If there is, the code below is redundant\n # and will cause problems.\n\n if hasattr(self, 'beams'):\n factor = (self.jtok_factors(equivalencies=equivalencies) *\n (self.unit*u.beam).to(u.Jy))\n else:\n # replace \"beam\" with the actual beam\n if not hasattr(self, 'beam'):\n raise ValueError(\"To convert objects with Jy/beam units, \"\n \"the object needs to have a beam defined.\")\n brightness_unit = self.unit * u.beam\n\n # create a beam equivalency for brightness temperature\n if freq is None:\n try:\n freq = self.with_spectral_unit(u.Hz).spectral_axis\n except AttributeError:\n raise TypeError(\"Object of type {0} has no spectral \"\n \"information. `freq` must be provided for\"\n \" unit conversion from Jy/beam\"\n .format(type(self)))\n else:\n if not freq.unit.is_equivalent(u.Hz):\n raise u.UnitsError(\"freq must be given in equivalent \"\n \"frequency units.\")\n\n bmequiv = self.beam.jtok_equiv(freq)\n # backport to handle astropy < 3: the beam equivalency was only\n # modified to handle jy/beam in astropy 3\n if bmequiv[0] == u.Jy:\n bmequiv.append([u.Jy/u.beam, u.K, bmequiv[2], bmequiv[3]])\n\n factor = brightness_unit.to(unit,\n equivalencies=bmequiv + list(equivalencies))\n\n else:\n # scaling factor\n factor = self.unit.to(unit, equivalencies=equivalencies)\n\n converted_array = (self.quantity * factor).value\n\n # use private versions of variables, not the generated property\n # versions\n # Not entirely sure the use of __class__ here is kosher, but we do want\n # self.__class__, not super()\n new = self.__class__(value=converted_array, unit=unit, copy=True,\n wcs=self._wcs, meta=self._meta, mask=self._mask,\n header=self._header)\n\n return new", "def unit_of_measurement(self):\n return self.values.primary.units", "def make_power_physical_units(power, dt):\n \n return power*dt", "def convert_pressure(self, event):\n try:\n #Compare other unit to one unit(pascals)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"atm\": 101325.0, \"bars\": 100000.0, \"centimeters mercury\": 1333.22, \"centimeters water\": 98.0665, \"feet of water\": 2989.06692, \"hectopascals\": 100.0, \"inches of mercury\": 3386.388, \"inches of water\": 249.08891, \"kilogram-force/sq.centimeter\": 98066.5, \"kilogram-force/sq.meter\": 9.80665, \"kilonewtons/sq.meter\": 1000.0, \"kilonewtons/sq.millimeter\": 1000000000.0, \"kilopascals\": 1000.0, \"kips/sq.inch\": 6894760.0, \"meganewtons/sq.meter\": 1000000.0, \"meganewtons/sq.millimeter\": 1000000000000.0, \"meters of water\": 9806.65, \"millibars\": 100.0, \"millimeters of mercury\": 133.322, \"millimeters of water\": 9.80665, \"newtons/sq.centimeter\": 10000.0, \"newtons/sq.meter\": 1.0, \"newtons/sq.millimeter\": 1000000.0, \"pascals\": 1.0, \"poundals/sq.foot\": 1.44816, \"pounds-force/sq.foot\": 47.88, \"pounds-force/sq.inch\": 6894.757, \"tonnes-force/sq.cm\": 98066500.0, \"tonnes-force/sq.meter\": 9806.65, \"tons(UK)-force/sq.foot\": 107251.0, \"tons(UK)-force/sq.inch\": 15444280.0, \"tons(US)-force/sq.foot\": 95760.0, \"tons(US)-force/sq.inch\": 13789500.0, \"torr\": 133.322}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = 
\"Value is invalid.\"\n self.print_text(printer)", "def solve(self):\n wort_gravity = self.property('start_gravity').to('sg') +\\\n (self.total_points().to('points') / self.property('wort_volume').to('gal') / 1000.0)\n self.property('wort_gravity', Quantity(wort_gravity, 'sg'))", "def native_unit_of_measurement(self) -> str:\n return f\"{CURRENCY_CENT}/{UnitOfVolume.LITERS}\"", "def unit_of_measurement(self):\n set_req = self.gateway.const.SetReq\n unit_map = {\n set_req.V_TEMP: (TEMP_CELSIUS\n if self.gateway.metric else TEMP_FAHRENHEIT),\n set_req.V_HUM: '%',\n set_req.V_DIMMER: '%',\n set_req.V_LIGHT_LEVEL: '%',\n set_req.V_WEIGHT: 'kg',\n set_req.V_DISTANCE: 'm',\n set_req.V_IMPEDANCE: 'ohm',\n set_req.V_WATT: 'W',\n set_req.V_KWH: 'kWh',\n set_req.V_FLOW: 'm',\n set_req.V_VOLUME: 'm3',\n set_req.V_VOLTAGE: 'V',\n set_req.V_CURRENT: 'A',\n }\n if float(self.gateway.protocol_version) >= 1.5:\n if set_req.V_UNIT_PREFIX in self._values:\n return self._values[\n set_req.V_UNIT_PREFIX]\n unit_map.update({set_req.V_PERCENTAGE: '%'})\n if float(self.gateway.protocol_version) >= 2.0:\n unit_map.update({\n set_req.V_ORP: 'mV',\n set_req.V_EC: 'μS/cm',\n set_req.V_VAR: 'var',\n set_req.V_VA: 'VA',\n })\n return unit_map.get(self.value_type)", "def _get_units(self):\n #assert self.ser.isOpen()\n\n self.serial_connection.write('UNI' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n unit = self.MEASUREMENT_UNITS[self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)]\n\n self.serial_connection.write(self.CR + self.LF)\n\n return unit", "def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')", "def resolve_units(obj, _):\n return obj.units.decode()", "def convert_units(self, time_units=None, len_units=None, pump_units=None,\n same=False):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Check new pumping rate units\n in_pump = self.pump_units\n if pump_units is None:\n pump_units = in_pump\n if same:\n pump_units = \"%s3/%s\" % (len_units, time_units)\n flag = _units.validate_units(pump_units)\n if flag == -1:\n raise ValueError('Bad pumping rate units input {}'.format(len_units))\n\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, 
len_units)\n # Convert pumping rate data\n self.pumprate.convert_units(time_units, pump_units)\n # Convert well data units\n for i in range(self.well_count()):\n self.wells[i].convert_units(time_units, len_units)\n # Set input units\n self.len_units = len_units\n self.time_units = time_units\n self.pump_units = pump_units\n # End Function", "def unit_of_measurement(self) -> str | None:\n # Highest priority, for registered entities: unit set by user,with fallback to\n # unit suggested by integration or secondary fallback to unit conversion rules\n if self._sensor_option_unit_of_measurement is not UNDEFINED:\n return self._sensor_option_unit_of_measurement\n\n # Second priority, for non registered entities: unit suggested by integration\n if not self.registry_entry and (\n suggested_unit_of_measurement := self.suggested_unit_of_measurement\n ):\n return suggested_unit_of_measurement\n\n # Third priority: Legacy temperature conversion, which applies\n # to both registered and non registered entities\n native_unit_of_measurement = self.native_unit_of_measurement\n\n if (\n self.device_class == SensorDeviceClass.TEMPERATURE\n and native_unit_of_measurement\n in {UnitOfTemperature.CELSIUS, UnitOfTemperature.FAHRENHEIT}\n ):\n return self.hass.config.units.temperature_unit\n\n # Fourth priority: Native unit\n return native_unit_of_measurement", "def set_units(self, units):\n self.units = units", "def convert_kcalmol_eV(en_kcalmol):\n return en_kcalmol*kcalmol_eV", "def unit_of_measurement(self):\n return self.sensor_type[\"unit\"]", "def convert_eV_kcalmol(en_eV):\n return en_eV/kcalmol_eV", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def setup_md_units(md_base_units: Dict[str, Union[str, float]]):\n # Initialize basic unit system\n md_base_units = {u: _parse_unit(md_base_units[u]) for u in md_base_units}\n\n # Set up unit dictionary\n units = Units(md_base_units)\n\n # Derived units (MD internal -> ASE internal)\n units[\"time\"] = units[\"length\"] * np.sqrt(units[\"mass\"] / units[\"energy\"])\n units[\"force\"] = units[\"energy\"] / units[\"length\"]\n units[\"stress\"] = units[\"energy\"] / units[\"length\"] ** 3\n units[\"pressure\"] = units[\"stress\"]\n\n # Conversion of length units\n units[\"A\"] = aseunits.Angstrom / units[\"length\"]\n units[\"Ang\"] = units[\"A\"]\n units[\"Angs\"] = units[\"A\"]\n units[\"Angstrom\"] = units[\"A\"]\n units[\"nm\"] = aseunits.nm / units[\"length\"]\n units[\"a0\"] = aseunits.Bohr / units[\"length\"]\n units[\"Bohr\"] = units[\"a0\"]\n\n # Conversion of energy units\n units[\"kcal\"] = aseunits.kcal / units[\"energy\"]\n units[\"kJ\"] = aseunits.kJ / units[\"energy\"]\n units[\"eV\"] = aseunits.eV / units[\"energy\"]\n units[\"Hartree\"] = aseunits.Hartree / units[\"energy\"]\n units[\"Ha\"] = units[\"Hartree\"]\n\n # Time units\n units[\"fs\"] = aseunits.fs / units[\"time\"]\n units[\"s\"] = aseunits.s / units[\"time\"]\n units[\"aut\"] = aseunits._aut * aseunits.s / units[\"time\"]\n\n # Pressure units\n units[\"Pascal\"] = aseunits.Pascal / units[\"pressure\"]\n units[\"bar\"] = 1e5 * units[\"Pascal\"]\n\n # Mol\n units[\"mol\"] = aseunits.mol\n\n # Mass\n units[\"Dalton\"] = 1.0 / units[\"mass\"]\n units[\"amu\"] = aseunits._amu / 
units[\"mass\"]\n\n # Charge distributions\n units[\"Debye\"] = aseunits.Debye / (units[\"charge\"] * units[\"length\"])\n units[\"C\"] = aseunits.C / units[\"charge\"]\n\n # Constants (internal frame)\n units[\"kB\"] = aseunits.kB / units[\"energy\"] # Always uses Kelvin\n units[\"hbar\"] = (\n aseunits._hbar * (aseunits.J * aseunits.s) / (units[\"energy\"] * units[\"time\"])\n ) # hbar is given in J*s by ASE\n units[\"ke\"] = (\n units[\"a0\"] * units[\"Ha\"] / units[\"charge\"] ** 2\n ) # Coulomb constant is 1 in atomic units\n\n # For spectra\n units[\"hbar2icm\"] = units[\"hbar\"] * 100.0 * aseunits._c * aseunits._aut\n\n return units", "def to_unit(self, unit):\n unit = _find_unit(unit)\n self.value = _convert_value(self.value, self.unit, unit)\n self.unit = unit", "def convert_kJmol_kcalmol(en_kJmol):\n return en_kJmol*kJmol_kcalmol", "def unit_of_measurement(self):\n return None", "def set_units_system(self, velocity=None, distance=None, mass=None, temperature=None):\n import configparser\n\n from .. import config_parser\n\n # if the units system doesn't exist (if this is a new snapshot), create\n # one\n if len(self._file_units_system) < 3:\n warnings.warn(\"Previous unit system incomplete -- using defaults\")\n self._file_units_system = [\n units.Unit(x) for x in ('G', '1 kpc', '1e10 Msol')]\n\n else:\n # we want to change the base units -- so convert to original\n # units first and then set all arrays to new unit system\n self.original_units()\n\n\n # if any are missing, work them out from what we already have:\n\n if velocity is None:\n velocity = self.infer_original_units('km s^-1')\n\n if distance is None:\n distance = self.infer_original_units('kpc')\n\n if mass is None:\n mass = self.infer_original_units('Msol')\n\n if temperature is None:\n temperature = self.infer_original_units('K')\n\n new_units = []\n for x in [velocity, distance, mass, temperature]:\n if x is not None:\n new_units.append(units.Unit(x))\n\n\n self._file_units_system = new_units\n\n # set new units for all known arrays\n for arr_name in list(self.keys()):\n arr = self[arr_name]\n # if the array has units, then use the current units, else\n # check if a default dimension for this array exists in\n # the configuration\n if arr.units != units.NoUnit():\n ref_unit = arr.units\n else:\n try:\n ref_unit = config_parser.get(\n 'default-array-dimensions', arr_name)\n except configparser.NoOptionError:\n # give up -- no applicable dimension found\n continue\n\n arr.set_units_like(ref_unit)", "def _get_units(self, name):\n meta = self._abs2meta\n\n if name in meta:\n return meta[name]['units']\n\n proms = self._prom2abs\n\n if name in proms['output']:\n abs_name = proms['output'][name][0]\n return meta[abs_name]['units']\n\n elif name in proms['input']:\n if len(proms['input'][name]) > 1:\n # The promoted name maps to multiple absolute names, require absolute name.\n msg = \"Can't get units for the promoted name '%s' because it refers to \" + \\\n \"multiple inputs: %s. Access the units using an absolute path name.\"\n raise RuntimeError(msg % (name, str(proms['input'][name])))\n\n abs_name = proms['input'][name][0]\n return meta[abs_name]['units']\n\n raise KeyError('Variable name \"{}\" not found.'.format(name))", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp" ]
[ "0.6702368", "0.661202", "0.6604063", "0.6344034", "0.61844754", "0.6114347", "0.60904294", "0.6029163", "0.6023127", "0.59929097", "0.5970022", "0.59694815", "0.5943942", "0.5935189", "0.59149885", "0.59114206", "0.5895953", "0.5884657", "0.58838505", "0.5863761", "0.5856587", "0.5852781", "0.5845475", "0.58259255", "0.58205277", "0.58083826", "0.578711", "0.57794905", "0.5750528", "0.57462955", "0.57405627", "0.56955665", "0.56879526", "0.56875783", "0.56855935", "0.5681583", "0.5663293", "0.5660534", "0.56505233", "0.5649003", "0.5642654", "0.5637654", "0.56375027", "0.5633675", "0.56258166", "0.5614941", "0.5614941", "0.56058294", "0.5601029", "0.5594353", "0.5592946", "0.5591142", "0.55889", "0.55785215", "0.5575195", "0.5570567", "0.55671924", "0.5555584", "0.55422807", "0.5533645", "0.55318916", "0.5521691", "0.552161", "0.55213684", "0.55171", "0.5511836", "0.54957026", "0.5495448", "0.54885286", "0.54782766", "0.54773396", "0.547664", "0.54721856", "0.54712665", "0.54646575", "0.5464598", "0.5464068", "0.54606146", "0.5458746", "0.54554635", "0.54549843", "0.54514265", "0.54483813", "0.54427975", "0.54402554", "0.542907", "0.5425745", "0.5423836", "0.5412843", "0.53985626", "0.5391975", "0.53902304", "0.5380302", "0.53654736", "0.53650784", "0.53505236", "0.5349555", "0.5340431", "0.5339175", "0.5337363" ]
0.5688327
32
Convert between physics and engineering units.
def _raw_phys_to_eng(self, physics_value):
    y = [val - physics_value for val in self.y]
    new_pp = PchipInterpolator(self.x, y)
    roots = new_pp.roots()
    if len(roots) == 1:
        x = roots[0]
        return x
    else:
        raise UniqueSolutionException("The function does not have any solution.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def original_units(self):\n self.physical_units(distance=self.infer_original_units('km'),\n velocity=self.infer_original_units('km s^-1'),\n mass=self.infer_original_units('Msol'), persistent=False)", "def useUnits():", "def units(self, key):\n \n # Strip any operators\n _, key = get_operator(key)\n \n # Fill out aliases \n if key in component_from_alias:\n key = component_from_alias[key]\n elif key == 'E':\n key = 'electricField'\n elif key == 'B':\n key = 'magneticField' \n \n return pg_units(key)", "def _raw_phys_to_eng(self, physics_value):\n roots = (self.p - physics_value).roots\n if len(roots) == 1:\n x = roots[0]\n return x\n else:\n raise ValueError(\"There doesn't exist a corresponding engineering value or \"\n \"they are not unique:\", roots)", "def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier", "def convert_energy_2_internal_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[self.current_units[\"energy\"]]\n \n # special handling for nano meters\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n #if val == 0.0:\n # return 0.0\n #return (1.0/val)/cfact\n else:\n return val*cfact", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def convert_energy(self, event):\n try:\n #Compare other unit to one unit(joules)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Btu(th)\": 1054.35, \"Btu(mean)\": 1055.87, \"calories(IT)\": 4.1868, \"calories(th)\": 4.184, \"calories(mean)\": 4.19002, \"calories(15C)\": 4.1858, \"calories(20C)\": 4.1819, \"calories(food)\": 4186.0, \"centigrade heat units\": 1900.4, \"electron volts\": 1.60219 * 10 ** -19, \"ergs\": 1.0 * 10 ** -7, \"foot-pound force\": 1.355818, \"foot poundals\": 0.04214, \"gigajoules\": 1.0 * 10 ** 9, \"horsepower hours\": 2684520.0, \"inch-pound force\": 0.112985, \"joules\": 1.0, \"kilocalories(IT)\": 4186.8, \"kilocalories(th)\": 4184.0, \"kilogram-force meters\": 9.80665, \"kilojoules\": 1000.0, \"kilowatt hours\": 3600000.0, \"megajoules\": 1.0 * 10 ** 6, \"newton meters\": 1.0, \"therms\": 105505585.257348, \"watt seconds\": 1.0, \"watt hours\" : 3600.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) 
+ str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_eV_kJmol(en_eV):\n return en_eV/kJmol_eV", "def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)", "def convert_units(data, units):\n # Build the dictionary of units conversions\n convert = {'m' : [1.0, 0., 'm'], \n 'meter' : [1.0, 0., 'm'], \n 'deg C' : [1.0, 273.15, 'K'], \n 'Celsius' : [1.0, 273.15, 'K'], \n 'K' : [1.0, 0., 'K'],\n 'db' : [1.e4, 101325., 'Pa'], \n 'Pa' : [1.0, 0., 'Pa'],\n 'mg/m^3': [1.e-6, 0., 'kg/m^3'], \n 'S/m': [1.0, 0., 'S/m'],\n 'mS/m' : [1.e-3, 0., 'S/m'],\n 'psu': [1.0, 0., 'psu'], \n 'salinity': [1.0, 0., 'psu'], \n 'kg/m^3': [1.0, 0., 'kg/m^3'], \n 'kilogram meter-3': [1.0, 0., 'kg/m^3'], \n 'm/s': [1.0, 0., 'm/s'], \n 'mg/l': [1.e-3, 0., 'kg/m^3'],\n 'meter second-1' : [1.0, 0., 'm/s'],\n 'm.s-1' : [1.0, 0., 'm/s'],\n 'pH units' : [1.0, 0., 'pH units'],\n 'MPa' : [1.e6, 0., 'Pa'],\n '--' : [1.0, 0., '--'],\n 'mD' : [9.869233e-16, 0., 'm^2'],\n 'um' : [1.e-6, 0., 'm'],\n 'm/s 1e-9' : [1.e-9, 0., 'm/s'],\n 'm/s 1e-7' : [1.e-7, 0., 'm/s'],\n 'wt.%' : [10., 0., 'psu'],\n '10^-15 m^2' : [1.e-15, 0., 'm^2'],\n 'm^2' : [1., 0., 'm^2'],\n 'kg/m^2/year' : [3.168808781402895e-08, 0., 'kg/m^2/s'] \n } \n \n # Make sure the data are a numpy array and the units are a list\n if isinstance(data, float) or isinstance(data, int):\n data = np.array([data])\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(units, str) or isinstance(units, unicode):\n units = [units]\n if units == None:\n units = ['']\n \n # Make sure you can slice through the columns: must be two-dimensional\n sh = data.shape\n data = np.atleast_2d(data)\n \n # Allow conversion of a row of data if all of the same unit\n if len(units) == 1 and data.shape[1] > 1:\n data = data.transpose()\n \n # Create an emtpy array to hold the output\n out_data = np.zeros(data.shape)\n out_units = []\n \n # Convert the units\n for i in range(len(units)):\n try:\n out_data[:,i] = data[:,i] * convert[units[i]][0] + \\\n convert[units[i]][1]\n out_units += [convert[units[i]][2]]\n except KeyError:\n print('Do not know how to convert %s to mks units' % units[i])\n print('Continuing without converting these units...')\n out_data[:,i] = data[:,i]\n out_units += units[i]\n \n # Return the converted data in the original shape\n out_data = np.reshape(out_data, sh, 'C')\n return (out_data, out_units)", "def convert_units(celsius_value, units):\n if units == 0:\n return celsius_value\n if units == 1:\n return celsius_value * 1.8 + 32\n return celsius_value + 273.15", "def get_units(self):\n return str(self._modeler.GetModelUnits())", "def get_units(self,):\n self.UNITS = {'pressure':'Pa',}\n return", "def convert(self):\n return _libsbml.SBMLUnitsConverter_convert(self)", "def convert_to(self, units: str) -> None:\n if self.units == units:\n return\n\n if units not in Variable.VALID_UNIT_CONVERSIONS[self.units]:\n msg = f\"\"\"Not a valid unit conversion. 
Valid destination units:\n {Variable.VALID_UNIT_CONVERSIONS[self.units]}\"\"\"\n raise ValueError(msg)\n\n if self.units == \"celsius\" and units == \"fahrenheit\":\n self._celsius_to_fahrenheit()\n elif self.units == \"m/s\" and units == \"km/h\":\n self._mps_to_kph()\n elif self.units == \"m/s\" and units == \"mph\":\n self._mps_to_mph()\n else:\n raise ValueError(\"Not a valid unit conversion.\")", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_kJmol_eV(en_kJmol):\n return en_kJmol*kJmol_eV", "def ke(self):\n self._obj['w'] = (self._obj['u'])**2 + (self._obj['v'])**2\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'({vel_units})^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'({vel_units})^2')\n return self._obj", "def convert_units(src_unit: Union[str, float], tgt_unit: Union[str, float]):\n return _parse_unit(src_unit) / _parse_unit(tgt_unit)", "def _override_units_system(self):\n try:\n f = open(self.filename+\".units\")\n except OSError:\n return\n\n name_mapping = {'pos': 'distance', 'vel': 'velocity'}\n units_dict = {}\n\n for line in f:\n if (not line.startswith(\"#\")):\n if \":\" not in line:\n raise OSError(\"Unknown format for units file %r\"%(self.filename+\".units\"))\n else:\n t, u = list(map(str.strip,line.split(\":\")))\n t = name_mapping.get(t,t)\n units_dict[t] = u\n\n self.set_units_system(**units_dict)", "def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)", "def units(self):\n pass", "def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == 
\"PHQ\":\n return arr*2.5e6/1000.\n return arr", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def convert_energy_2_current_u(self,val):\n units = self.current_units[\"energy\"]\n cfact = conversion_facs_energy[units]\n \n # special handling for nanometers\n if units == \"nm\":\n # zero is interpretted as zero energy\n try:\n ret = numpy.zeros(val.shape, dtype=val.dtype)\n ret[val!=0.0] = 1.0/val[val!=0]\n return ret/cfact\n except: \n return (1.0/val)/cfact\n else:\n return val/cfact", "def convert_units(self, time_units=None, len_units=None):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert drawdown data\n self.drawdown.convert_units(time_units, len_units)\n # Convert associate data units\n for i in range(self.data_count()):\n if self.data[i].dtype == 1: # drawdown units\n data_units = len_units\n elif self.data[i].dtype == 2: # first derivative units\n data_units = len_units + \"/\" + time_units\n elif self.data[i].dtype == 3: # second derivative units\n data_units = len_units + \"/\" + time_units + \"2\"\n self.data[i].convert_units(time_units, data_units)\n self.len_units = len_units\n self.time_units = time_units\n # End Function", "def convert_kcalmol_eV(en_kcalmol):\n return en_kcalmol*kcalmol_eV", "def getDistanceUnits(self) -> Unit:\n ...", "def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def conv(old='auto', new='auto'):\n if old == new:\n return 1.\n for unittype in [lenunits, angunits, timunits, masunits, eneunits]:\n if old in unittype and new in unittype:\n return unittype[new] / unittype[old]\n\n raise ValueError('Units \\'{}\\' and \\'{}\\' unrecognized or '\n 'not of same unit type'.format(old, new))", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def convert_H_kJmol(en_H):\n return en_H/kJmol_H", "def _raw_eng_to_phys(self, eng_value):\n return self.p(eng_value)", "def _mps_to_kph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"km/h\"\n self.value = ((self.value * 360) / 100).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def convert_eV_kcalmol(en_eV):\n return en_eV/kcalmol_eV", "def cbToEngUnits( 
BoardNum, Range, DataVal, EngUnits = 0.0 ):\n EngUnits = ctypes.c_float( EngUnits )\n CHK( cbw.cbToEngUnits( BoardNum, Range, DataVal, byref( EngUnits ) ) )\n return EngUnits.value", "def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)", "def _dwd_kelvin_to_celsius(self, chn):\n if not self._is_solar_channel(chn) and \\\n (self[chn].info['units'] in ['K', 'degree Kelvin', 'KELVIN'] or\n self[chn].unit == 'K'):\n self[chn].data -= CONVERSION\n self[chn].info['units'] = self[chn].unit = 'C'", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def unit_of_measurement(self):\n if self.values.primary.units == \"C\":\n return TEMP_CELSIUS\n if self.values.primary.units == \"F\":\n return TEMP_FAHRENHEIT\n\n return self.values.primary.units", "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "def convert_units(unt, origunits):\n if unt[0:3] == origunits[0:3] | unt[0:3] == 'ori':\n units = origunits\n convf = 1\n else:\n if 'fee' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'met':\n units = 'feet'\n convf = 3.2808399\n else:\n units = origunits\n convf = 1\n else:\n if 'met' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'fee':\n units = 'meters'\n convf = 0.3048\n else:\n units = origunits\n convf = 1\n else:\n if 'm/s' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'kno':\n units = 'meters/sec'\n convf = 0.51444444\n else:\n units = origunits\n convf = 1\n else:\n if 'kno' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'm/s':\n units = 'knots'\n convf = 1.9438445\n else:\n 
units = origunits\n convf = 1\n else:\n error('Unknown units')\n #\n return units, convf", "def convert_units(cube, units):\n try:\n cube.convert_units(units)\n except ValueError:\n if not _try_special_conversions(cube, units):\n raise\n\n return cube", "def convert_force(self, event):\n try:\n #Compare other unit to one unit(newtons)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"dynes\": 0.00001, \"kilograms force\": 9.80665, \"kilonewtons\": 1000.0, \"kips\": 4448.222, \"meganewtons\": 1.0 * 10 ** 6, \"newtons\": 1.0, \"pounds force\": 4.448222, \"poundals\": 0.138255, \"sthene\": 1000.0, \"tonnes force\": 9806.65, \"tons force(UK)\": 9964.016418, \"tons force(US)\": 8896.443231}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _raw_eng_to_phys(self, eng_value):\n return self.pp(eng_value)", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def convert_kcalmol_kJmol(en_kcalmol):\n return en_kcalmol/kJmol_kcalmol", "def test_change_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n s.units = \"SI\"\n assert s.units == \"SI\"\n assert s.cv.units == \"kilojoule / kelvin / kilogram\"\n assert s.cp.units == \"kilojoule / kelvin / kilogram\"\n assert s.s.units == \"kilojoule / kelvin / kilogram\"\n assert s.h.units == \"kilojoule / kilogram\"\n assert s.T.units == \"degree_Celsius\"\n assert s.u.units == \"kilojoule / kilogram\"\n assert s.v.units == \"meter ** 3 / kilogram\"\n assert s.p.units == \"bar\"", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_units\n\n if raw_units in [TEMP_CELSIUS, TEMP_FAHRENHEIT]:\n return self.hass.config.units.temperature_unit\n return raw_units", "def to(self, unit, equivalencies=[], freq=None):\n\n if not isinstance(unit, u.Unit):\n unit = u.Unit(unit)\n\n if unit == self.unit:\n # No copying\n return self\n\n if ((self.unit.is_equivalent(u.Jy / u.beam) and\n not any({u.Jy/u.beam, u.K}.issubset(set(eq)) for eq in equivalencies))):\n # the 'not any' above checks that there is not already a defined\n # Jy<->K equivalency. If there is, the code below is redundant\n # and will cause problems.\n\n if hasattr(self, 'beams'):\n factor = (self.jtok_factors(equivalencies=equivalencies) *\n (self.unit*u.beam).to(u.Jy))\n else:\n # replace \"beam\" with the actual beam\n if not hasattr(self, 'beam'):\n raise ValueError(\"To convert objects with Jy/beam units, \"\n \"the object needs to have a beam defined.\")\n brightness_unit = self.unit * u.beam\n\n # create a beam equivalency for brightness temperature\n if freq is None:\n try:\n freq = self.with_spectral_unit(u.Hz).spectral_axis\n except AttributeError:\n raise TypeError(\"Object of type {0} has no spectral \"\n \"information. 
`freq` must be provided for\"\n \" unit conversion from Jy/beam\"\n .format(type(self)))\n else:\n if not freq.unit.is_equivalent(u.Hz):\n raise u.UnitsError(\"freq must be given in equivalent \"\n \"frequency units.\")\n\n bmequiv = self.beam.jtok_equiv(freq)\n # backport to handle astropy < 3: the beam equivalency was only\n # modified to handle jy/beam in astropy 3\n if bmequiv[0] == u.Jy:\n bmequiv.append([u.Jy/u.beam, u.K, bmequiv[2], bmequiv[3]])\n\n factor = brightness_unit.to(unit,\n equivalencies=bmequiv + list(equivalencies))\n\n else:\n # scaling factor\n factor = self.unit.to(unit, equivalencies=equivalencies)\n\n converted_array = (self.quantity * factor).value\n\n # use private versions of variables, not the generated property\n # versions\n # Not entirely sure the use of __class__ here is kosher, but we do want\n # self.__class__, not super()\n new = self.__class__(value=converted_array, unit=unit, copy=True,\n wcs=self._wcs, meta=self._meta, mask=self._mask,\n header=self._header)\n\n return new", "def setunits(self, *args, **kwargs):\n return _coordsys.coordsys_setunits(self, *args, **kwargs)", "def convert_speed(self, event):\n try:\n #Compare other unit to one unit(meters/second)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Mach number\": 340.2933, \"Nm/24hr\": 0.021435, \"centimeters/minute\": 0.000167, \"centimeters/second\": 0.01, \"feet/hour\": 8.5e-05, \"feet/minute\": 0.00508, \"feet/second\": 0.3048, \"inches/minute\": 0.000423, \"inches/second\": 0.0254, \"kilometers/hour\": 0.277778, \"kilometers/second\": 1000.0, \"knots\": 0.514444, \"meters/hour\": 0.000278, \"meters/minute\": 0.016667, \"meters/second\": 1.0, \"miles/hour\": 0.44704, \"miles/minute\": 26.8224, \"miles/second\": 1609.344, \"nautical miles/hour\": 0.514444, \"speed of light\": 299790000.0, \"speed of sound\": 343.0, \"yards/hour\": 0.000254, \"yards/minute\": 0.01524, \"yards/second\": 0.9144}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def _get_units_object(self, units):\n if isinstance(units, cellml_units):\n # We're done\n pass\n else:\n units = amara_parse_cellml(unicode(units))\n assert isinstance(units, cellml_units)\n return units", "def unit_of_measurement(self) -> str | None:\n # Highest priority, for registered entities: unit set by user,with fallback to\n # unit suggested by integration or secondary fallback to unit conversion rules\n if self._sensor_option_unit_of_measurement is not UNDEFINED:\n return self._sensor_option_unit_of_measurement\n\n # Second priority, for non registered entities: unit suggested by integration\n if not self.registry_entry and (\n suggested_unit_of_measurement := self.suggested_unit_of_measurement\n ):\n return suggested_unit_of_measurement\n\n # Third priority: Legacy temperature conversion, which applies\n # to both registered and non registered entities\n native_unit_of_measurement = self.native_unit_of_measurement\n\n if (\n self.device_class == SensorDeviceClass.TEMPERATURE\n and native_unit_of_measurement\n in {UnitOfTemperature.CELSIUS, 
UnitOfTemperature.FAHRENHEIT}\n ):\n return self.hass.config.units.temperature_unit\n\n # Fourth priority: Native unit\n return native_unit_of_measurement", "def unit_of_measurement(self):\n unit = get_uom_from_status(self._device.status)\n if unit == HS_UNIT_LUX:\n return LIGHT_LUX\n elif unit == HS_UNIT_CELSIUS:\n return TEMP_CELSIUS\n elif unit == HS_UNIT_FAHRENHEIT:\n return TEMP_FAHRENHEIT\n elif unit == HS_UNIT_PERCENTAGE:\n return PERCENTAGE\n elif unit == HS_UNIT_A or unit == HS_UNIT_AMPERES:\n return ELECTRIC_CURRENT_AMPERE\n elif unit == HS_UNIT_KW:\n return POWER_KILO_WATT\n elif unit == HS_UNIT_KWH:\n return ENERGY_KILO_WATT_HOUR\n elif unit == HS_UNIT_V or unit == HS_UNIT_VOLTS:\n return ELECTRIC_POTENTIAL_VOLT\n elif unit == HS_UNIT_W or unit == HS_UNIT_WATTS:\n return POWER_WATT\n return None", "def unit_of_measurement(self):\n return self._tasmota_entity.unit", "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]", "def getUnits(self):\n return _libsbml.Compartment_getUnits(self)", "def to_axis_units(self, label, vals):\n if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar', 'P']:\n return vals / 1000\n elif label in ['T']:\n return vals\n else:\n raise ValueError(label)", "def cu_energy(self,val,units=\"1/cm\"):\n if units in self.units[\"energy\"]:\n x = conversion_facs_energy[units]\n i_val = x*val\n \n cu = self.current_units[\"energy\"] \n if cu != \"1/fs\":\n y = conversion_facs_energy[units] \n return i_val/y\n \n return i_val", "def test_measurment(self):\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"km\"), 6.214)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"m\"), 10.936)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"cm\"), 0.328)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"mm\"), 0.394)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"mi\"), 16.093)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"yd\"), 9.144)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"ft\"), 304.8)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"in\"), 254)", "def convert_kJmol_kcalmol(en_kJmol):\n return en_kJmol*kJmol_kcalmol", "def unit_of_measurement(self):\n return self._units", "def _mps_to_mph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"mph\"\n self.value = (self.value * 2.236936).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def units(self):\n return self._units", "def units(self):\n return self._units", "def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def parse_engineering( string, unit = \"\" ):\n if not string.endswith(unit):\n raise ValueError(\"string '%s' is missing the unit '%s'\" % (string, unit))\n if unit:\n string = string[:-len(unit)]\n\n m = re.match(r\"\\s*([\\+\\-]?[.0-9]+)\\s*([a-zA-Z]*)\\s*\", string)\n if not m:\n raise ValueError(\"string '%s' cannot be parsed\" % string)\n x = m.group(1)\n mod = m.group(2)\n conv = {'a':1e-18, 'f':1e-15, 'p':1e-12, 'n':1e-9, 
'u':1e-6,\n 'm':1e-3 , 'c':1e-2 , 'd':1e-1 , '':1.0 , 'k':1e3 ,\n 'M':1e6 , 'G':1e9 , 'T':1e12 , 'P':1e15, 'E':1e18}\n return float(x) * conv[mod]", "def solve(self):\n wort_gravity = self.property('start_gravity').to('sg') +\\\n (self.total_points().to('points') / self.property('wort_volume').to('gal') / 1000.0)\n self.property('wort_gravity', Quantity(wort_gravity, 'sg'))", "def convert_pressure(self, event):\n try:\n #Compare other unit to one unit(pascals)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"atm\": 101325.0, \"bars\": 100000.0, \"centimeters mercury\": 1333.22, \"centimeters water\": 98.0665, \"feet of water\": 2989.06692, \"hectopascals\": 100.0, \"inches of mercury\": 3386.388, \"inches of water\": 249.08891, \"kilogram-force/sq.centimeter\": 98066.5, \"kilogram-force/sq.meter\": 9.80665, \"kilonewtons/sq.meter\": 1000.0, \"kilonewtons/sq.millimeter\": 1000000000.0, \"kilopascals\": 1000.0, \"kips/sq.inch\": 6894760.0, \"meganewtons/sq.meter\": 1000000.0, \"meganewtons/sq.millimeter\": 1000000000000.0, \"meters of water\": 9806.65, \"millibars\": 100.0, \"millimeters of mercury\": 133.322, \"millimeters of water\": 9.80665, \"newtons/sq.centimeter\": 10000.0, \"newtons/sq.meter\": 1.0, \"newtons/sq.millimeter\": 1000000.0, \"pascals\": 1.0, \"poundals/sq.foot\": 1.44816, \"pounds-force/sq.foot\": 47.88, \"pounds-force/sq.inch\": 6894.757, \"tonnes-force/sq.cm\": 98066500.0, \"tonnes-force/sq.meter\": 9806.65, \"tons(UK)-force/sq.foot\": 107251.0, \"tons(UK)-force/sq.inch\": 15444280.0, \"tons(US)-force/sq.foot\": 95760.0, \"tons(US)-force/sq.inch\": 13789500.0, \"torr\": 133.322}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n 
enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def unit_of_measurement(self) -> str:\n raw_units = self.raw_unit_of_measurement\n if raw_units in (TEMP_FAHRENHEIT, TEMP_CELSIUS):\n return self.hass.config.units.temperature_unit\n return raw_units", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def unit_of_measurement(self):\n return self.values.primary.units", "def native_unit_of_measurement(self) -> str:\n return f\"{CURRENCY_CENT}/{UnitOfVolume.LITERS}\"", "def unit_of_measurement(self):\n set_req = self.gateway.const.SetReq\n unit_map = {\n set_req.V_TEMP: (TEMP_CELSIUS\n if self.gateway.metric else TEMP_FAHRENHEIT),\n set_req.V_HUM: '%',\n set_req.V_DIMMER: '%',\n set_req.V_LIGHT_LEVEL: '%',\n set_req.V_WEIGHT: 'kg',\n set_req.V_DISTANCE: 'm',\n set_req.V_IMPEDANCE: 'ohm',\n set_req.V_WATT: 'W',\n set_req.V_KWH: 'kWh',\n set_req.V_FLOW: 'm',\n set_req.V_VOLUME: 'm3',\n set_req.V_VOLTAGE: 'V',\n set_req.V_CURRENT: 'A',\n }\n if float(self.gateway.protocol_version) >= 1.5:\n if set_req.V_UNIT_PREFIX in self._values:\n return self._values[\n set_req.V_UNIT_PREFIX]\n unit_map.update({set_req.V_PERCENTAGE: '%'})\n if float(self.gateway.protocol_version) >= 2.0:\n unit_map.update({\n set_req.V_ORP: 'mV',\n set_req.V_EC: 'μS/cm',\n set_req.V_VAR: 'var',\n set_req.V_VA: 'VA',\n })\n return unit_map.get(self.value_type)", "def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def _get_units(self):\n #assert self.ser.isOpen()\n\n self.serial_connection.write('UNI' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n unit = self.MEASUREMENT_UNITS[self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)]\n\n self.serial_connection.write(self.CR + self.LF)\n\n return unit", "def convert_H_kcalmol(en_H):\n return en_H/kcalmol_H", "def _standardise_dtypes_and_units(cube: Cube) -> None:\n\n def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj:\n The object to be updated\n required_dtype:\n The dtype required\n\n Returns:\n The updated object\n \"\"\"\n if obj.dtype != 
required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)", "def unit_of_measurement(self):\n if self.api_unit in TEMPERATURE_UNITS:\n return self.hass.config.units.temperature_unit\n\n if self.api_unit in LENGTH_UNITS:\n return self.hass.config.units.length_unit\n\n if self.api_unit in PRESSURE_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return self.hass.config.units.pressure_unit\n return PRESSURE_HPA\n\n if self.api_unit in FUEL_CONSUMPTION_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return FUEL_CONSUMPTION_MPG\n return FUEL_CONSUMPTION_L_PER_100KM\n\n return self.api_unit", "def convert_length(self, event):\n try:\n #Compare other unit to one unit(meters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"angstroms\": 10 ** -10, \"au\": 149598550000.0, \"barleycorns\": 0.008467, \"cables\": 182.88, \"centimeters\": 0.01, \"chains\": 20.11684, \"decimeters\": 0.1, \"ells\": 0.875, \"ems\" : 0.004233, \"fathoms\": 1.8288, \"feet(UK & US)\": 0.3048, \"feet(US survey)\": 0.304801, \"furlongs\": 201.168, \"hands\": 0.1016, \"hectometers\": 100.0, \"inches\": 0.0254, \"kilometers\": 1000.0, \"light years\": 9460528405000000.0, \"meters\": 1.0, \"micrometers\": 0.000001, \"mil\": 0.0000254, \"miles(UK & US)\": 1609.344, \"miles(nautical, international)\": 1852.0, \"miles(nautical, UK)\": 1853.184, \"millimeters\": 0.001, \"nanometers\": 10 ** -9, \"parsecs\": 30856776000000000.0, \"picometers\": 10 ** -12, \"Scandinavian mile\": 10000.0, \"thou\": 0.0000254, \"yards\": 0.9144, \"links\": 0.2011684, \"pica\": 0.00423333, \"rods\": 5.0292, \"spans\": 0.2286}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def convert_units(self, time_units=None, len_units=None, pump_units=None,\n same=False):\n in_time = self.time_units\n # Check new time units\n if time_units is None:\n time_units = in_time\n flag = _units.validate_units(time_units)\n if flag == -1:\n raise ValueError('Bad time units input {}'.format(time_units))\n # Check new length units\n in_len = self.len_units\n if len_units is None:\n len_units = in_len\n flag = _units.validate_units(len_units)\n if flag == -1:\n raise ValueError('Bad length units input {}'.format(len_units))\n # Check new pumping rate units\n in_pump = self.pump_units\n if pump_units is None:\n pump_units = in_pump\n if same:\n pump_units = \"%s3/%s\" % (len_units, time_units)\n flag = _units.validate_units(pump_units)\n if flag == -1:\n raise ValueError('Bad 
pumping rate units input {}'.format(len_units))\n\n # Convert parameters units\n for key, value in self.parameters.items():\n if type(value) in [int, float]:\n self.parameters[key] = _units.units_conversion(value, in_len, len_units)\n # Convert pumping rate data\n self.pumprate.convert_units(time_units, pump_units)\n # Convert well data units\n for i in range(self.well_count()):\n self.wells[i].convert_units(time_units, len_units)\n # Set input units\n self.len_units = len_units\n self.time_units = time_units\n self.pump_units = pump_units\n # End Function", "def unit_of_measurement(self):\n return self.var_units", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')", "def unit_of_measurement(self) -> Any:\n return TEMP_CELSIUS", "def geten(self):\n lat = self.getlatlon()[0]\n return (0.5*self._sm*(self._vr**2 + self._vt**2 + self._vp**2) +\n forces.wgs84_pot(self._r, lat)*self._sm)\n # G*self._mm*self._sm/self._r)", "def _uni_to_diff(self, v, omega):\n\n# print(\"--MuleBot._uni_to_diff({:.3f}, {:.3f})\".format(v, omega))\n loggerMB.debug(\"--MuleBot._uni_to_diff({:.3f}, {:.3f})\".format(v, omega))\n\n # v = translation velocity (m/s)\n # omega = angular velocity (rad/s)\n\n # For some reason, it is necessary to multiply the angle by -1.\n # TODO: Probably have to put this back in.\n omega *= -1.0\n\n inches_per_meter = 39.3701\n circumference_in = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n circumference_m = circumference_in / inches_per_meter\n radians_per_circumference = 2.0\n # R = roll?(meters/radian)\n R = circumference_m / radians_per_circumference\n\n # Get info in inches\n Lin = MuleBot.WHEEL_BASE_LENGTH\n # Convert inches to meters\n Lm = Lin / inches_per_meter\n\n # All measurements are now metric.\n v_l = ( (2.0 * v) - (omega * Lm) ) / (2.0 * R)\n v_r = ( (2.0 * v) + (omega * Lm) ) / (2.0 * R)\n loggerMB.debug(\"--MuleBot._uni_to_diff v_l, v_r: {:.3f}, {:.3f}\".format(v_l, v_r))\n\n rpm_l = self.rps_to_rpm(v_l)\n rpm_r = self.rps_to_rpm(v_r)\n# print(\"--MuleBot._uni_to_diff rpm_l, rpm_r: {:.3f}, {:.3f}\".format(rpm_l, rpm_r))\n loggerMB.debug(\"--MuleBot._uni_to_diff rpm_l, rpm_r: {:.3f}, {:.3f}\".format(rpm_l, rpm_r))\n\n return v_l, v_r", "def convert(self):\n return _libsbml.SBMLInferUnitsConverter_convert(self)", "def to_meters(d, d_unit):\n if d_unit == UOM_M:\n dm = d\n elif d_unit == UOM_KM:\n dm = d * 1000\n elif d_unit == UOM_FEET:\n dm = feet2m(d)\n elif d_unit == UOM_SM:\n dm = SM2m(d)\n elif d_unit == UOM_NM:\n dm = NM2m(d)\n return dm", "def test_unit_conversion(self):\n self.cube_uv_down.convert_units(\"kW m-2\")\n scale_factor = 1.0\n expected = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=0.1\n )\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def convert_H_eV(en_H):\n return en_H/eV_H", "def _get_units(self, name):\n meta = self._abs2meta\n\n if name in meta:\n return meta[name]['units']\n\n proms = self._prom2abs\n\n if name in proms['output']:\n abs_name = proms['output'][name][0]\n return meta[abs_name]['units']\n\n elif name in proms['input']:\n if len(proms['input'][name]) > 1:\n # The promoted name maps to multiple absolute names, require absolute name.\n msg = \"Can't get units for the 
promoted name '%s' because it refers to \" + \\\n \"multiple inputs: %s. Access the units using an absolute path name.\"\n raise RuntimeError(msg % (name, str(proms['input'][name])))\n\n abs_name = proms['input'][name][0]\n return meta[abs_name]['units']\n\n raise KeyError('Variable name \"{}\" not found.'.format(name))", "def convert_fuelconsumption(self, event):\n try:\n #Compare other unit to one unit(liters/100 kilometer)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n if current_value != 0:\n unit_comp = {\"car(2014 US Average)\": 9.260417, \"gallon(UK)/100 miles\": 2.824809, \"gallon(US)/100 miles\": 2.352146, \"kilometer/liter\": 100.0 / (current_value ** 2), \"liters/100 kilometer\": 1.0, \"liters/meter\": 100000.0, \"miles/gallon(UK)\": 282.480936 / (current_value ** 2), \"miles/gallon(US)\": 235.214583 / (current_value ** 2)}\n else: #In case current_value == 0, it will error coz number division by zero.\n unit_comp = {\"car(2014 US Average)\": 1.0, \"gallon(UK)/100 miles\": 1.0, \"gallon(US)/100 miles\": 1.0, \"kilometer/liter\": 1.0, \"liters/100 kilometer\": 1.0, \"liters/meter\": 1.0, \"miles/gallon(UK)\": 1.0, \"miles/gallon(US)\": 1.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)" ]
[ "0.67884886", "0.65156615", "0.6370421", "0.62429684", "0.60435265", "0.60196847", "0.59708744", "0.59549516", "0.59494585", "0.59487826", "0.59480655", "0.591996", "0.5908836", "0.590394", "0.5892939", "0.58907676", "0.58825135", "0.58816415", "0.5879758", "0.5878579", "0.5834652", "0.58009183", "0.579033", "0.57901025", "0.5783464", "0.5773485", "0.5744693", "0.57161945", "0.57076705", "0.5697574", "0.5696437", "0.56928194", "0.5675286", "0.5655954", "0.5650785", "0.5647491", "0.5645676", "0.5641953", "0.5639426", "0.5638304", "0.5626676", "0.56151414", "0.5610815", "0.5610746", "0.56017226", "0.55971813", "0.5592975", "0.55918", "0.55850744", "0.55806005", "0.5574443", "0.5571404", "0.5568221", "0.5563917", "0.55595124", "0.55365306", "0.5517177", "0.5515644", "0.5515515", "0.5503816", "0.5488333", "0.54800034", "0.54791677", "0.54677075", "0.5467437", "0.546374", "0.54593176", "0.54522955", "0.54510224", "0.54510224", "0.5442614", "0.5436857", "0.5428591", "0.54252756", "0.54036176", "0.53952324", "0.53919107", "0.53915", "0.5388711", "0.53873014", "0.53855306", "0.5380819", "0.5380178", "0.5379003", "0.53786266", "0.5377345", "0.53772956", "0.5375169", "0.5368997", "0.53680503", "0.536593", "0.5365505", "0.5363762", "0.53589594", "0.5358799", "0.5346344", "0.53370255", "0.5327439", "0.5324261", "0.5322217", "0.53111446" ]
0.0
-1
This function builds the final dataset to be used, computing and adding the new WsRF and the service classification
def make_dataset(interim_file_path, processed_file_path, weights, version):
    qws_wsrf, qws_complete_numpy_array = src.dataset.compute_wsrf.compute_wsrf(interim_file_path, weights)
    # qws_complete_numpy_array_temp = np.append(qws_complete_numpy_array, qws_wsrf[:, np.newaxis], axis=1)
    qws_wsrf_level = np.array([])
    for score in qws_wsrf:
        if(score > 0.78):
            level = 1
        elif(score > 0.7):
            level = 2
        elif(score > 0.65):
            level = 3
        else:
            level = 4
        score = np.append(score, level)
        qws_wsrf_level = np.append(qws_wsrf_level, score)
    qws_wsrf_level = qws_wsrf_level.reshape(qws_wsrf.shape[0], 2)
    if(version == 1):
        qws_complete_numpy_array[:, 9:11] = qws_wsrf_level
    elif(version == 2):
        qws_complete_numpy_array = np.hstack((qws_complete_numpy_array, np.zeros((qws_wsrf.shape[0], 2))))
        qws_complete_numpy_array[:, 11:13] = qws_complete_numpy_array[:, 9:11]
        qws_complete_numpy_array[:, 9:11] = qws_wsrf_level
    else:
        print("Version has to be either 1 or 2")
    qws_complete_dataframe_new = pd.DataFrame(qws_complete_numpy_array)
    qws_complete_dataframe_new = qws_complete_dataframe_new.astype({10: int})
    qws_complete_dataframe_new.to_csv(processed_file_path, header=False, index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test 
in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n 
mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def main():\n\n logger.info('Process 
initiated - Building dataset')\n\n if os.path.isfile(train_path) and os.path.isfile(test_path):\n logger.info('Loading pickled data')\n return pd.read_pickle(train_path), pd.read_pickle(test_path)\n\n logger.info('Reading COSMIC Cancer Gene Census')\n gene_census = cancer_gene_census()\n gene_census.extend(civic_cancer_genes())\n\n gene_census = set(gene_census)\n\n training_data = pd.DataFrame()\n testing_data = pd.DataFrame()\n\n for cancer_type in cancer_types:\n data_file_name = cancer_type + \".meth.by_mean.data.txt\"\n data_file_location = os.path.join(data_location, data_file_name)\n\n logger.info('Reading Methylation data for {}'.format(cancer_type))\n\n methyl_data = pd.read_csv(data_file_location, delimiter='\\t', skiprows=[1], index_col=0)\n\n logger.info(\n 'Number of Genes: {0} | Number of Patients: {1}'.format(methyl_data.shape[0], methyl_data.shape[1]))\n logger.info('Preprocessing Methylation data')\n\n methyl_data = genes_feature_selection(methyl_data, gene_census)\n\n logger.info('Number of Genes after processing: {0}\\n'.format(methyl_data.shape[0]))\n\n methyl_data = add_classification_label(methyl_data)\n methyl_data = methyl_data.transpose()\n\n normal_cases = methyl_data[methyl_data['Tumor'] == 0]\n logger.info(normal_cases.shape)\n train_normal_cases = normal_cases.sample(frac=0.7, random_state=200)\n logger.info(train_normal_cases.shape)\n test_normal_cases = normal_cases.drop(train_normal_cases.index)\n logger.info(train_normal_cases.shape)\n\n tumor_cases = methyl_data[methyl_data['Tumor'] != 0]\n logger.info(tumor_cases.shape)\n train_tumor_cases = tumor_cases.sample(frac=0.7, random_state=200)\n logger.info(train_tumor_cases.shape)\n\n test_tumor_cases = tumor_cases.drop(train_tumor_cases.index)\n logger.info(test_tumor_cases.shape)\n\n training_data = training_data.append(train_normal_cases)\n training_data = training_data.append(train_tumor_cases)\n\n testing_data = testing_data.append(test_normal_cases)\n testing_data = testing_data.append(test_tumor_cases)\n\n training_data = training_data.sample(frac=1)\n testing_data = testing_data.sample(frac=1)\n\n logger.info('Pickling training and testing data')\n training_data.to_pickle(train_path)\n testing_data.to_pickle(test_path)\n\n logger.info('Processing completed!')\n visualize_data(training_data)\n\n return training_data, testing_data", "def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, 
rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. 
Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def training(df, type=None):\r\n df=dataCleaner(df[DISC_FEATURES_COL_TO_USE+CONT_FEATURES_COL_TO_USE+[DISC_TARGET_COL_TO_USE]])\r\n print(\"Using %d numbers of features\"%len(DISC_FEATURES_COL_TO_USE + CONT_FEATURES_COL_TO_USE))\r\n df_coded = trainEncode(df)\r\n df_coded = scalarNormalizer(df_coded)\r\n visualizeHistogram(df_coded)\r\n # visualizePCA(df_coded)\r\n df_shuffled = df_coded.sample(frac=1, random_state=100).reset_index(drop=True)\r\n X, y = df_shuffled[DISC_FEATURES_COL_TO_USE + CONT_FEATURES_COL_TO_USE], df_shuffled[DISC_TARGET_COL_TO_USE]\r\n X, y = resampling(X, y)\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = TEST_FR, random_state = 42)\r\n\r\n print(\"Training the classifier!\")\r\n if type=='LR':\r\n print(\"Using Logistic Regression Classifier\")\r\n cls=LogisticRegression(n_jobs=2, class_weight='balanced', tol=1e-4, C=1,random_state=111)\r\n elif type=='SVM':\r\n print(\"Using Support Vector Machine Classifier\")\r\n cls=SVC(class_weight='balanced', probability=True)\r\n elif type=='RF':\r\n print(\"Using Random Forst Classifier\")\r\n cls=RandomForestClassifier( n_jobs=3, n_estimators=8192, class_weight='balanced', max_depth=8,\r\n min_samples_leaf=1, random_state=24)\r\n elif type=='GBC':\r\n print(\"Using Gradient Boosting Classifier\")\r\n cls = GradientBoostingClassifier(n_estimators=2048, max_depth=4,\r\n subsample=0.8, learning_rate=0.004,\r\n random_state=34, min_samples_split=4,\r\n max_features=\r\n int(0.4*len(DISC_FEATURES_COL_TO_USE+\r\n CONT_FEATURES_COL_TO_USE)))\r\n else:\r\n print(\"Using Naive Bayes Classifier\")\r\n cls = GaussianNB()\r\n model = cls.fit(X_train, y_train)\r\n print (\"Cross-validated scores:\", cross_val_score(model, X_train, y_train, cv=10))\r\n print (\"Score:\", model.score(X_test, y_test))\r\n predict_test = model.predict(X_test)\r\n\r\n print('precision_score=%f\\nrecall_score=%f'%(precision_score(y_test, predict_test),recall_score(y_test, predict_test)))\r\n\r\n print(metrics.roc_auc_score(y_test, predict_test))\r\n\r\n cm=confusion_matrix(y_test, predict_test)\r\n print(\"Confusion matrix:\\n\" + str(cm))\r\n # showConfusionMatrix(cm)\r\n\r\n pickle.dump(model, open(MODEL_FILENAME, 'wb'))\r\n print(\"Model Created!\")", "def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # 
Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def stepwise_regression(train_per_list, cut_off_list, C_list,\n factors,non_factors, data_path, executable_path):\n sql_table = 'aggregated_ctr' #Data table\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n # remove cross terms\n\n factors+=['campaign_id','ad_account_id','pub_account_id', \n 'campaign_id*site', 'ad*pub_account_id']\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n con_dict_dse={'host':'db.lqm.io','db':'dse','user':'dse','passwd':'dSe@lQm'}\n rtb_flag=[0,1]\n \n test_per_list= map(lambda x: ( add_hour(x[1], 1), add_hour(x[1], 3)), train_per_list)\n \n # test period is next 3 hours after end of training period\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n MySQL_save_data_loop(con_dict_dse, sql_table,\n train_per_list, sql_features, rtb_flag, data_path)\n MySQL_save_data_loop(con_dict_dse, sql_table,\n test_per_list, sql_features, rtb_flag, data_path)\n \n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n\n \n res_df_list=[]\n trial_factors=[]\n remaining_factors=factors[:]\n while len(remaining_factors):\n results = [] \n # we assume we cannot load all the data in memory\n # so we have to reload for every step of stepwise selection\n for train_per, test_per in zip(train_per_list, test_per_list):\n \n train_df=load_data(data_path,train_per)\n test_df=load_data(data_path,test_per)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for fac in remaining_factors:\n trial_factors.append(fac)\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, 
len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n pCTR = libLinear_functions.predict(\n executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss =\\\n libLinear_functions.log_loss_weighted(pCTR, amounts)\n results.append([train_per[:], tuple(trial_factors),fac, cut_off, C, mean_log_loss])\n # what to do if ERROR?\n trial_factors.remove(fac)\n res_df=pd.DataFrame(results,columns=['train_per','factors','add_factor','cut_off','C','mean_log_loss'])\n res_avg=res_df.groupby(['factors','add_factor','cut_off','C']).agg([np.mean,np.std])\n best_params=res_avg['mean_log_loss','mean'].argmin()\n best_fac=best_params[1]\n remaining_factors.remove(best_fac)\n trial_factors.append(best_fac)\n res_df_list.append(res_df)\n results_df=pd.concat(res_df_list)\n return results_df", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n 
train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n housing[\"TotalBsmtSF\"] + housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps = list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()", "def prepare_train_data(self):\r\n ## Impute rlkpis\r\n print(\"Imputing rlKPI df\")\r\n self.rlkpi.add_target_labels(1)\r\n self.rlkpi.impute_rl_kpis()\r\n\r\n print(\"Add 'met-real-station_no' & met-forecast-station_no to rl_kpis_df\")\r\n self.add_met_real_forecast_station_col_to_rlkpis()\r\n print(\"Merge 'met-real-sampled df to rl kps \")\r\n self.merge_met_real_sampled_df_to_rlkpis()\r\n\r\n ## Imputations for met-forecast\r\n print(\"Impute met-forecast\")\r\n met_forecast_obj = self.metfcast\r\n met_forecast_obj.impute_met_forecast()\r\n\r\n #Merge met forecast data to earlier merged data\r\n print(\"Merge Train data with imputed forecast df\")\r\n self.train_data = pd.merge(self.train_data,\r\n met_forecast_obj.imputed_forecast_df,\r\n 
on=['datetime-station_no'], indicator=True, how='inner')\r\n print(\"Check any imputation needed\", self.train_data.isna().sum().sum())\r\n self.train_data.drop(['_merge'], axis=1, inplace=True)\r\n self.perform_data_under_sampling(self.train_data)", "def trainModel( self, featureTrain, classTrain):", "def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df", "def preprocessing(train_raw_, test_raw_):\n \n undef = np.float64(-999.0)\n pred_dict = {'s':'1','b':'0', '?':'-1'}\n # drop 1st column (Id) and also 1st row with column names (\"[1:,\") \n train_raw = train_raw_[1:, :]\n test_raw = test_raw_[1:, :] \n \n # Change s(signal) and b(background) for s:1 and b:0, and change '?' 
for -1\n train_raw[:,1] = np.vectorize(pred_dict.get)(train_raw[:,1].astype(str))\n test_raw[:,1] = np.vectorize(pred_dict.get)(test_raw[:,1].astype(str))\n \n # Divide the dataset in four according to PRI_jet_num feature and cast to float\n train_data_jets = divide_dataset_by_jet(train_raw)\n test_data_jets = divide_dataset_by_jet(test_raw)\n \n # Remove columns with nan values or with standard deviation of 0\n test_data_jets, train_data_jets = clean_features(test_data_jets, train_data_jets, undef)\n \n # Standardize train and test sets to have mean=0 and std=1\n train_data_jets, test_data_jets = standardize(train_data_jets, test_data_jets)\n \n # Replace remaining undefined values by mean, median or zero\n train_data_mean, train_data_median, train_data_null = replace_nan(train_data_jets)\n test_data_mean, test_data_median, test_data_null = replace_nan(test_data_jets)\n \n return train_data_mean, train_data_median, train_data_null, test_data_mean, test_data_median, test_data_null", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def data_setup(self):\n # Make sure the dataset is download and put into the data folder\n training_data = pd.read_csv('./data/train.csv', sep=',', nrows=self.training_dataset_size)\n testing_data = pd.read_csv('./data/test.csv', sep=',' , nrows=self.training_dataset_size)\n question_list1 = training_data['question1']\n question_list2 = training_data['question2']\n is_duplicate = training_data['is_duplicate']\n # for will\n X = []\n Y = []\n for i in range(0, 1000):\n print(\"*\"*20, i ,\"*\"*20 )\n feature = self.call_feature_generator(question_list1[i],question_list2[i], self.feature_code )\n X.append(feature)\n Y.append(is_duplicate[i])\n print(feature)\n print(is_duplicate[i])\n print(question_list1[i])\n print(question_list2[i])\n\n # we train classifier\n\n classifer = self.call_classifier(X, Y, self.classifier_code)\n\n # testing\n testX = []\n testY = []\n\n for i in range(1001, 1500):\n print(\"-\"*20, i ,\"-\"*20 )\n feature = self.call_feature_generator(question_list1[i],question_list2[i], self.feature_code )\n testX.append(feature)\n testY.append(is_duplicate[i])\n\n X= np.array(testX).reshape(-1,1)\n\n calculate_y = classifer.predict(X)\n\n print(calculate_y)\n tp = 0.0\n fp = 0.0\n fn = 0.0\n\n for i in range(0, len(calculate_y)):\n if calculate_y[i] == testY[i]:\n print(\"Tp : \", testX[i], question_list1[i], question_list2[i], is_duplicate[i] )\n tp += 1.0\n else:\n if testY[i] == 1 and calculate_y[i] == 0:\n print(\"Fn : \", testX[i] , question_list1[i], question_list2[i], is_duplicate[i] )\n fn += 1.0\n else:\n print(\"Fp : \", testX[i], question_list1[i], question_list2[i], is_duplicate[i])\n fp += 1.0\n\n print(\"Tp: \", tp, \" Fp: \", fp, \" Fn: \", fn)\n print(\"Accuracy \", tp/( tp+fn), \"%\")\n\n result = precision_recall_fscore_support(testY, calculate_y)\n print (\"Precision: Class 1 - \", result[0][0], \"% and Class 0 - \", result[0][1], \"%\")\n print (\"Recall: Class 1 - \", result[1][0], \"% and Class 0 - \", result[1][1], \"%\")\n print (\"F-Score: Class 1 - \", result[2][0], \"% and Class 0 - \", result[2][1], \"%\")", "def prepare_nfold_datasets(self): # i.e. 
split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def main(self):\n\n assault_mech_df = self.get_mech_df(url=self.assault_url)\n heavy_mech_df = self.get_mech_df(url=self.heavy_url)\n medium_mech_df = self.get_mech_df(url=self.medium_url)\n light_mech_df = self.get_mech_df(url=self.light_url)\n all_weights_df = pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df, \n light_mech_df])\n\n self.save_data(assault_mech_df, \"assault\")\n self.save_data(heavy_mech_df, \"heavy\")\n self.save_data(medium_mech_df, \"medium\")\n self.save_data(light_mech_df, \"light\")\n self.save_data(all_weights_df, \"all_weights\")\n #get maximum new columns needed for splitting variants\n max_cols = all_weights_df.variants.apply(lambda x: len(x)).max()\n melt_cols = []\n\n for i in range(max_cols):\n all_weights_df[\"var_\"+str(i)] = \"\"\n melt_cols.append(\"var_\"+str(i))\n\n variant_weights_df = pd.DataFrame()\n for index, row in all_weights_df.iterrows():\n for i in range(len(row[\"variants\"])):\n #add each variant to variant weights as a row with mech, tonnage, variant\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n for i in range(len(row[\"hero_chassis\"])):\n new_row_dict = {\n \"mech_name\":row[\"hero_names\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"hero_chassis\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n\n for i in range(len(row[\"special_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"special_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, 
index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df]) \n\n #add champion variants by matching on \n for i in range(len(row[\"champion_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"champion_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n #remove duplicate rows \n variant_weights_df = variant_weights_df[variant_weights_df.duplicated(keep=\"first\")==False]\n self.save_data(variant_weights_df, \"variant_weights\")", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", accuracy_score(self.TestSetY, pred1, normalize=True))", "def main():\n tpd_file_name = get_nonexisting_file(\"Enter name of new tpd file: \")\n tpd = TrainPredictData(tpd_file_name)\n\n print \"You can now enter the file paths of the the newly created tpd file.\"\n print \"If you want to skip a data set, just press enter without typing anything.\"\n\n train_raw_path = get_existing_file(\"Enter training raw path: \", skip=True)\n if train_raw_path is not None:\n train_raw_key = extract_h5_key(train_raw_path, \"Enter training raw h5 key: \")\n tpd.set_train_raw(train_raw_path, train_raw_key)\n\n train_gt_path = get_existing_file(\"Enter training gt path: \", skip=True)\n if train_gt_path is not None:\n train_gt_key = extract_h5_key(train_gt_path, \"Enter training gt h5 key: \")\n tpd.set_train_gt(train_gt_path, train_gt_key)\n\n train_pred_path = get_existing_file(\"Enter training pred path: \", skip=True)\n if train_pred_path is not None:\n train_pred_key = extract_h5_key(train_pred_path, \"Enter training pred h5 key: \")\n tpd.set_train_pred(train_pred_path, train_pred_key)\n\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n while train_feat_path is not None:\n train_feat_key = extract_h5_key(train_feat_path, \"Enter training feature path: \")\n tpd.add_train_feature(train_feat_path, train_feat_key)\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n\n test_raw_path = get_existing_file(\"Enter test raw path: \", skip=True)\n if test_raw_path is not None:\n test_raw_key = extract_h5_key(test_raw_path, \"Enter test raw h5 key: \")\n tpd.set_test_raw(test_raw_path, test_raw_key)\n\n test_gt_path = get_existing_file(\"Enter test gt path: \", skip=True)\n if test_gt_path is not None:\n test_gt_key = 
extract_h5_key(test_gt_path, \"Enter test gt h5 key: \")\n tpd.set_test_gt(test_gt_path, test_gt_key)\n\n test_pred_path = get_existing_file(\"Enter test pred path: \", skip=True)\n if test_pred_path is not None:\n test_pred_key = extract_h5_key(test_pred_path, \"Enter test pred h5 key: \")\n tpd.set_test_pred(test_pred_path, test_pred_key)\n\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n while test_feat_path is not None:\n test_feat_key = extract_h5_key(test_feat_path, \"Enter test feature path: \")\n tpd.add_test_feature(test_feat_path, test_feat_key)\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n\n return 0", "def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER", "def _build_wmt_filtered(self, half=False):\n paracrawl_files = [\n self.data_dir + '/' + wmt_paracrawl % i for i in range(40)\n ]\n europarl_files = [\n self.data_dir + '/' + wmt_euro % i for i in range(4)\n ]\n newscomment_files = [\n self.data_dir + '/' + wmt_newscomment % i for i in range(4)\n ]\n commoncrawl_files = [\n self.data_dir + '/' + wmt_commoncrawl % i for i in range(1)\n ]\n\n pc_data = tf.data.experimental.CsvDataset(\n paracrawl_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n euro_data = tf.data.experimental.CsvDataset(\n europarl_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n nc_data = tf.data.experimental.CsvDataset(\n 
newscomment_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n cc_data = tf.data.experimental.CsvDataset(\n commoncrawl_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n\n pc_eval_data = pc_data.skip(10000).take(5000)\n euro_eval_data = euro_data.skip(10000).take(5000)\n nc_eval_data = nc_data.skip(10000).take(5000)\n cc_eval_data = cc_data.skip(10000).take(5000)\n\n pc_train_data = pc_data.skip(15000)\n euro_train_data = euro_data.skip(15000)\n nc_train_data = nc_data.skip(15000)\n cc_train_data = cc_data.skip(15000)\n\n if half:\n pc_train_data = pc_train_data.take(14_125_429)\n euro_train_data = euro_train_data.take(89_725)\n nc_train_data = nc_train_data.take(125_726)\n cc_train_data = cc_train_data.take(747_389)\n\n # Save these examples for testing\n # this is not intended to be uncommented. It just shows\n # pseudo-code for which examples are saved for testing.\n # pc_test_data = pc_data.take(10000)\n # euro_test_data = euro_data.take(10000)\n # nc_test_data = nc_data.take(10000)\n # cc_test_data = cc_data.take(10000)\n\n eval_data = tf.data.experimental.sample_from_datasets(\n [pc_eval_data, euro_eval_data, nc_eval_data, cc_eval_data], seed=42)\n eval_data = eval_data.cache()\n\n train_data = tf.data.experimental.sample_from_datasets(\n [pc_train_data, euro_train_data, nc_train_data, cc_train_data],\n weights=[0.9375, 0.0054, 0.00785, 0.0491], seed=42)\n train_data = train_data.cache() # only read once\n\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def train_calibration(config):\n run_dates = pd.date_range(start=config.start_dates[\"train\"],\n end=config.end_dates[\"train\"],\n freq='1D').strftime(config.run_date_format)\n \n target_calib_models = {}\n print()\n print('Loading Data')\n\n for size_index,size in enumerate(config.size_threshold):\n target_calib_models[size] = {}\n train_files, target_files = [], []\n for date in run_dates: \n train_data_files = glob(config.train_data_path+ \\\n \"20{2}/netcdf/*{0}*unsmoothed*_{1}_*{2}*{3}*{4}.nc\".format(\n config.forecast_model_names,size,date,\n config.start_hour,config.end_hour))\n if len(train_data_files) < 1:\n continue\n if config.sector:\n target_data_files = glob(config.target_data_path+'{0}*{1}*{2}*.nc'.format(\n date,size,config.sector)) \n else:\n target_data_files = glob(config.target_data_path+'{0}*{1}*.nc'.format(\n date,size))\n if len(target_data_files) < 1:\n continue\n train_files.append(train_data_files[0])\n target_files.append(target_data_files[0])\n \n date_indices = [index for index in range(len(train_files))]\n percent_train_indices = int(len(train_files)*0.70)\n t_data = [Dataset(x).variables[\"Data\"][:] for x in train_files] \n tar_data = [Dataset(x).variables[\"24_Hour_All_12z_12z\"][:] for x in target_files] \n print()\n print('Number of files:')\n print('Train (70%): {0}'.format(int(len(t_data)*0.70)))\n print('Validate (30%): {0}'.format(int(len(t_data)*0.30)))\n print()\n for ind,model_name in enumerate(config.calibration_model_names):\n bs = []\n random_models = []\n print('Random Cross-Validation, {0} >{1}mm'.format(model_name,size)) \n random_seed = random.sample(range(1, 100), 10)\n for s,seed in enumerate(random_seed):\n np.random.seed(seed)\n print('Index',s, 'Random Seed', seed)\n train_indices = 
np.random.choice(date_indices, percent_train_indices, replace=False)\n test_indices = [ind for ind in date_indices if ind not in train_indices]\n \n train_data = np.array(t_data)[train_indices].ravel()\n target_train_data = np.array(tar_data)[train_indices].ravel()\n \n val_data = np.array(t_data)[test_indices].ravel()\n target_val_data = np.array(tar_data)[test_indices].ravel()\n \n model = deepcopy(config.calibration_model_objs[ind])\n model.fit(train_data,target_train_data)\n random_models.append(model)\n \n predict = model.transform(val_data)\n \n #plt.figure(figsize=(9, 6))\n #plt.plot(sorted(val_data),model.transform(sorted(val_data)))\n #plt.xlabel('data')\n #plt.ylabel('calibrated')\n #plt.show()\n #plt.close()\n\n print(brier_score(predict, target_val_data))\n bs.append(brier_score(predict, target_val_data))\n \n best_bs = np.argmin(bs)\n target_calib_models[size][model_name] = np.array(random_models)[best_bs]\n print('Lowest Brier Score: {0}'.format(np.array(bs)[best_bs]))\n print()\n print()\n return target_calib_models", "def create_train_set(addition_to_filename = ''):\n df = f.create_df()\n print('original df size is ', df.shape)\n print('original df columns ', df.columns)\n\n df = f.calculate_missing_prices_for_train_set(df)\n print('df size after averaging price ', df.shape)\n df = f.downcast_dtypes(df)\n df = f.add_lag(all_data_df = df, df_to_add_lag= df, number_of_months=3)\n df = f.add_days_stat(df)\n\n print(df.columns)\n\n # df = h.add_holidays(df)\n # print('df size with holidays ', df.shape)\n\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n pickle.dump(df, open(f\"{timestr}_{addition_to_filename}_train.pickle.dat\", \"wb\"))\n\n # save feature names for further use\n features_list = create_feature_names_list(df)\n pickle.dump(features_list, open(f\"{timestr}_{addition_to_filename}_features.pickle.dat\", \"wb\"))", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : 
{self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def train(self):\n\t\traise NotImplementedError", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def train_classifiers(params):\n # Create result dataframe\n out = pd.DataFrame(\n columns=[\"Dataset\", \"Classifier\", \"Accuracy\", \"F1\", \"Precision\", \"Recall\"])\n\n for model_type, all_languages in params.items():\n print(\"Classifier: \", str(model_type))\n\n for language, all_targets in all_languages.items():\n print(language)\n for target, model_params in all_targets.items():\n print(target)\n print(model_params)\n\n datasets = sample_datasets(\n language, target, SAMPLING, TFIDF, model_params['top_k_words'], SUB_SAMPLE_RERUNS)\n\n # Iterate the datasets\n for data_id, dataset in enumerate(datasets):\n dataset_name = dataset[0]\n data = dataset[1]\n y = np.array(dataset[2])\n val_data = dataset[3]\n val_y = np.array(dataset[4])\n\n acc_scores = []\n pre_scores = []\n rec_scores = []\n f1_scores = []\n \n global X_train\n X_train, X_test = data, val_data\n y_train, y_test = y, val_y\n y_pred = None\n\n # Create model instance.\n model = mlp_model(layers=model_params['hidden_layers'], units=model_params['hidden_units'], dropout_rate=model_params['dropout_rate'],\n input_shape=X_train.shape[1:], num_classes=2)\n optimizer = tf.keras.optimizers.Adam(\n lr=model_params['learning_rate'])\n model.compile(optimizer=optimizer,\n loss='binary_crossentropy', metrics=['acc'])\n\n # Stop training is validation loss doesnt decrease for 3 steps\n callbacks = [tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', patience=3)]\n\n # Train and validate model.\n history = model.fit(\n X_train,\n y_train,\n epochs=model_params['epochs'],\n callbacks=callbacks,\n validation_data=(X_test, y_test),\n verbose=0,\n batch_size=512)\n\n acc_scores.append(\n history.history['val_acc'][-1])\n y_pred = [round(a[0])\n for a in model.predict(X_test)]\n\n # Compute the results\n prfs = precision_recall_fscore_support(\n y_test, y_pred, warn_for=[])\n\n pre_scores.append(prfs[0].mean())\n rec_scores.append(prfs[1].mean())\n f1_scores.append(prfs[2].mean())\n\n # Append average scores\n clf_acc = np.array(acc_scores).mean()\n clf_pre = np.array(pre_scores).mean()\n clf_rec = np.array(rec_scores).mean()\n clf_f1 = np.array(f1_scores).mean()\n\n out = out.append(pd.DataFrame(\n [[dataset_name, model_type, clf_acc, clf_f1, clf_pre, clf_rec]], columns=out.columns), ignore_index=True)\n\n return out", "def existing_data(self):\n # Set the directory and file name\n data_summary_dir = op.join('../logs', self.name, 'data_summary')\n file_name = 'Train_Test_Summary_generative.csv'\n\n # Read the csv and obtain the train data list\n df = pd.read_csv(op.join(data_summary_dir, file_name))\n train_data = df['Train Data'].dropna().values.tolist()\n test_data = df['Test Data'].dropna().values.tolist()\n\n train_data_list, test_data_list = [], []\n for single_train in train_data:\n data_name = single_train.split('_')[0]\n if data_name == 'LTRC':\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4]\n else:\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4] + 
'_' + single_train.split('_')[5]\n full_data_name = single_train.split('_')[0] + '_' + single_train.split('_')[1] + '_' + single_train.split('_')[2] + '_' + series\n train_data_list.append(full_data_name)\n\n for single_test in test_data:\n data_name = single_test.split('_')[0]\n if data_name == 'LTRC':\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4]\n else:\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4] + '_' + single_test.split('_')[5]\n full_data_name = single_test.split('_')[0] + '_' + single_test.split('_')[1] + '_' + single_test.split('_')[2] + '_' + series\n test_data_list.append(full_data_name)\n\n # Obtain the label map and CT list and file names\n label_map_list = glob(op.join(self.save_root_dir, 'source_data_2', '*'))\n ct_list = glob(op.join(self.save_root_dir, 'target_data_2', '*'))\n\n label_map_files = [single_file.split('/')[-1] for single_file in label_map_list]\n ct_files = [single_file.split('/')[-1] for single_file in ct_list]\n label_map_files.sort(), ct_files.sort()\n\n # Initialize empty list\n existing_train_lm, existing_train_ct = [], []\n existing_test_lm, existing_test_ct = [], []\n\n for single_lm, single_ct in zip(label_map_files, ct_files):\n\n ct_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\n lm_data_name = single_lm.split('_')[0] + '_' + single_lm.split('_')[1] + '_' + single_lm.split('_')[2]\n\n assert ct_data_name == lm_data_name, 'Data is not the same.'\n\n data_name = single_ct.split('_')[0]\n if data_name == 'LTRC':\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4]\n else:\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4] + '_' + single_ct.split('_')[5]\n full_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\\\n + '_' + series\n\n if full_data_name in train_data_list:\n existing_train_lm.append(single_lm)\n existing_train_ct.append(single_ct)\n if full_data_name in test_data_list:\n existing_test_lm.append(single_lm)\n existing_test_ct.append(single_ct)\n existing_train_data = [existing_train_lm, existing_train_ct]\n existing_test_data = [existing_test_lm, existing_test_ct]\n return existing_train_data, existing_test_data", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def build_dataset(self): \n start_time = datetime.datetime.now()\n self.func_log(\"\\n\\tIn build_dataset()\")\n \n self.dict_feature = {}\n for key,value in self.key_points.items():\n category = []\n buff_time = datetime.datetime.now()\n for img in value:\n histogram = np.zeros(len(self.visual_words))\n for each_feature in img:\n ind = self.find_index(each_feature, self.visual_words)\n histogram[ind] += 1\n category.append(histogram)\n self.dict_feature[key] = category\n \n buff_time = datetime.datetime.now() - buff_time\n self.func_log(\"\\t\\tKEY: {} finish, Time cose:{}\".format(key, buff_time))\n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def gen_dataset_hrsc2016(xml_path, source_img_path, save_img_path):\r\n if not os.path.exists(xml_path):\r\n raise FileExistsError('path not found! : %s' % xml_path)\r\n if not os.path.exists(source_img_path):\r\n raise FileExistsError('path not found! 
: %s' % source_img_path)\r\n train_img_path = os.path.join(source_img_path, 'Train', 'AllImages')\r\n test_img_path = os.path.join(source_img_path, 'Test', 'AllImages')\r\n categories_dict = {}\r\n with xml.dom.minidom.parse(os.path.join(source_img_path, 'FullDataSet', 'sysdata.xml')) as category_document:\r\n categories = category_document.getElementsByTagName('HRSC_Classes')[0].getElementsByTagName('HRSC_Class')\r\n for category in categories:\r\n category_id = category.getElementsByTagName('Class_ID')[0].firstChild.data\r\n category_layer = category.getElementsByTagName('Class_Layer')[0].firstChild.data\r\n category_engname = category.getElementsByTagName('Class_EngName')[0].firstChild.data.split('(')[0].replace(\r\n ' ', '-').replace('|--)', '')\r\n category_name = category.getElementsByTagName('Class_Name')[0].firstChild.data.split('(')[0]\r\n if '0' != category_layer: # there is specific ship category\r\n category_class_id = category.getElementsByTagName('HRS_Class_ID')[0].firstChild.data\r\n categories_dict[category_id] = {\r\n 'category_id': category_id,\r\n 'category_layer':category_layer,\r\n 'category_engname':category_engname,\r\n 'category_name':category_name,\r\n 'category_class_id': category_class_id\r\n }\r\n else: # label is just 'ship'\r\n categories_dict[category_id] = {\r\n 'category_id': category_id,\r\n 'category_layer': category_layer,\r\n 'category_engname': category_engname,\r\n 'category_name': category_name,\r\n 'category_class_id': '100000001'\r\n }\r\n # train files\r\n train_pbar = tqdm(os.scandir(train_img_path))\r\n for train_img in train_pbar:\r\n if train_img.is_file():\r\n extension = os.path.splitext(train_img.path)[1][1:]\r\n train_img_name = train_img.name.split('.')[0]\r\n if 'bmp' == extension: # bmp images\r\n train_pbar.set_description(\"Processing %s\" % train_img.path)\r\n try:\r\n document = xml.dom.minidom.parse(os.path.join(xml_path, train_img_name+'.xml'))\r\n is_annotated = document.getElementsByTagName('Annotated')[0].firstChild.data\r\n if '0' == is_annotated: # without annotations\r\n continue\r\n # img_id = document.getElementsByTagName('Img_ID')[0].firstChild.data\r\n img_resolution = document.getElementsByTagName('Img_Resolution')[0].firstChild.data\r\n ships = document.getElementsByTagName('HRSC_Objects')[0].getElementsByTagName('HRSC_Object')\r\n for ship in ships:\r\n ship_category_id = ship.getElementsByTagName('Class_ID')[0].firstChild.data\r\n ship_category_dict = categories_dict[ship_category_id]\r\n\r\n # get four corner points' coordinates of the rotated bounding box\r\n box_cx = float(ship.getElementsByTagName('mbox_cx')[0].firstChild.data)\r\n box_cy = float(ship.getElementsByTagName('mbox_cy')[0].firstChild.data)\r\n box_w = float(ship.getElementsByTagName('mbox_w')[0].firstChild.data)\r\n box_h = float(ship.getElementsByTagName('mbox_h')[0].firstChild.data)\r\n box_angle = float(ship.getElementsByTagName('mbox_ang')[0].firstChild.data) # rad\r\n box_x1 = int(box_cx + box_h * 0.5 * np.sin(box_angle) - box_w * 0.5 * np.cos(box_angle))\r\n box_y1 = int(box_cy - box_h * 0.5 * np.cos(box_angle) - box_w * 0.5 * np.sin(box_angle))\r\n box_x2 = int(box_cx + box_h * 0.5 * np.sin(box_angle) + box_w * 0.5 * np.cos(box_angle))\r\n box_y2 = int(box_cy - box_h * 0.5 * np.cos(box_angle) + box_w * 0.5 * np.sin(box_angle))\r\n box_x3 = int(box_cx - box_h * 0.5 * np.sin(box_angle) + box_w * 0.5 * np.cos(box_angle))\r\n box_y3 = int(box_cy + box_h * 0.5 * np.cos(box_angle) + box_w * 0.5 * np.sin(box_angle))\r\n box_x4 = int(box_cx - box_h * 
0.5 * np.sin(box_angle) - box_w * 0.5 * np.cos(box_angle))\r\n box_y4 = int(box_cy + box_h * 0.5 * np.cos(box_angle) - box_w * 0.5 * np.sin(box_angle))\r\n\r\n # get ship orientation, define as the clockwise angle from ship head to North (Up)\r\n try:\r\n ship_head_x = int(ship.getElementsByTagName('header_x')[0].firstChild.data)\r\n ship_head_y = int(ship.getElementsByTagName('header_y')[0].firstChild.data)\r\n if box_w < box_h:\r\n if ship_head_y > box_cy:\r\n ship_orientation = np.pi - box_angle\r\n elif box_angle < 0:\r\n ship_orientation = -box_angle\r\n else:\r\n ship_orientation = 2.0 * np.pi - box_angle\r\n else:\r\n if ship_head_x < box_cx:\r\n ship_orientation = np.pi * 0.5 - box_angle\r\n else:\r\n ship_orientation = 1.5 * np.pi - box_angle\r\n except: # ship head coordinates is not given\r\n if box_w < box_h: # heads up\r\n if box_angle < 0:\r\n ship_orientation = -box_angle\r\n else:\r\n ship_orientation = 2.0 * np.pi - box_angle\r\n else: # heads right\r\n ship_orientation = 1.5 * np.pi - box_angle\r\n\r\n # crop ship images\r\n ori_image = cv2.imread(train_img.path, -1)\r\n box = [(box_x1, box_y1), (box_x2, box_y2), (box_x3, box_y3), (box_x4, box_y4)]\r\n xmin = min(box_x1, box_x2, box_x3, box_x4)\r\n xmax = max(box_x1, box_x2, box_x3, box_x4)\r\n ymin = min(box_y1, box_y2, box_y3, box_y4)\r\n ymax = max(box_y1, box_y2, box_y3, box_y4)\r\n if len(ori_image.shape) == 3:\r\n ori_h, ori_w, image_channels = ori_image.shape\r\n sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1, image_channels], dtype=np.int)\r\n else:\r\n oir_h, ori_w = ori_image.shape\r\n sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1], dtype=np.int)\r\n for y in range(sub_image.shape[0]): # row\r\n for x in range(sub_image.shape[1]): # col\r\n if pnpoly([xmin + x, ymin + y], box):\r\n sub_image[y, x] = ori_image[min(ymin + y - 1, ori_h-1), min(xmin + x - 1, ori_w-1)]\r\n sub_imagename = f'''{train_img_name}_{ship_category_dict['category_engname']}''' + \\\r\n f'''_ort_{ship_orientation:.3f}_rsl_{img_resolution}_x_{int(box_cx)}_y_{int(box_cy)}.bmp'''\r\n\r\n if '0' == ship_category_dict['category_layer']: # just be 'ship'\r\n ship_save_folder = os.path.join(save_img_path, 'train', 'ship')\r\n elif '1' == ship_category_dict['category_layer']: # ship class\r\n ship_save_folder = os.path.join(save_img_path,'train', 'ship',\r\n ship_category_dict['category_engname'])\r\n else: # '2' == ship_category_dict['category_layer']: # ship type\r\n ship_class_name = categories_dict[ship_category_dict['category_class_id']][\r\n 'category_engname']\r\n ship_save_folder = os.path.join(save_img_path, 'train', 'ship', ship_class_name,\r\n ship_category_dict['category_engname'])\r\n os.makedirs(ship_save_folder, exist_ok=True)\r\n cv2.imwrite(os.path.join(ship_save_folder, sub_imagename), sub_image)\r\n except: #\r\n print(f'''could not find {os.path.join(xml_path, train_img_name+'.xml')}''')\r\n\r\n pass\r\n\r\n # test files\r\n test_pbar = tqdm(os.scandir(test_img_path))\r\n for test_img in test_pbar:\r\n if test_img.is_file():\r\n extension = os.path.splitext(test_img.path)[1][1:]\r\n test_img_name = test_img.name.split('.')[0]\r\n if 'bmp' == extension: # bmp images\r\n test_pbar.set_description(\"Processing %s\" % test_img.path)\r\n try:\r\n document = xml.dom.minidom.parse(os.path.join(xml_path, test_img_name + '.xml'))\r\n is_annotated = document.getElementsByTagName('Annotated')[0].firstChild.data\r\n if '0' == is_annotated: # without annotations\r\n continue\r\n # img_id = 
document.getElementsByTagName('Img_ID')[0].firstChild.data\r\n img_resolution = document.getElementsByTagName('Img_Resolution')[0].firstChild.data\r\n ships = document.getElementsByTagName('HRSC_Objects')[0].getElementsByTagName('HRSC_Object')\r\n for ship in ships:\r\n ship_category_id = ship.getElementsByTagName('Class_ID')[0].firstChild.data\r\n ship_category_dict = categories_dict[ship_category_id]\r\n\r\n # get four corner points' coordinates of the rotated bounding box\r\n box_cx = float(ship.getElementsByTagName('mbox_cx')[0].firstChild.data)\r\n box_cy = float(ship.getElementsByTagName('mbox_cy')[0].firstChild.data)\r\n box_w = float(ship.getElementsByTagName('mbox_w')[0].firstChild.data)\r\n box_h = float(ship.getElementsByTagName('mbox_h')[0].firstChild.data)\r\n box_angle = float(ship.getElementsByTagName('mbox_ang')[0].firstChild.data) # rad\r\n box_x1 = int(box_cx + box_h * 0.5 * np.sin(box_angle) - box_w * 0.5 * np.cos(box_angle))\r\n box_y1 = int(box_cy - box_h * 0.5 * np.cos(box_angle) - box_w * 0.5 * np.sin(box_angle))\r\n box_x2 = int(box_cx + box_h * 0.5 * np.sin(box_angle) + box_w * 0.5 * np.cos(box_angle))\r\n box_y2 = int(box_cy - box_h * 0.5 * np.cos(box_angle) + box_w * 0.5 * np.sin(box_angle))\r\n box_x3 = int(box_cx - box_h * 0.5 * np.sin(box_angle) + box_w * 0.5 * np.cos(box_angle))\r\n box_y3 = int(box_cy + box_h * 0.5 * np.cos(box_angle) + box_w * 0.5 * np.sin(box_angle))\r\n box_x4 = int(box_cx - box_h * 0.5 * np.sin(box_angle) - box_w * 0.5 * np.cos(box_angle))\r\n box_y4 = int(box_cy + box_h * 0.5 * np.cos(box_angle) - box_w * 0.5 * np.sin(box_angle))\r\n\r\n # get ship orientation, define as the clockwise angle from ship head to North (Up)\r\n try:\r\n ship_head_x = int(ship.getElementsByTagName('header_x')[0].firstChild.data)\r\n ship_head_y = int(ship.getElementsByTagName('header_y')[0].firstChild.data)\r\n if box_w < box_h:\r\n if ship_head_y > box_cy:\r\n ship_orientation = np.pi - box_angle\r\n elif box_angle < 0:\r\n ship_orientation = -box_angle\r\n else:\r\n ship_orientation = 2.0 * np.pi - box_angle\r\n else:\r\n if ship_head_x < box_cx:\r\n ship_orientation = np.pi * 0.5 - box_angle\r\n else:\r\n ship_orientation = 1.5 * np.pi - box_angle\r\n except: # ship head coordinates is not given\r\n if box_w < box_h: # heads up\r\n if box_angle < 0:\r\n ship_orientation = -box_angle\r\n else:\r\n ship_orientation = 2.0 * np.pi - box_angle\r\n else: # heads right\r\n ship_orientation = 1.5 * np.pi - box_angle\r\n\r\n # crop ship images\r\n ori_image = cv2.imread(test_img.path, -1)\r\n box = [(box_x1, box_y1), (box_x2, box_y2), (box_x3, box_y3), (box_x4, box_y4)]\r\n xmin = min(box_x1, box_x2, box_x3, box_x4)\r\n xmax = max(box_x1, box_x2, box_x3, box_x4)\r\n ymin = min(box_y1, box_y2, box_y3, box_y4)\r\n ymax = max(box_y1, box_y2, box_y3, box_y4)\r\n if len(ori_image.shape) == 3:\r\n ori_h, ori_w, image_channels = ori_image.shape\r\n sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1, image_channels], dtype=np.int)\r\n else:\r\n oir_h, ori_w = ori_image.shape\r\n sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1], dtype=np.int)\r\n for y in range(sub_image.shape[0]): # row\r\n for x in range(sub_image.shape[1]): # col\r\n if pnpoly([xmin + x, ymin + y], box):\r\n sub_image[y, x] = ori_image[\r\n min(ymin + y - 1, ori_h - 1), min(xmin + x - 1, ori_w - 1)]\r\n sub_imagename = f'''{test_img_name}_{ship_category_dict['category_engname']}''' + \\\r\n f'''_ort_{ship_orientation:.3f}_rsl_{img_resolution}''' + \\\r\n 
f'''_x_{int(box_cx)}_y_{int(box_cy)}.bmp'''\r\n\r\n if '0' == ship_category_dict['category_layer']: # just be 'ship'\r\n ship_save_folder = os.path.join(save_img_path, 'test', 'ship')\r\n elif '1' == ship_category_dict['category_layer']: # ship class\r\n ship_save_folder = os.path.join(save_img_path, 'test', 'ship',\r\n ship_category_dict['category_engname'])\r\n else: # '2' == ship_category_dict['category_layer']: # ship type\r\n ship_class_name = categories_dict[ship_category_dict['category_class_id']][\r\n 'category_engname']\r\n ship_save_folder = os.path.join(save_img_path, 'test', 'ship', ship_class_name,\r\n ship_category_dict['category_engname'])\r\n os.makedirs(ship_save_folder, exist_ok=True)\r\n cv2.imwrite(os.path.join(ship_save_folder, sub_imagename), sub_image)\r\n except: #\r\n print(f'''could not find {os.path.join(xml_path, test_img_name + '.xml')}''')", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def do_training(self):\n json_data = request.data\n global g_list_of_classifier\n\n datas = json.loads(json_data.decode('UTF-8')) #datas = liste\n\n for ite_clf in g_list_of_classifier:\n for data in datas:\n ite_clf.add_data(data['score'], data['answer'])\n print(ite_clf.get_info())\n return ''", "def trainModels():\n\n # load actives from ChEMBL\n actives = {}\n if not os.path.exists(DATA_FOLDER_PATH):\n os.mkdir(DATA_FOLDER_PATH)\n actives_file = [x for x in os.listdir(DATA_FOLDER_PATH) if x.startswith('actives_chembl') and x.endswith('.p')]\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n actives = chembl.loadChEMBLData(ACCESSION, IC_50_THRESHOLD, DATA_FOLDER_PATH)\n else:\n actives = pickle.load(open(DATA_FOLDER_PATH + actives_file[0], 'rb'))\n\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n chembl.computeConsensualIC50(actives, DATA_FOLDER_PATH)\n chembl.appendRDKitMols(actives, DATA_FOLDER_PATH)\n\n # load decoys downloaded from DUD\n decoys = {}\n if os.path.exists(DECOYS_SDF_FILE_PATH[:-4] + \".p\"):\n decoys = pickle.load(open(DECOYS_SDF_FILE_PATH[:-4] + \".p\", 'rb'))\n else:\n if os.path.exists(DECOYS_SDF_FILE_PATH):\n decoys = dud.getDecoys(DECOYS_SDF_FILE_PATH)\n else:\n print \"Decoys not found in: \" + DECOYS_SDF_FILE_PATH\n print \"Make sure you set the right path.\"\n exit()\n\n # merge both data sets\n compounds_all = {}\n compounds_all.update(actives)\n compounds_all.update(decoys)\n\n # compute Morgan fingerprints\n if os.path.exists(MERGED_DATASET_PATH) and not RELOAD_DATA:\n print \"Loading previously created dataset...\"\n compounds_all = pickle.load(open(MERGED_DATASET_PATH, 'rb'))\n else:\n fingerprinter.appendMorganFingerprints(compounds_all)\n\n actives = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if compounds_all[cmpndid]['active']}\n pickle.dump(actives, open(ACTIVES_DUMP, 'wb'))\n decoys = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if not compounds_all[cmpndid]['active']}\n\n # train and cross-validate multiple Naive Bayes Classifiers\n classification_results = dict()\n if not os.path.exists(CLASS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n classification_results = classification.naiveBayesClassifierTraining(compounds_all)\n print \"Saving results...\"\n pickle.dump(classification_results, open(CLASS_RESULTS_SAVE_FILE_PATH, 'wb'))\n print \"Finished analysis.\"\n else:\n print \"Loading previous results...\"\n classification_results = pickle.load(open(CLASS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n # have 
fun with the classification results\n print \"# CLASSIFICATION STATISTICS #\"\n classification.playWithResults(classification_results)\n\n # cluster actives according to their similarity and keep only the diverse molecules\n actives_testset = dict()\n if CLUSTER:\n clusters = utilities.clusterMols(actives)\n actives_kept = dict()\n for cluster in clusters:\n actives_kept[cluster[0]] = actives[cluster[0]]\n remains = cluster[1:]\n actives_filtered_out = {chmblid : actives[chmblid] for chmblid in remains}\n actives_testset.update(actives_filtered_out)\n actives = actives_kept\n\n # estimate maximum distances between active molecules to set threshold for the application domain\n # distance_actives = regression.estimateDistanceThreshold(actives) # median of distances between two actives\n # min_distance_decoys, max_distance_decoys = regression.compareDistances(actives, decoys) # average min/max distance of closest/farthest decoy from any of the actives\n # print \"median of distances between two actives: \" + str(distance_actives)\n # print \"average min/max distance of closest/farthest decoy from any of the actives: \" + str(min_distance_decoys) + \"/\" + str(max_distance_decoys)\n\n # Support vector regression\n regression_results = dict()\n if not os.path.exists(REGRESS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n regression_results = regression.supportVectorRegression(actives)\n pickle.dump(regression_results, open(REGRESS_RESULTS_SAVE_FILE_PATH, 'wb'))\n else:\n regression_results = pickle.load(open(REGRESS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n\n # do something with the regression results\n print \"# REGRESSION STATISTICS #\"\n regression.playWithResults(regression_results, decoys, actives_testset)\n\n return classification_results['final_model'], regression_results['final_model']", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder 
= get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = \"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, 
final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def prep_data_fn(self, st_train_dt, end_train_dt, st_val_dt, end_val_dt, st_test_dt, end_test_dt):\n df = self.get_prep_data()\n train = df[(df['ft_data_dt'] >= st_train_dt) & (df['ft_data_dt'] <= end_train_dt)]\n val = df[(df['ft_data_dt'] >= st_val_dt) & (df['ft_data_dt'] <= end_val_dt)].sample(frac=0.4, random_state=2021)\n test = df[(df['ft_data_dt'] >= st_test_dt) & (df['ft_data_dt'] <= end_test_dt)]\n print(f'----train----')\n print(train[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----validation----')\n print(val[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----test----')\n print(test[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n self.set_train(train)\n self.set_validation(val)\n self.set_test(test)\n train_X = train[[c for c in train.columns if c not in 
['idd', 'ft_data_dt', 'target']]]\n train_y = train['target']\n val_X = val[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n val_y = val['target']\n test_X = test[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n test_y = test['target']\n self.set_train_X(train_X)\n self.set_train_y(train_y)\n self.set_val_X(val_X)\n self.set_val_y(val_y)\n self.set_test_X(test_X)\n self.set_test_y(test_y)", "def final_clf_training(Xs, ys, X_holdout, y_holdout, scorer_type, sanity_check=False, oversampling=False):\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # stack the holdout feature vectors on the feature vectors of all folds\n X_all = np.concatenate([X_train, X_holdout], axis=0)\n y_all = np.concatenate([y_train, y_holdout], axis=0)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder for the specific classifer\n scorer_folder_name = scorer_type+\"_final\"\n save_folder = get_save_folder(parent_folder, scorer_folder_name+\"_new\")\n\n # get classifier and only pass a save folder if the classifier should be saved\n clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n #perform oversampling if selected\n if oversampling == True:\n if 'f_ext' in scorer_type:\n print(\"Oversampling not defined for LSTM\")\n exit()\n\n import datetime\n start = datetime.datetime.now().time()\n print(\"Started oversampling/undersampling at: \" + str(start))\n # uncomment following lines for the different sampling methods #####\n # Oversampling\n from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler\n print(\"Oversampling data\")\n #kind = ['regular', 'borderline1', 'borderline2', 'svm']\n #sm = SMOTE(kind='regular',)\n #X_res, y_res = sm.fit_sample(X_all, y_all)\n\n #ros = RandomOverSampler()\n #X_res, y_res = ros.fit_sample(X_all, y_all)\n\n #ada = ADASYN()\n #X_res, y_res = ada.fit_sample(X_all, y_all)\n\n ######################################################\n # Undersampling\n from imblearn.under_sampling import TomekLinks, EditedNearestNeighbours, CondensedNearestNeighbour, \\\n NeighbourhoodCleaningRule, InstanceHardnessThreshold\n # remove Tomek links\n tl = TomekLinks(return_indices=True)\n X_res, y_res, idx_resampled = tl.fit_sample(X_all, y_all)\n\n #enn = EditedNearestNeighbours(random_state=0)\n #X_res, y_res = enn.fit_sample(X_all, y_all)\n\n #cnn = CondensedNearestNeighbour(random_state=0)\n #X_res, y_res = cnn.fit_sample(X_all, y_all)\n\n #ncr = NeighbourhoodCleaningRule(random_state=0)\n #X_res, y_res = ncr.fit_sample(X_all, y_all)\n\n #iht = InstanceHardnessThreshold(random_state=0, estimator=clf)\n #X_res, y_res = iht.fit_sample(X_all, y_all)\n\n\n ##################\n # Combination of Undersampling and oversampling\n\n from imblearn.combine import SMOTEENN, SMOTETomek\n #smote_enn = SMOTEENN(random_state=0)\n #X_res, y_res = smote_enn.fit_sample(X_all, y_all)\n\n #smote_tomek = SMOTETomek(random_state=0)\n #X_res, y_res = smote_tomek.fit_sample(X_all, y_all)\n\n end = datetime.datetime.now().time()\n print(\"Ended oversampling/undersampling at: \" + str(end))\n\n clf.fit(X_res, y_res)\n else: # if oversampling is false\n import datetime\n # fit the final classifier\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" 
% (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n clf.fit(X_all, y_all)\n\n # save the model\n filename = scorer_folder_name + \".sav\"\n save_model(clf, save_folder, filename) # save model with filename to specific folder\n\n # predict on the data the classifier was trained on => should give near perfect score\n if sanity_check == True:\n # get predicted and actual labels\n y_predicted = clf.predict(X_all)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_all]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy, f1 macro\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_all, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_all, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_all, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_all, stance=False)\n\n # printout results\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related,\n f1_stance, score)\n print(\"SANITY CHECK (predict on train data):\")\n print(printout)\n return clf", "def final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, final_clf):\n\n d = myConstants.testdataset\n\n # generate features for the unlabeled testing set\n X_final_test = generate_features_test(d.stances, d, str(\"final_test\"), features, features_dir)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n fnc_result_folder = \"%s/data/fnc-1/fnc_results/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # load model [scorer_type]_final_2 classifier\n filename = scorer_type + \"_final.sav\"\n load_clf = load_model(parent_folder + scorer_type + myConstants.model_name, filename)\n # The model is set in settings.py in class \"myConstants\"\n\n print(\"Load model for final prediction of test set: \" + parent_folder + scorer_type + myConstants.model_name + filename)\n\n # predict classes and turn into labels\n y_predicted = load_clf.predict(X_final_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n\n # create folder to save the file\n if not os.path.exists(parent_folder):\n os.makedirs(parent_folder)\n if not os.path.exists(fnc_result_folder):\n os.makedirs(fnc_result_folder)\n\n # save the submission file, including the prediction for the labels\n with open(fnc_result_folder + scorer_type + \"_submission.csv\", 'w') as csvfile:\n fieldnames = [\"Headline\", \"Body ID\", \"Stance\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n i = 0\n for stance in d.stances:\n writer.writerow(\n {'Headline': stance['Headline'], 'Body ID': stance['Body ID'], 'Stance': predicted[i]})\n i += 1\n\n\n # save the probabilities file, including the prediction for the labels\n if (\"voting_\" not in 
scorer_type) and (\"f_ext\" not in scorer_type) and (\"MLP_base_2\" not in scorer_type) and (\"featMLP\" not in scorer_type) and (\"stackLSTM\" not in scorer_type):\n print(\"Generating submission_probabilities.csv\")\n predicted_proba = load_clf.predict_proba(X_final_test)\n with open(fnc_result_folder + scorer_type + \"_probabilities.csv\", 'w') as csvfile:\n fieldnames = [\"Headline\", \"Body ID\", \"Agree\", \"Disagree\", \"Discuss\", \"Unrelated\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n i = 0\n for stance in d.stances:\n writer.writerow(\n {'Headline': stance['Headline'], 'Body ID': stance['Body ID'], 'Agree': predicted_proba[i][0],\n 'Disagree': predicted_proba[i][1], 'Discuss': predicted_proba[i][2],\n 'Unrelated': predicted_proba[i][3]})\n i += 1\n else:\n print(\"Not generating submission_probabilities.csv - because classifier contains \\\"voting\\\", \\\"f_ext\\\" or \\\"MLP_base_2\\\"\")\n\n # check whether loaded clf from disk and just trained clf return the same results\n if (run_final_train == True) and not (final_clf is None):\n print(\"Check whether loaded final model and just trained final model retrieve the same results.\"\n \"The results are only equal (=None) if they are the same model. This is a check to see whether \"\n \"saving and loading the model works correctly:\")\n print(np.testing.assert_array_equal(y_predicted, final_clf.predict(X_final_test)))", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def update(self) -> None:\n self.faithful = self.is_faithful()\n if self.faithful:\n old_class_names = self.class_names\n old_training_image_names = self.training_image_names\n self.class_names = self.find_class_names()\n self.training_image_names = self.find_training_image_names()\n self.extracted_features = list()\n if old_class_names != self.class_names or old_training_image_names != self.training_image_names:\n self.generate_csv_dictionary()\n return", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n 
self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def _train(self, features: pd.DataFrame, labels: pd.DataFrame,\n output_folder: str, n_iter: int=3, n_epoch: int=100,\n train_size: float=0.8,\n out_features: int=None, weight_class: bool=False,\n balanced_sampling: bool=False,\n base_net: Net=None, train_last: bool=False,\n refit: bool=False, refit_n_epoch: int=100, verbose: bool=True):\n\n # weight_class and balanced_sample cannot be True at the same time.\n # if weight_class and balanced_sample:\n # raise ValueError('weight_class and balanced_sample cannot be '\n # '\"True\" at the same time.')\n\n # Make an output folder if not exist.\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # apply log10 to some features.\n # TODO: fine this code.\n features['period'], min_period = apply_log10(features['period'])\n features['amplitude'], min_amplitude = apply_log10(features['amplitude'])\n features['hl_amp_ratio'], min_hl_amp_ratio = \\\n apply_log10(features['hl_amp_ratio'])\n features['kurtosis'], min_kurtosis = apply_log10(features['kurtosis'])\n features['phase_cusum'], min_phase_cusum = \\\n apply_log10(features['phase_cusum'])\n features['phase_eta'], min_phase_eta = \\\n apply_log10(features['phase_eta'])\n features['quartile31'], min_quartile31 = \\\n apply_log10(features['quartile31'])\n features['skewness'], min_skewness = apply_log10(features['skewness'])\n features['slope_per90'], min_slope_per90 = \\\n apply_log10(features['slope_per90'])\n\n min_values = {\n 'min_period': min_period,\n 'min_amplitude': min_amplitude,\n 'min_hl_amp_ratio': min_hl_amp_ratio,\n 'min_kurtosis': min_kurtosis,\n 'min_phase_cusum': min_phase_cusum,\n 'min_phase_eta': min_phase_eta,\n 'min_quartile31': min_quartile31,\n 'min_skewness': min_skewness,\n 'min_slope_per90': min_slope_per90\n }\n\n self.min_values = min_values\n # Save for later usage.\n pickle.dump(self.min_values, open(os.path.join(\n output_folder, 'min_params.pkl'), 'wb'))\n\n features = np.array(features)\n labels = np.array(labels)\n\n # Normalize.\n features_median = np.median(features, axis=0)\n features_std = np.std(features, axis=0)\n\n # original.\n features_norm = (features - features_median) / features_std\n\n # new.\n # features_min = np.min(features, axis=0)\n # features_max = np.max(features, axis=0)\n # features_norm = features - features_min\n # features_norm /= features_max\n\n # Save the number of features at the last layers.\n if out_features is None:\n self.n_final = np.unique(labels).size\n else:\n self.n_final = out_features\n\n # Save.\n pickle.dump(self.n_final, open(os.path.join(\n output_folder, 'n_final.pkl'), 'wb'))\n\n # Save the values for later usage (e.g. 
prediction).\n # original.\n self.norm_params = [features_median, features_std]\n # new.\n # self.norm_params = [features_min, features_max]\n pickle.dump(self.norm_params, open(os.path.join(\n output_folder, 'norm_params.pkl'), 'wb'))\n\n # Fit a label encoder.\n le = LabelEncoder()\n le.fit(labels)\n labels_encoded = le.transform(labels)\n\n # Save the label encoder.\n self.label_encoder = le\n pickle.dump(self.label_encoder, open(os.path.join(\n output_folder, 'label_encoder.pkl'), 'wb'))\n\n # Derive class weight by its frequency.\n if weight_class:\n unique, counts = np.unique(labels_encoded, return_counts=True)\n counts = np.array(counts)\n rev_counts = 1. / counts\n # weights = rev_counts / np.sum(rev_counts)\n weights = np.sum(counts) / counts\n class_weights = torch.FloatTensor(weights).to(self.device)\n\n # Training information.\n training_info = {'learning_rate': [],\n 'training_loss': [], 'validation_loss': [],\n 'test_f1': [], 'training_f1': [],\n 'test_mc': [], 'training_mc': []}\n\n # Train a model for the number of iteration.\n best_f1 = 0.\n best_mc = 0.\n f1_average = 'macro'\n for i in range(n_iter):\n # Train and test set split. So each iteration,\n # using a set separated differently.\n x_train, x_test, y_train, y_test = \\\n train_test_split(features_norm, labels_encoded,\n train_size=train_size, stratify=labels_encoded)\n\n # Build datasets.\n trainset = LightCurveDataset(x_train, y_train)\n testset = LightCurveDataset(x_test, y_test)\n\n # Up-sampling imbalanced dataset.\n if balanced_sampling:\n train_weights = self._get_balanced_sample_weights(y_train)\n test_weights = self._get_balanced_sample_weights(y_test)\n\n train_sampler = torch.utils.data.sampler.WeightedRandomSampler(\n train_weights, len(train_weights), replacement=True)\n test_sampler = torch.utils.data.sampler.WeightedRandomSampler(\n test_weights, len(test_weights), replacement=True)\n shuffle = False\n else:\n train_sampler = None\n test_sampler = None\n shuffle = True\n\n # Build data loaders.\n # batch_size = 1024\n batch_size = 10240\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=shuffle,\n sampler=train_sampler, num_workers=2)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=batch_size, shuffle=shuffle,\n sampler=test_sampler, num_workers=2)\n\n # Initialize a network before entering the iteration.\n net = Net()\n net.to(self.device)\n if base_net is not None:\n # For transfer learning.\n net.load_state_dict(base_net.state_dict())\n\n # Set the number of neurons at the final layers, which is\n # actually the number of target classes.\n net.fc4 = nn.Linear(net.bn4.num_features, self.n_final)\n net.bn5 = nn.BatchNorm1d(self.n_final)\n net.to(self.device)\n\n # Initial learning rate.\n learning_rate = 0.1\n\n # Set training instances.\n if base_net is not None:\n # Transfer only the last layer.\n if train_last:\n optimizer = optim.SGD(net.fc4.parameters(), lr=learning_rate,\n momentum=0.9)\n else:\n optimizer = optim.SGD(net.parameters(), lr=learning_rate,\n momentum=0.9)\n else:\n optimizer = optim.SGD(net.parameters(), lr=learning_rate,\n momentum=0.9)\n\n scheduler = ReduceLROnPlateau(optimizer, 'min', patience=3,\n eps=1e-15)\n if weight_class:\n criterion = nn.CrossEntropyLoss(weight=class_weights)\n else:\n criterion = nn.CrossEntropyLoss()\n\n # Iterate.\n for epoch in range(n_epoch):\n running_loss = 0.0\n\n # Iterate learning rate.\n if optimizer.param_groups[0]['lr'] <= 1e-10:\n optimizer.param_groups[0]['lr'] = learning_rate\n\n # 
For each batch.\n predicted_label = []\n true_label = []\n net.train()\n for l, data in enumerate(trainloader, 0):\n # Get the inputs.\n inputs, labels = data\n inputs, labels = inputs.to(self.device), \\\n labels.to(self.device)\n\n # Zero the parameter gradients.\n optimizer.zero_grad()\n\n # Forward + backward + optimize.\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n\n # Get true and predicted labels.\n outputs_numpy = torch.max(outputs, 1)[1].cpu().numpy()\n test_numpy = labels.cpu().numpy()\n predicted_label += outputs_numpy.tolist()\n true_label += test_numpy.tolist()\n\n loss.backward()\n optimizer.step()\n\n # Running loss.\n running_loss += loss.item()\n\n # Calculate training f1.\n training_f1 = f1_score(true_label, predicted_label,\n average=f1_average)\n training_mc = matthews_corrcoef(true_label, predicted_label)\n training_mc = (training_mc + 1) / 2.\n\n # Get test-set performance\n val_loss = 0.\n predicted_label = []\n true_label = []\n net.eval()\n for m, test_data in enumerate(testloader, 0):\n test_inputs, test_labels = test_data\n test_inputs, test_labels = test_inputs.to(self.device), \\\n test_labels.to(self.device)\n\n outputs = net(test_inputs)\n val_loss += criterion(outputs, test_labels).item()\n\n # Get true and predicted labels.\n outputs_numpy = torch.max(outputs, 1)[1].cpu().numpy()\n test_numpy = test_labels.cpu().numpy()\n predicted_label += outputs_numpy.tolist()\n true_label += test_numpy.tolist()\n\n test_f1 = f1_score(true_label, predicted_label,\n average=f1_average)\n test_mc = matthews_corrcoef(true_label, predicted_label)\n test_mc = (test_mc + 1) / 2.\n\n curr_f1 = test_f1\n curr_mc = test_mc\n\n if verbose:\n self.logger.info(('[{0}, {1}] '\n 'train Mc: {2:.6f}, test Mc: {3:.6f}, '\n 'learning rate {4:.1e}').format(\n i + 1, epoch + 1, training_mc, curr_mc,\n optimizer.param_groups[0]['lr'])\n )\n\n # Save training information for later usage.\n training_info['learning_rate'].append(\n optimizer.param_groups[0]['lr'])\n training_info['training_loss'].append(running_loss)\n training_info['validation_loss'].append(val_loss)\n training_info['training_f1'].append(training_f1)\n training_info['test_f1'].append(curr_f1)\n training_info['training_mc'].append(training_mc)\n training_info['test_mc'].append(curr_mc)\n\n # We save at the end of each epoch,\n # just in case the training stops unexpectedly.\n pickle.dump(training_info, open(os.path.join(\n output_folder, 'training_info.pkl'), 'wb'))\n\n # Update the best f1 score.\n if curr_f1 > best_f1:\n best_f1 = curr_f1\n self.f1_best = best_f1\n\n # Only if the new model is better.\n if curr_mc > best_mc:\n best_mc = curr_mc\n self.mc_best = best_mc\n\n # Save the model.\n torch.save(net.state_dict(), os.path.join(\n output_folder, 'state_dict.pt'))\n self.net = net\n # self.logger.info('Better model saved.')\n\n # Save true and predicted labels for later usages.\n pickle.dump([true_label, predicted_label],\n open(os.path.join(output_folder,\n 'true_predicted.pkl'), 'wb'))\n\n # Save the best mc as a plain text for temporary saving.\n fp = open(os.path.join(output_folder, 'info.txt'), 'w')\n fp.writelines('# Mc: {0:.6f}, F1: {1:.6f}\\n'.\n format(best_mc, best_f1))\n fp.close()\n\n # Scheduler based on validation loss (i.e. 
test-set loss).\n scheduler.step(val_loss)\n\n # Epoch ends.\n if verbose:\n self.logger.info('The overall best Mc and F1 using the '\n 'validation set: {0:.6f} and {1:.6f}'.\n format(self.mc_best, self.f1_best))\n\n ################################\n # The whole training finishes. #\n ################################\n\n # Get the best test F1 for each iteration.\n test_f1 = np.max(\n np.array(training_info['test_f1']).reshape(-1, n_epoch), axis=1)\n # Calculate statistics of test_f1.\n self.f1_mean = np.mean(test_f1)\n self.f1_median = np.median(test_f1)\n self.f1_std = np.std(test_f1)\n\n # Get the best test Mc for each iteration.\n test_mc = np.max(\n np.array(training_info['test_mc']).reshape(-1, n_epoch), axis=1)\n # Calculate statistics of test_mc.\n self.mc_mean = np.mean(test_mc)\n self.mc_median = np.median(test_mc)\n self.mc_std = np.std(test_mc)\n\n # Save F1 information.\n fp = open(os.path.join(output_folder, 'info.txt'), 'w')\n fp.writelines('# Best_Mc Median_Mc Mean_Mc Std_Mc '\n 'Best_F1 Median_F1 Mean_F1 Std_F1\\n')\n fp.writelines('{0:.10f} {1:.10f} {2:.10f} {3:.10f} '\n '{4:.10f} {5:.10f} {6:.10f} {7:.10f}\\n'.format(\n self.mc_best, self.mc_median, self.mc_mean, self.mc_std,\n self.f1_best, self.f1_median, self.f1_mean, self.f1_std))\n fp.close()\n\n # Refit the model using the entire dataset.\n if refit:\n self.logger.info('Refit the trained model.')\n self._refit(features_norm, labels_encoded, output_folder,\n weight_class, balanced_sampling,\n refit_n_epoch, verbose)", "def run():\n # all data and labels\n # tracemalloc.start()\n # start = time.time()\n data, labels = Startdata.getdata() # texts\n data2, labels2 = Startdata.getdata2() # emails\n # Startdata.bagofwords(data2, labels2)\n data, labels = Startdata.combinedata(data, data2, labels, labels2)\n # split into training and testing. 1/3 test, 2/3 train\n traind, trainl, testd, testl = Startdata.splitdata(data, labels)\n\n # labels\n trainlabels = Startdata.labelfix(trainl)\n testlabels = Startdata.labelfix(testl)\n\n # selective features\n #\n # extract features for use. in the shape of NxD\n # N is number of samples, D is number of features\n # current, peak = tracemalloc.get_traced_memory()\n trainfeat = Startdata.featurextract(traind, trainl)\n testfeat = Startdata.featurextract(testd, testl)\n # theta is the weights in a D+1 X 1 array\n theta = Spamfilter.train(trainfeat, trainlabels)\n #\n # trying bag of words\n #\n\n # Startdata.featurextract(data, labels)\n # error rate was 1.69% for trainingdata\n # 2.21% for testing data\n # bag, tfeat = Startdata.bagofwords(traind)\n # theta = Spamfilter.train(tfeat, trainlabels)\n # testfeat = Startdata.features(testd, bag)\n\n test(theta, testfeat, testlabels)\n # tracemalloc.stop()\n # done = time.time()\n # print(f\"Current memory usage is {current / 10**6} MB; Peak was {peak / 10**6} MB\")\n # print(\"time to complete\", done - start)\n # NTR 12/1/2020 current best featextraction at 25 iterations is about\n # 0.7-1% error for\n # trainingdata and testing data\n # NTR 12/2/2020 bag of words at 25 iterations\n # 1.69% training error, 2.21% testing error\n # NTR 12/2/2020 bag of words, 25 iter, removal of some features\n # NTR 12/3/2020 featextraction 20 iterations, new features, emails inc\n # 0.59% error on training. 
0.63% testing error", "def Segmentation(WorkingDirectory, ListTrainingDataFile, ListImageName, modelname, noiseReduction, numberOfClasses, classesNamesList, ROI, ListAreaNames, fusionClassesY_N, maskY_N, imageY_N, InfoY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, ReferencePicture):\n ### Create the folder where the output will be saved \n if maskY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/Masks'): \n os.mkdir(WorkingDirectory+'/Masks')\n if imageY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/MaskedImages'): \n os.mkdir(WorkingDirectory+'/MaskedImages')\n if NFMaskY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/NonFilteredMasks'): \n os.mkdir(WorkingDirectory+'/NonFilteredMasks')\n\n \n ### Import and format the training data from the training data files.\n trainDataTab=np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0]])\n for file in ListTrainingDataFile: \n f=open(file,\"r\",newline='') \n TrainData = list(csv.reader(f))\n f.close()\n TrainData.remove(['Class', 'Image', 'x','y','B','G','R','H','S','V','L','a','b'])\n TrainData=np.asarray(TrainData) \n trainDataTab=np.concatenate((trainDataTab, TrainData), axis=0)\n trainDataTab=np.delete(trainDataTab, (0), axis=0)\n if len(ListTrainingDataFile)>1: # if the user choose more than one file, a new file is saved combining all the selected files.\n np.savetxt(WorkingDirectory+'/trainData_'+str(numberOfClasses)+'classes.csv', trainDataTab, delimiter=\",\",header='Class,Image,x,y,B,G,R,H,S,V,L,a,b', comments='',fmt='%s')\n trainDataTab=np.delete(trainDataTab,1, 1)\n trainDataTab=np.delete(trainDataTab,1, 1)\n trainDataTab=np.delete(trainDataTab,1, 1)\n\n ### Format the list of ROI \n if ROI!='Whole pictures':\n ROI=ast.literal_eval(ROI)\n\n \n ### Train the model \n model=TrainModel(trainDataTab, modelname,classesNamesList) \n\n \n \n ### Get the size of the reference picture with a 1 pixel difference to avoid any resizing issue\n FirstImage=cv2.imread(ReferencePicture)\n ShapeFirstImage=np.shape(FirstImage)\n a=ShapeFirstImage[0]\n b=ShapeFirstImage[1]\n c=ShapeFirstImage[2]\n ShapeFirstImage2=(a+1,b,c)\n ShapeFirstImage3=(a+1,b+1,c)\n ShapeFirstImage4=(a+1,b-1,c)\n ShapeFirstImage5=(a,b,c)\n ShapeFirstImage6=(a,b+1,c)\n ShapeFirstImage7=(a,b-1,c) \n ShapeFirstImage8=(a-1,b,c)\n ShapeFirstImage9=(a-1,b+1,c)\n ShapeFirstImage10=(a-1,b-1,c) \n\n ### List initialization \n ListImageWrongSize=[]\n ListRunningTimes=[]\n ListTestDataTimes=[]\n ListApplyModelTimes=[]\n ListSaveOutputTimes=[]\n \n if BiggestBlobY_N=='Y':\n ListAirs=np.array([['Area/Plant','Image Name','Surface','Coverage', 'Aspect Ratio','Extent','Solidity', 'Equivalent Diameter', 'Main axe', 'Secondary axe']]) \n else:\n ListAirs=np.array([['Area/Plant','Image Name','Surface','Coverage']]) \n \n ### Main loop on the image list.\n for i in ListImageName:\n start_time = time.monotonic() \n TestImageBGR=cv2.imread(i) \n ImageName=i.split('/')\n ImageName=ImageName[-1] \n ImageName=ImageName.split('.')\n ImageName=ImageName[0] \n ######################################THESE THREE LINES CAN BE USED TO ADD a TIME FILTER ( only keep the pictures between certain hours)\n# hour=float(ImageName[8:10]) #get the time the picture was taken from the name of the file\n hour=float(10)\n if 8<hour<16: # apply a time condition \n ######################################\n if ROI!='Whole pictures':\n if np.shape(TestImageBGR)==ShapeFirstImage or np.shape(TestImageBGR)==ShapeFirstImage2 or np.shape(TestImageBGR)==ShapeFirstImage3 or np.shape(TestImageBGR)==ShapeFirstImage4 or 
np.shape(TestImageBGR)==ShapeFirstImage5 or np.shape(TestImageBGR)==ShapeFirstImage6 or np.shape(TestImageBGR)==ShapeFirstImage7 or np.shape(TestImageBGR)==ShapeFirstImage8 or np.shape(TestImageBGR)==ShapeFirstImage9 or np.shape(TestImageBGR)==ShapeFirstImage10 : # Test the size of the picture\n for j in range(len(ROI)): \n #Crop the picture for each ROI\n x1,y1,x2,y2=ROI[j]\n if x1>x2:\n a=x1\n x1=x2\n x2=a\n if y1>y2:\n a=y1\n y1=y2\n y2=a \n croppedImagej=TestImageBGR[y1:y2,x1:x2] \n \n NameArea=ListAreaNames[j] \n #Initialize the output names\n OutputMaskName=''\n OutputimageName=''\n OutputNFMaskName=''\n \n #Create the output names and folders\n if maskY_N=='Y': \n croppedMaskDirectoryArea=WorkingDirectory+'/Masks/'+NameArea \n if not os.path.exists(croppedMaskDirectoryArea): \n os.mkdir(croppedMaskDirectoryArea)\n OutputMaskName=croppedMaskDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_mask.png'\n \n if imageY_N=='Y': \n croppedMaskedImagesDirectoryArea=WorkingDirectory+'/MaskedImages/'+NameArea \n if not os.path.exists(croppedMaskedImagesDirectoryArea): \n os.mkdir(croppedMaskedImagesDirectoryArea) \n OutputimageName=croppedMaskedImagesDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_maskedImage.png'\n \n if NFMaskY_N=='Y':\n croppedNonFilteredMaskDirectoryArea=WorkingDirectory+'/NonFilteredMasks/'+NameArea \n if not os.path.exists(croppedNonFilteredMaskDirectoryArea): \n os.mkdir(croppedNonFilteredMaskDirectoryArea) \n OutputNFMaskName=croppedNonFilteredMaskDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_NFMask.png'\n \n # Segment the image with the function ApplyModelAndSaveOutput\n ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes=ApplyModelAndSaveOutput(model, modelname, croppedImagej, ImageName, NameArea, noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes)\n \n \n print(str(ImageName)+' '+str(NameArea)+' Done!') \n else: #if the picture is not the right size \n ListImageWrongSize.append(i) \n print(str(ImageName)+' Wrong size')\n \n else: #if the user wants to use the whole pictures\n #Create the output names\n OutputMaskName=WorkingDirectory+'/Masks/'+ImageName+'_mask.png'\n OutputimageName=WorkingDirectory+'/MaskedImages/'+ImageName+'_maskedImage.png'\n OutputNFMaskName=WorkingDirectory+'/NonFilteredMasks/'+ImageName+'_NFMask.png'\n \n # Segment the image with the function ApplyModelAndSaveOutput\n ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes=ApplyModelAndSaveOutput(model, modelname, TestImageBGR, ImageName, '', noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes)\n \n \n print(str(ImageName)+' Done!')\n \n end_time = time.monotonic()\n RunningTime=timedelta(seconds=end_time - start_time)\n sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)\n \n if i==ListImageName[0]: # get an estimation of the running time after the first picture is done\n print('Running time for 1 image =', RunningTime)\n print('Total running time estimation =', RunningTime*len(ListImageName))\n ListRunningTimes.append(sec) \n \n \n else: # usefull only if you apply a time filter \n ListImageWrongSize.append(i) \n 
print(str(ImageName)+' Wrong time')\n \n # Save the info file \n if len(ListAirs)>1:\n np.savetxt(WorkingDirectory+'/'+'InformationFile.csv', ListAirs, delimiter=\",\", comments='', fmt='%s') \n \n return ListImageWrongSize,ListRunningTimes, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n raise NotImplementedError", "def __surface_labelled_segmentation_pipeline(self, features):\n tic = time.perf_counter()\n\n # Collect the data\n ###########################################\n training_data, dev_data, test_data = {}, {}, {}\n dictionaries = (training_data, dev_data, test_data)\n counter = 0\n for file in self.input_files:\n input_file = open(os.path.join(sys.path[0], file), 'r')\n for line in input_file.readlines():\n content = line.rstrip('\\n').split(\" | \")\n labels = '-'.join(get_labels(content[2]))\n segments = removeLabels(content[2])\n\n # dictionaries[counter][content[0]] = [segments, labels] # word:[[segments],[labels]]\n dictionaries[counter][segments] = labels # segments : labels\n input_file.close()\n counter += 1\n\n toc = time.perf_counter()\n print(\"Data Collected in \" + str(tic - toc.__round__(2)))\n\n # Evaluate Model On the Test Set Using Optimised Model\n #######################################################\n\n print(\"Beginning Feature Computation and Model Optimisation\")\n tic = time.perf_counter()\n\n X_training, Y_training, words_training = surface_labelled_data_preparation(training_data)\n X_dev, Y_dev, words_dev = surface_labelled_data_preparation(dev_data)\n X_test, Y_test, words_test = surface_labelled_data_preparation(test_data)\n print(\"Data Processed\")\n\n best_epsilon = 1e-07\n best_max_iteration = 280\n best_algo = 'ap'\n\n # crf = sklearn_crfsuite.CRF(algorithm=best_algo, epsilon=best_epsilon, max_iterations=best_max_iteration)\n '''crf = sklearn_crfsuite.CRF(\n algorithm='lbfgs',\n c1=0.1,\n c2=0.1,\n max_iterations=100,\n all_possible_transitions=True\n )'''\n crf = sklearn_crfsuite.CRF(algorithm='ap', epsilon=best_epsilon, max_iterations=best_max_iteration)\n print(\"CRF Initialized\")\n # crf.fit(X_training, Y_training, X_dev=X_dev, y_dev=Y_dev)\n crf.fit(X_training, Y_training)\n print(\"Data Fitted\")\n Y_predict = crf.predict(features)\n labels = list(crf.classes_)\n sorted_labels = sorted(labels)\n return Y_predict, Y_test", "def create_tf_datasets(self):\n images = []\n labels = []\n\n images = self.dataframe_labeled_samples.index.values\n\n labels.append(\n tuple(self.dataframe_labeled_samples['Intersection'].values.astype('uint8')))\n\n images = [\n os.path.join(\n os.path.dirname(\n self.summary_manager.current_labelgui_summary_filepath),\n img_name) for img_name in images]\n labels = list(chain.from_iterable(labels))\n\n\n if self.validation_split == 0:\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n labels = tf.data.Dataset.from_tensor_slices(labels)\n dataset = tf.data.Dataset.zip((images, labels))\n return dataset, None\n\n images, images_val, labels, labels_val = train_test_split(\n images, labels, test_size=self.validation_split, random_state=0)\n\n train_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_train_split.txt'\n ))\n print(f\"Saving train split files to: {train_split_filename}\")\n with open(train_split_filename, 
'w+')\\\n as train_split_file:\n for img in images:\n train_split_file.write(img + '\\n')\n \n val_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_val_split.txt'\n ))\n print(f\"Saving train split files to: {val_split_filename}\")\n with open(val_split_filename, 'w+')\\\n as val_split_file:\n for img in images_val:\n val_split_file.write(img + '\\n')\n\n print(f\"Loading validation image paths ({len(images)}) with preprocessor\")\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n\n print(f\"Loading labels into tf tensor\")\n labels = tf.data.Dataset.from_tensor_slices(labels)\n print(f\"Creating zipped dataset with images and labels\")\n dataset = tf.data.Dataset.zip((images, labels))\n\n print(f\"Loading validation image paths ({len(images_val)}) with preprocessor\")\n images_val = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images_val)])\n #images_val = np.array([self.image_preprocessor(f) for f in tqdm(images_val)])\n images_val = tf.data.Dataset.from_tensor_slices(images_val)\n #images_val = tf.data.Dataset.list_files(images_val)\n #images_val = images_val.map(tf.io.read_file)\n print(f\"Loading validation labels into tf tensor\")\n labels_val = tf.data.Dataset.from_tensor_slices(labels_val)\n print(f\"Creating validation zipped dataset with images and labels\")\n dataset_val = tf.data.Dataset.zip((images_val, labels_val))\n\n return dataset, dataset_val", "def ssd_synthia_car_fine_tune():\n merged_annotation = '/home/public/synthia/ssd_car_fine_tune/SYNTHIA-SEQS-01-TRAIN_MERGED-shuffle.json'\n if False:\n print('we combine the training and validation here')\n annotations_url_1 = '/home/public/synthia/SYNTHIA-SEQS-01-TRAIN-shuffle.json'\n annotations_url_2 = '/home/public/synthia/SYNTHIA-SEQS-01-VALIDATE-shuffle.json'\n combine_gt(annotations_url_1, annotations_url_2, merged_annotation)\n\n if False:\n print('collect front and rear cars')\n annotations_url_1 = '/home/public/synthia/SYNTHIA-SEQS-01-TRAIN-shuffle.json'\n annotations_url_2 = '/home/public/synthia/SYNTHIA-SEQS-01-VALIDATE-shuffle.json'\n save_dir = '/home/stevenwudi/PycharmProjects/autonomous_driving/Experiments/SEQ_01_SEQ_06_cars'\n collect_front_and_rear_gt(annotations_url_1, annotations_url_2, save_dir, image_interval=50)\n\n gt_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_fine_tune_gt-shuffle.pkl'\n if False:\n print('Training annotation conversion')\n converting_gt(merged_annotation, gt_file, POR=1e-3)\n # POR: 1e-3 Finish converting, total annotated car number is 22332 in total image of 8814.\n # POR: 5e-4: Finish converting, total annotated fish number is 26800 in total image of 8814.\n\n model_checkpoint = '/home/public/synthia/ssd_car_fine_tune/weights_512.54-0.19.hdf5'\n if False:\n print('Start DDS 512 training')\n train_ssd512(gt_file, model_checkpoint=model_checkpoint, base_lr=1e-5)\n\n test_gt_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_test_gt-shuffle.pkl'\n if False:\n print('Converting testing GT')\n annotations_url = '/home/public/synthia/SYNTHIA-SEQS-01-TEST-shuffle.json'\n converting_gt(annotations_url, test_gt_file)\n if False:\n # Examine test data\n examine_ssd512(test_gt_file, model_checkpoint)\n\n test_json_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_test-shuffle_nms_'+str(nms_thresh)+'.json'\n if False:\n test_ssd512(test_gt_file, model_checkpoint, test_json_file)\n # A separate file for accepting 
gt file and predicted json fil\n if True:\n calculate_iou(test_gt_file, test_json_file, POR=2e-3, draw=False)\n\n test_gt_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_test_gt-shuffle.pkl'\n test_json_file = '/home/public/synthia/ssd_car_test_faster-shuffle.json'\n\n calculate_iou(test_gt_file, test_json_file, POR=2e-3, draw=False)\n \"\"\"\n<<<<<<< HEAD\n This is the network results train by SSD512 (with 0.05% POR trained)\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n \n ### POR=1e-3\n Total GT: 2327. \n Total prediction: [ 2016. 1986. 1951. 1918. 1888. 1852. 1815. 1774. 1711. 1641.]\n Precision: [ 0.913 0.908 0.9 0.895 0.886 0.879 0.867 0.853 0.83 0.804]\n Recall: [ 0.791 0.787 0.78 0.775 0.768 0.761 0.751 0.739 0.719 0.697]\n F score: [[ 0.847 0.843 0.837 0.825 0.816 0.79 0.748 0.666 0.449 0.106]\n [ 0.843 0.839 0.834 0.823 0.813 0.788 0.748 0.665 0.449 0.106]\n [ 0.835 0.833 0.829 0.818 0.809 0.786 0.747 0.665 0.449 0.106]\n [ 0.831 0.829 0.826 0.815 0.807 0.784 0.746 0.664 0.449 0.105]\n [ 0.823 0.822 0.82 0.81 0.802 0.781 0.744 0.663 0.447 0.105]\n [ 0.816 0.816 0.815 0.806 0.799 0.778 0.741 0.661 0.446 0.105]\n [ 0.805 0.805 0.806 0.797 0.792 0.772 0.738 0.66 0.445 0.105]\n [ 0.792 0.793 0.793 0.789 0.784 0.766 0.734 0.658 0.445 0.105]\n [ 0.77 0.773 0.775 0.771 0.768 0.753 0.725 0.652 0.441 0.104]\n [ 0.746 0.749 0.751 0.751 0.75 0.741 0.716 0.645 0.439 0.103]]\n<<<<<<< HEAD\n\n ### faster RCNN detection result POR= 1e-3\n Total GT: 2327.\n Total prediction: [ 2120. 2084. 2062. 2037. 2003. 1971. 1947. 1901. 1862. 1785.]\n Precision: [ 0.83 0.826 0.825 0.823 0.82 0.818 0.817 0.807 0.801 0.787]\n Recall: [ 0.756 0.752 0.751 0.75 0.747 0.745 0.744 0.735 0.73 0.717]\n F score: [[ 0.791 0.758 0.72 0.669 0.59 0.485 0.332 0.174 0.054 0.006]\n [ 0.787 0.756 0.719 0.669 0.59 0.485 0.332 0.174 0.054 0.006]\n [ 0.786 0.755 0.718 0.668 0.589 0.485 0.332 0.174 0.054 0.006]\n [ 0.785 0.754 0.718 0.668 0.589 0.484 0.332 0.174 0.054 0.006]\n [ 0.782 0.752 0.716 0.666 0.588 0.484 0.331 0.174 0.054 0.006]\n [ 0.78 0.75 0.715 0.666 0.588 0.484 0.331 0.174 0.054 0.006]\n [ 0.779 0.749 0.714 0.665 0.588 0.484 0.331 0.174 0.054 0.006]\n [ 0.77 0.745 0.711 0.663 0.586 0.483 0.331 0.174 0.054 0.006]\n [ 0.764 0.74 0.709 0.66 0.584 0.482 0.331 0.173 0.054 0.006]\n [ 0.75 0.73 0.703 0.657 0.582 0.481 0.33 0.173 0.054 0.006]]\n\n=======\n \n ### POR shuffle\n Total GT: 2563. \n Total prediction: [ 2185. 2137. 2081. 2047. 2010. 1976. 1937. 1893. 1831. 1713.]\n Precision: [ 0.843 0.836 0.826 0.819 0.813 0.805 0.795 0.784 0.766 0.726]\n Recall: [ 0.719 0.712 0.704 0.698 0.693 0.686 0.678 0.669 0.653 0.619]\n F score: [[ 0.776 0.698 0.646 0.619 0.597 0.571 0.525 0.447 0.31 0.106]\n [ 0.769 0.693 0.642 0.616 0.595 0.57 0.524 0.447 0.31 0.106]\n [ 0.76 0.686 0.637 0.612 0.591 0.568 0.523 0.445 0.31 0.106]\n [ 0.754 0.681 0.632 0.608 0.588 0.565 0.521 0.443 0.31 0.106]\n [ 0.749 0.677 0.629 0.605 0.586 0.563 0.52 0.442 0.309 0.106]\n [ 0.741 0.671 0.624 0.602 0.583 0.561 0.518 0.441 0.309 0.106]\n [ 0.732 0.664 0.619 0.597 0.579 0.558 0.516 0.44 0.308 0.106]\n [ 0.722 0.656 0.612 0.59 0.573 0.553 0.515 0.439 0.308 0.106]\n [ 0.705 0.642 0.6 0.58 0.564 0.545 0.508 0.435 0.306 0.105]\n [ 0.668 0.614 0.578 0.561 0.548 0.531 0.496 0.426 0.302 0.105]]\n \n POR = 2e-3\n=======\n ############################# SSD512 NMS 0.6 ###########################\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n>>>>>>> 419d28c4e27b82c4cfef6a3aa01425cf29929973\n Total GT: 1433. \n Total prediction: [ 1617. 
1605. 1593. 1579. 1568. 1551. 1537. 1517. 1498. 1465.]\n Precision: [ 0.821 0.819 0.818 0.818 0.816 0.811 0.809 0.804 0.8 0.792]\n Recall: [ 0.927 0.925 0.923 0.923 0.92 0.915 0.913 0.907 0.902 0.894]\n F score: [[ 0.871 0.868 0.859 0.852 0.836 0.81 0.742 0.636 0.48 0.161]\n [ 0.869 0.866 0.857 0.85 0.834 0.808 0.741 0.635 0.48 0.161]\n [ 0.867 0.864 0.857 0.849 0.834 0.808 0.741 0.635 0.48 0.161]\n [ 0.867 0.864 0.857 0.849 0.834 0.808 0.741 0.635 0.48 0.161]\n [ 0.865 0.862 0.855 0.848 0.832 0.807 0.741 0.635 0.48 0.161]\n [ 0.86 0.857 0.851 0.844 0.828 0.804 0.739 0.633 0.479 0.161]\n [ 0.858 0.855 0.849 0.843 0.827 0.802 0.738 0.633 0.478 0.161]\n [ 0.852 0.85 0.844 0.839 0.823 0.8 0.736 0.631 0.477 0.16 ]\n [ 0.848 0.846 0.841 0.836 0.82 0.797 0.734 0.631 0.477 0.16 ]\n [ 0.84 0.838 0.834 0.829 0.814 0.792 0.729 0.629 0.476 0.16 ]]\n \n ############################# SSD512 NMS 0.45 ###########################\n Total GT: 1433. \n Total prediction: [ 1438. 1428. 1421. 1413. 1403. 1398. 1384. 1374. 1348. 1305.]\n Precision: [ 0.92 0.918 0.917 0.914 0.911 0.91 0.905 0.902 0.894 0.874]\n Recall: [ 0.923 0.921 0.92 0.918 0.914 0.913 0.908 0.905 0.897 0.877]\n F score: [[ 0.922 0.918 0.91 0.901 0.883 0.851 0.785 0.673 0.476 0.164]\n [ 0.92 0.916 0.908 0.9 0.882 0.851 0.785 0.673 0.476 0.164]\n [ 0.918 0.915 0.907 0.899 0.882 0.851 0.785 0.673 0.476 0.164]\n [ 0.916 0.913 0.905 0.897 0.88 0.849 0.785 0.673 0.476 0.164]\n [ 0.913 0.91 0.903 0.895 0.878 0.847 0.783 0.671 0.475 0.164]\n [ 0.911 0.909 0.901 0.894 0.877 0.846 0.782 0.671 0.475 0.164]\n [ 0.906 0.905 0.898 0.89 0.874 0.843 0.781 0.67 0.475 0.164]\n [ 0.904 0.901 0.895 0.888 0.871 0.84 0.78 0.67 0.475 0.164]\n [ 0.896 0.894 0.888 0.882 0.866 0.836 0.776 0.668 0.474 0.164]\n [ 0.876 0.876 0.872 0.869 0.855 0.829 0.77 0.663 0.471 0.164]]\n \n \n<<<<<<< HEAD\n>>>>>>> bb5caf05d4bc41e182e41686b8b5e497053f9ca5\n ### POR = None (consider all testing examples)\n Total GT: 2696. \n Total prediction: [ 2273. 2221. 2166. 2111. 2055. 2000. 1945. 1875. 1786. 1684.]\n Precision: [ 0.822 0.818 0.81 0.805 0.796 0.789 0.777 0.763 0.741 0.716]\n Recall: [ 0.693 0.69 0.683 0.678 0.671 0.665 0.655 0.643 0.625 0.604]\n F score: [[ 0.752 0.746 0.74 0.729 0.724 0.703 0.668 0.597 0.405 0.096]\n [ 0.748 0.744 0.737 0.727 0.722 0.702 0.668 0.597 0.405 0.096]\n [ 0.741 0.737 0.733 0.723 0.718 0.699 0.667 0.596 0.405 0.096]\n [ 0.736 0.733 0.729 0.72 0.716 0.698 0.666 0.596 0.404 0.095]\n [ 0.728 0.726 0.724 0.716 0.712 0.695 0.664 0.595 0.403 0.095]\n [ 0.722 0.721 0.72 0.712 0.708 0.692 0.661 0.593 0.402 0.095]\n [ 0.711 0.711 0.711 0.704 0.703 0.687 0.659 0.592 0.401 0.095]\n [ 0.698 0.7 0.7 0.696 0.696 0.682 0.655 0.59 0.401 0.095]\n [ 0.678 0.682 0.684 0.681 0.681 0.67 0.647 0.585 0.398 0.095]\n [ 0.655 0.659 0.662 0.663 0.665 0.659 0.639 0.578 0.395 0.094]]\n\n POR = 2e-3 fastercnn result\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n Total GT: 1433.\n Total prediction: [ 1614. 1601. 1593. 1585. 1576. 1561. 1557. 1544. 1532. 1507.]\n=======\n ############################# Faster-RCNN ###########################\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n Total GT: 1433. \n Total prediction: [ 1614. 1601. 1593. 1585. 1576. 1561. 1557. 1544. 1532. 
1507.]\n>>>>>>> 419d28c4e27b82c4cfef6a3aa01425cf29929973\n Precision: [ 0.812 0.812 0.811 0.811 0.811 0.81 0.81 0.807 0.805 0.8 ]\n Recall: [ 0.914 0.914 0.913 0.913 0.913 0.913 0.913 0.909 0.907 0.902]\n F score: [[ 0.86 0.843 0.808 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.86 0.843 0.808 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.855 0.839 0.804 0.748 0.665 0.576 0.458 0.311 0.14 0.016]\n [ 0.853 0.838 0.804 0.747 0.664 0.575 0.458 0.311 0.14 0.016]\n [ 0.848 0.834 0.8 0.745 0.662 0.574 0.457 0.31 0.139 0.016]]\n \"\"\"", "def generate_data(self):\n\n # cfg.d_sour_num = 20 # number of source domains\n self.d_sour_a = [np.random.uniform(0.1, 5.0) for _ in range(cfg.d_sour_num)]\n self.d_sour_b = [np.random.uniform(0, np.pi) for _ in range(cfg.d_sour_num)]\n # cfg.d_targ_num = 1 # number of target domain\n self.d_targ_a = [np.random.uniform(0.1, 5.0) for _ in range(cfg.d_targ_num)]\n self.d_targ_b = [np.random.uniform(0, np.pi) for _ in range(cfg.d_targ_num)]\n\n\n # cfg.train_num = 100 # number of training point in each domain\n self.train_x = np.array([np.random.uniform(-5.0, 5.0) for _ in range(cfg.train_num)], dtype=np.float32).reshape(-1,1)\n self.train_y = np.array([[self.d_sour_a[j] * np.sin(i + self.d_sour_b[j]) for i in self.train_x] for j in range(cfg.d_sour_num)], dtype=np.float32).reshape(cfg.d_sour_num, cfg.train_num, 1)\n\n # cfg.val_num = 100\n self.val_x = np.array([np.random.uniform(-5.0, 5.0) for _ in range(cfg.val_num)], dtype=np.float32).reshape(-1,1)\n self.val_y = np.array([[self.d_sour_a[j] * np.sin(i + self.d_sour_b[j]) for i in self.val_x] for j in range(cfg.d_sour_num)], dtype=np.float32).reshape(cfg.d_sour_num, cfg.val_num, 1)\n\n # cfg.support_num = 10\n self.support_x = np.array([np.random.uniform(-5.0, 5.0) for _ in range(cfg.support_num)], dtype=np.float32).reshape(-1,1)\n self.support_y = np.array([[self.d_targ_a[j] * np.sin(i + self.d_targ_b[j]) for i in self.support_x] for j in range(cfg.d_targ_num)], dtype=np.float32).reshape(cfg.d_targ_num, cfg.support_num, 1)\n\n # cfg.test_num = 100\n self.test_x = np.array([np.random.uniform(-5.0, 5.0) for _ in range(cfg.test_num)], dtype=np.float32).reshape(-1,1)\n self.test_y = np.array([[self.d_targ_a[j] * np.sin(i + self.d_targ_b[j]) for i in self.test_x] for j in range(cfg.d_targ_num)], dtype=np.float32).reshape(cfg.d_targ_num, cfg.test_num, 1)\n\n self.test_x_old = np.array([np.random.uniform(-5.0, 5.0) for _ in range(cfg.test_num)], dtype=np.float32).reshape(-1,1)\n self.test_y_old = np.array([[self.d_sour_a[j] * np.sin(i + self.d_sour_b[j]) for i in self.test_x_old] for j in range(cfg.d_sour_num)], dtype=np.float32).reshape(cfg.d_sour_num, cfg.test_num, 1)", "def main(domain):\n\n filepath_train1 = '../../Non_covid_data_15oct/train_data_batch1_disregard_removed.pkl'\n filepath_test1 = '../../Non_covid_data_15oct/test_data_batch1_disregard_removed.pkl'\n filepath_train2 = '../../Covid_data_11nov/traindata_covidbatch.pkl'\n filepath_test2 = '../../Covid_data_11nov/testdata_covidbatch.pkl'\n\n df_train_nc, df_test_nc = createDataframe(filepath_train1, filepath_test1, domain, 'noncovid')\n df_train_c, df_test_c = createDataframe(filepath_train2, filepath_test2, domain, 'covid')\n 
#print(df_train)\n sen_reps_tr_nc, labels_tr_nc, sen_reps_te_nc, labels_te_nc = prepro(df_train_nc, df_test_nc)\n sen_reps_tr_c, labels_tr_c, sen_reps_te_c, labels_te_c = prepro(df_train_c, df_test_c)\n #print(labels_te)\n\n #Uncomment to combine training datasets \n #sen_reps_tr_c += sen_reps_tr_nc\n #labels_tr_c += labels_tr_nc\n\n #Uncomment to combine test datasets and test labels if necessary (if you do so, also combine test df's)\n #sen_reps_te_c += sen_reps_te_nc\n #labels_te_c += labels_te_nc\n #df_test = pd.concat([df_test_c, df_test_nc])\n\n #Feed selected train and test data to regression model\n predictions = get_predictions(sen_reps_tr_c, labels_tr_c, sen_reps_te_c)\n\n #Make dataframes of note id's and labels\n df_ann = make_note_df(df_test_c, labels_te_c)\n df_pred = make_note_df(df_test_c, predictions)\n\n #Evaluate on sentence level\n MSE, MAE, RMSE = evaluation(labels_te_c, predictions)\n\n print(\"MSE \"+domain, MSE)\n print(\"MAE \"+domain, MAE)\n print(\"RMSE \"+domain, RMSE)\n\n #Aggregate per note\n means_ann = means(df_ann)\n means_pred = means(df_pred)\n\n #Evaluate on note level\n MSE, MAE, RMSE = evaluation(means_ann, means_pred)\n\n print(\"MSE agg\"+domain, MSE)\n print(\"MAE agg\"+domain, MAE)\n print(\"RMSE agg\"+domain, RMSE)", "def main(feats_name, targets_name, model_name, n_boot, seed_start, output_filename, train_test_flag):\n\n #load feats and targets\n input_dict = {}\n input_dict['feats'] = 'data/%s' % (feats_name)\n input_dict['targets'] = 'data/%s' % (targets_name)\n #load the feats and targets\n df = pd.read_csv(\"%s\" % (input_dict['feats']))\n targets = pd.read_csv(\"%s\" % (input_dict['targets']))\n #drop columns not used for prediction\n drop_cols = [\"Unnamed: 0\",\"index\"]\n for dc in drop_cols:\n if dc in targets.columns:\n targets = targets.drop(dc,axis=1)\n if dc in df.columns:\n df = df.drop(dc,axis=1)\n #reduce to training or test set only if requested\n if (train_test_flag == 'train') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 0]\n df = df[df['test_set'] == 0]\n elif (train_test_flag == 'test') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 1]\n df = df[df['test_set'] == 1]\n df = df.drop('test_set', axis = 1)\n \n #broadcast the feats and targets\n df_b = sc.broadcast(df)\n targets_b = sc.broadcast(targets)\n\n #Set up the classifier. 3fold CV for selection of regularization term.\n if model_name == 'linear':\n model = LinearRegression(fit_intercept=True,\n normalize=False,\n copy_X=True,\n n_jobs=1) \n elif model_name == 'lasso':\n model = LassoCV(alphas = [.05,.1,.2],\n normalize = False,\n fit_intercept = True,\n verbose = False,\n copy_X = False,\n n_jobs = 3)\n elif model_name == 'ridge':\n model = RidgeCV(alphas = [.00001,.0001,.001,.01,.1,1,10,100,1000,10000],\n normalize = False,\n fit_intercept = True,\n verbose = 1,\n cv = 3)\n else:\n raise ValueError('model_name not recognized.')\n \n #Create an RDD that specifies prng seed to use\n samp_list = [(n,) for n in np.arange(seed_start, seed_start+n_boot)]\n samp_rdd = sc.parallelize(samp_list,n_boot) #create RDD with one partition for each row (second arg is number of partitions)\n #Create a function that takes a tuple as input and returns \n def func(tup):\n \"\"\"\n Takes as input a tuple containing an integer. The integer specifies the random seed that will be used to \n randomly sample, with replacement, observations from the feats set provided. The model is fitted to the \n sampled feats. 
Resulting best fit parameters, along with some other summary statistics and information are\n provided as input in a JSON string that will be written to the output file when all jobs are completed.\n \n Parameters\n ----------\n tup, rdd\n - series of tuples with different integer values defining the RNG seed to be used to sample observations\n \n Returns\n ----------\n tup[0], int\n - the seed that was used\n json.dumps(results_dict), str\n - dict in json format with the following keys:\n - alpha, the regularization term providing the best fit according to 3 fold cross-validation\n - random_state, the initial state used for fitting\n - training_feats, the name of the training_feats csv file\n - training_targets, the name of the target variable csv file\n - cv, the type of cross-validation used\n - sklearn_version, which version of sklearn was used\n - mse_min, the mean squared error for the test set on each fold\n - r2, the r-squared value (% var explained)\n - coef, parameter vector\n - intercept, intercept parameter\n - column_names, feature name corresponding to each parameter in the parameter vector\n \"\"\"\n #take a random sample with replacement\n np.random.seed(seed=tup[0]) #set the seed\n n_obs = np.shape(df_b.value)[0] #number of observations determines sample size\n samp = list(np.random.randint(0,high=n_obs,size=n_obs)) #draw the random sample with replacement\n #fit the model\n tic = time.time()\n results = model.fit(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n toc = tic - time.time()\n #save the results in a dict\n results_dict = {}\n results_dict['alpha'] = results.alpha_\n results_dict['random_state'] = results.random_state\n results_dict['training_feats'] = input_dict['feats']\n results_dict['training_targets'] = input_dict['targets']\n results_dict['cv'] = results.cv\n results_dict['sklearn_version'] = sklearn.__version__\n results_dict['mse_min'] = results.mse_path_.min()\n results_dict['r2'] = results.score(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n results_dict['coef'] = list(results.coef_)\n results_dict['intercept'] = results.intercept_\n results_dict['column_names'] = [i for i in df_b.value.columns]\n results_dict['fit_time'] = toc\n #convert results dict to json and save in tuple\n return(json.dumps(results_dict))\n\n #fit model in parallel\n results = samp_rdd.map(lambda p: func(p))\n #save to text file\n results.saveAsTextFile(output_filename)\n #stop the SparkContext.\n if not local_mode:\n sc.stop()", "def create_classifiers(testmode=0, cores=4):\n if (cores == 0):\n cores = 4 #how many cores to use for the training\n\n\n print(\"loading catalogs\")\n\n df_all_quasars= pd.read_csv(\"data/training/quasars_dr14_and_highz.csv\")\n df_all_stars=pd.read_csv(\"data/training/stars_sdss_and_dwarfs.csv\")\n\n #to test ###########################################\n if (testmode==1):\n df_all_stars = df_all_stars.sample(n=10000)\n df_all_quasars = df_all_quasars.sample(n=10000)\n #df_sim = df_sim.sample(n=10000)\n\n df_train = pd.concat([df_all_stars,df_all_quasars], sort=False)\n\n print(\"Excluding two stripes\")\n start_shape = df_train.shape[0]\n df_excluded = df_train.query(\"(ps_ra <= 60 or ps_ra >= 300) and (ps_dec <= 1.26 and ps_dec >= -1.26) \") #used for observation\n #df_excluded = df_train.query(\"(ps_ra <= 60 or ps_ra >= 300) and (ps_dec <= 5 and ps_dec >= -5) \")\n\n df_train = df_train.query(\"ps_ra > 60 and ps_ra < 300 and (ps_dec> 1.26 or ps_dec < -1.26) \").copy()\n #df_train = df_train.query(\"(ps_ra > 60 and 
ps_ra < 300) or (ps_dec> 5 or ps_dec < -5) \").copy()\n\n print(\"{} elements where excluded. Here are the value counts counts:\".format(start_shape-df_train.shape[0]))\n print(df_excluded['mult_class_true'].value_counts())\n print(\"---\")\n\n print(\"Here are the value counts for the training data:\")\n print(df_train['mult_class_true'].value_counts())\n\n features = ['PS_z','WISE_w1','gr','ri','iz','zy', 'yw1','w1w2']\n label = 'mult_class_true'\n\n\n #params = {'class_weight': 'balanced', 'criterion': 'gini', 'max_depth': 25,\n # 'min_samples_split': 2, 'n_estimators': 200,'random_state': 1,'n_jobs': cores}#n_jobs: how many processors, random_state: give me the same random variables for reproducability\n\n #optimized params for stackmag 5th october\n params = {'class_weight': 'balanced', 'criterion': 'gini', 'max_depth': 25,\n 'min_samples_split': 3, 'n_estimators': 300,'random_state': 1,'n_jobs': cores}#n_jobs: how many processors, random_state: give me the same random variables for reproducability\n\n\n rand_state = 1\n\n rf.rf_class_create(df_train, features, label, params, rand_state, save=True, save_filename=\"data/rf_classifier_PS_ALLWISE\")\n\n # print(\"-------------------------------------------------------------\")\n # print(\"finished, now again with non quasar list\")\n # print(\"-------------------------------------------------------------\")\n #\n #\n # df_train = pd.concat([df_all_stars,df_all_quasars,df_not_quasars], sort=False)\n #\n #\n # features = ['PS_z','WISE_w1','gr','ri','iz','zy', 'yw1','w1w2'] ##shoudl i use PS_z?\n # label = 'mult_class_true'\n #\n #\n # #params = {'class_weight': 'balanced', 'criterion': 'gini', 'max_depth': 25,\n # # 'min_samples_split': 2, 'n_estimators': 200,'random_state': 1,'n_jobs': cores}#n_jobs: how many processors, random_state: give me the same random variables for reproducability\n #\n # #optimized params for stackmag 5th october\n # params = {'class_weight': 'balanced', 'criterion': 'gini', 'max_depth': 25,\n # 'min_samples_split': 3, 'n_estimators': 300,'random_state': 1,'n_jobs': cores}\n #\n # rand_state = 1\n #\n # rf_class.rf_class_create(df_train, features, label, params, rand_state, save=True, save_filename=\"data/rf_classifier_with_Not_quasars_PS_ALLWISE\")\n #\n\n # print(\"-------------------------------------------------------------\")\n # print(\"finished, now again with simulated quasars\")\n # print(\"-------------------------------------------------------------\")\n #\n # df_train = pd.concat([df_all_stars,df_sim, df_sim_low], sort=False) #removed not quasars from sim class\n #\n # #optimized parameters old\n # #params = {'class_weight': 'balanced', 'criterion': 'gini', 'max_depth': 30,\n # # 'min_samples_split': 3, 'n_estimators': 200,'random_state': 1,'n_jobs': cores}#n_jobs: how many processors, random_state: give me the same random variables for reproducability\n #\n # #optimized parameters old\n # params = {'class_weight': 'balanced', 'criterion': 'gini', 'max_depth': 25,\n # 'min_samples_split': 3, 'n_estimators': 300,'random_state': 1,'n_jobs': cores}#n_jobs: how many processors, random_state: give me the same random variables for reproducability\n #\n #\n #\n # rand_state = 1\n # label = 'mult_class_true'\n #\n #\n #\n # rf_class.rf_class_create(df_train, features, label, params, rand_state, save=True, save_filename=\"data/rf_classifier_sim_QSO_PS_ALLWISE\")\n #", "def setup(self):\n osr_split_path = os.path.join(\n self.data_root, \"imagenet_osr_splits_winter21.pkl\"\n )\n if not 
os.path.exists(osr_split_path):\n os.makedirs(self.data_root, exist_ok=True)\n osr_split = requests.get(self.OSR_URL)\n open(osr_split_path, \"wb\").write(osr_split.content)\n else:\n with open(osr_split_path, \"rb\") as f:\n osr_split = pickle.load(f)\n # Ensure data is downloaded\n assert_data_downloaded(\n osr_split, shifthappens.config.imagenet21k_preprocessed_validation_path\n )\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = _get_imagenet_ssb_subset(\n imagenet21k_root=shifthappens.config.imagenet21k_preprocessed_validation_path,\n osr_split=osr_split,\n test_transform=test_transform,\n subset_type=self.subset_type,\n )\n\n self.dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )", "def prep_func(data_dic):\n\n df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())\n\n # combine desired datasets into one dataframe\n for label in dataset_labels:\n df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)\n\n df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names\n\n # dropping unused columns/features\n for col in ['Time', 'trial', 'maneuver']:\n if col in df_all.columns:\n df_all = df_all.drop(columns=[col])\n\n columns_all = df_all.columns.tolist()\n columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data\n\n # all torque features except for roc (mean/std/... & left/right/sum/diff)\n columns_2d_torque = [col for col in df_all.columns.tolist()\n if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]\n\n # all torque features of left and right only (mean/std/... 
& left/right)\n columns_lr_torque = [col for col in df_all.columns.tolist()\n if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]\n\n columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only\n columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only\n\n # dictionary of list of feature subsets to be used for dimension_reduction or clustering\n featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,\n '2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,\n 'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}\n\n # Standardize features by removing the mean and scaling to unit variance\n scaler = StandardScaler()\n feat_all_stand = scaler.fit_transform(df_all.values)\n df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset\n\n return df_all_stand, df_all_columns, featureSet_dic", "def generate_final_data(model_names):\n\n for model_name in model_names:\n print(\"Creating fina data for \" + model_name[0])\n\n final_data = {}\n brush_data = common.load_json(\"../steps/\" + model_name[0] + \"/brush_data.json\")\n diff_data = common.load_json(\"../steps/\" + model_name[0] + \"/diff_plot_data.json\")\n distance_data = common.load_json(\"../steps/\" + model_name[0] + \"/distance_data.json\")\n\n final_data[0] = {\n \"step_number\" : 0,\n \"valid\" : brush_data['0'][\"valid\"],\n \"brush_data\" : sanitize_brush_data(brush_data['0']),\n \"diff_data\" : null_diff_data(),\n \"distance_data\" : null_distance_data()\n }\n\n for step_idx in range(1, len(brush_data)):\n print(str(step_idx) + \" \",)\n final_data[step_idx] = {}\n final_data[step_idx][\"step_number\"] = step_idx\n final_data[step_idx][\"valid\"] = brush_data[str(step_idx)][\"valid\"]\n final_data[step_idx][\"brush_data\"] = sanitize_brush_data(brush_data[str(step_idx)])\n final_data[step_idx][\"diff_data\"] = get_diff_data_step(diff_data, step_idx - 1)\n final_data[step_idx][\"distance_data\"] = get_distance_data_step(distance_data, str(step_idx))\n\n common.save_json(final_data, \"../final_data/\" + model_name[0] + \"/final_data.json\", compressed=False)", "def training_data_preprocessing(raw_data, num_passed_rows=72):\r\n # some samples have errors\r\n raw_data = raw_data[num_passed_rows:].reset_index(drop=True) \r\n \r\n # get data output\r\n data_output = raw_data[['Submitby Date Time', 'Challenge Manager', 'Challenge Copilot', 'Posting Date Date', 'Track',\r\n 'Technology List', 'First Place Prize', 'Num Registrations', 'Total Prize']]\r\n data_output, extended_columns = class_binaryzation(data_output)\r\n \r\n # save extended columns to cache\r\n extended_columns_filepath = 'cache/extended_columns.pkl'\r\n with open(extended_columns_filepath, 'wb') as f:\r\n pickle.dump(extended_columns, f)\r\n\r\n num_date_columns_filepath = 'cache/num_date_columns.pkl'\r\n try:\r\n data_output = date_separation1(data_output) \r\n with open(num_date_columns_filepath, 'wb') as f:\r\n pickle.dump(6, f)\r\n\r\n except:\r\n data_output = date_separation2(data_output)\r\n with open(num_date_columns_filepath, 'wb') as f:\r\n pickle.dump(5, f)\r\n\r\n data_output = money_digitalization(data_output)\r\n data_output = get_date_in_days(data_output)\r\n data_output['Days from Posting to Submit'] = data_output['Submitby Date Time Days from 2016'] \\\r\n - data_output['Posting Date Date Days from 2016'] \r\n \r\n # get other output\r\n label_output = pd.DataFrame(columns=['Success'])\r\n 
success_output = pd.DataFrame(columns=data_output.columns)\r\n failure_output = pd.DataFrame(columns=data_output.columns)\r\n for i in range(len(raw_data)):\r\n if raw_data.loc[i, 'Num Submissions Passed Review'] >= 1:\r\n label_output.loc[i, 'Success'] = 1\r\n success_output.loc[len(success_output)] = data_output.loc[i]\r\n else:\r\n label_output.loc[i, 'Success'] = 0\r\n failure_output.loc[len(failure_output)] = data_output.loc[i]\r\n\r\n return data_output, label_output, success_output, failure_output, extended_columns", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test Data\n 
test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def build_wmt_ft(self):\n train_files = [self.data_dir + '/' + wmt_train]\n eval_files = [self.data_dir + '/' + wmt_test]\n\n train_data = tf.data.experimental.CsvDataset(\n train_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n eval_data = tf.data.experimental.CsvDataset(\n eval_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.cache()\n train_data = train_data.cache() # only read once\n\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n return train_data, eval_data", "def _collect_and_train(self) -> None:\n self.info_process('\\n\\n')\n self.info_process('Performing daily data collection and model training...')\n\n for symbol in Settings.get_symbols(self):\n # Interrupt collection if the collection loop was stopped\n if not self._running:\n break\n\n # Revert data to last stable day.\n date_last_collected_for = self.time().now().date()\n # If it's past midnight, move back a day.\n if self.time().get_secs_to_open() < timedelta(hours=9, minutes=30).total_seconds():\n date_last_collected_for -= timedelta(days=1)\n # Move back two market days from the most recent market day.\n date_last_collected_for = self.time().get_prev_mkt_day(date_last_collected_for)\n date_last_collected_for = self.time().get_prev_mkt_day(date_last_collected_for)\n # Remove mongo price data after the stable day.\n self.mongo().remove_price_data_after(symbol, date_last_collected_for, today=self.time().now().today())\n date_rest_available_for = self.time().get_next_mkt_day(date_last_collected_for)\n\n # Collect yesterday's polygon-rest data and train on it.\n if self._train_on_rest_data(symbol, date_rest_available_for):\n self.info_process(f'Trained {symbol} on yesterday\\'s polygon rest data')\n else:\n self.warn_process(f'Invalid {symbol} rest data collected for {date_rest_available_for}. '\n f'Discarding them and attempting to use cached stream data instead')\n if self._train_on_stream_data(symbol, date_rest_available_for):\n self.info_process(f'Trained {symbol} on yesterday\\'s polygon stream data')\n else:\n self.warn_process(f'Invalid {symbol} candles cached for {date_rest_available_for}. '\n f'Could not find valid data to train on yesterday!')\n\n # Load today's polygon-stream data and train on it.\n date_cache_available_for = self.time().get_next_mkt_day(date_rest_available_for)\n if self._train_on_stream_data(symbol, date_cache_available_for):\n self.info_process(f'Trained {symbol} on today\\'s polygon stream data')\n else:\n self.warn_process(f'Invalid {symbol} candles cached for {date_rest_available_for}. 
'\n f'Could not find valid data to train on today!')", "def create_tc_data(data_name, base_location='data',mode = 'train'):\n\n dataset = {'labels': [], 'content': []}\n max_samples = 115000 if mode == 'train' else 7600\n label_to_class = dict()\n\n if data_name == 'yelp':\n df = pd.read_csv(os.path.join(base_location, 'yelp_review_full_csv', mode+'.csv'), \n header=None, names=['labels', 'content'])\n \n df.dropna(subset=['content'], inplace=True)\n df.loc[:, 'content'] = df.content.swifter.apply(preprocess)\n # filter rows with length greater than 20 (2 words including spaces on average)\n df.drop(df[df['content'].map(len) < 20].index, inplace=True)\n # shuffle and sample \n df = df.sample(n = max_samples)\n \n \n dataset['labels'].extend(list(df.labels[:max_samples]))\n dataset['content'].extend(list(df.content[:max_samples]))\n\n elif data_name == 'amazon':\n df = pd.read_csv(os.path.join(base_location, 'amazon_review_full_csv', mode+'.csv'), \n header=None, names=['labels','title','content'])\n df.dropna(subset=['content'], inplace=True)\n # df.dropna(subset=['title'], inplace=True)\n df.loc[:, 'content'] = df.content.swifter.apply(preprocess)\n # filter rows with length greater than 20 (2 words including spaces on average)\n df.drop(df[df['content'].map(len) < 20].index, inplace=True)\n # shuffle and sample \n df = df.sample(n = max_samples)\n\n dataset['labels'].extend(list(df.labels[:max_samples]))\n # dataset['content'].extend( [title + \"[SEP]\"+ content for title,content in zip(list(df.title[:max_samples]),list(df.content[:max_samples]))])\n dataset['content'].extend(list(df.content[:max_samples]))\n\n elif data_name == 'yahoo':\n df = pd.read_csv(os.path.join(base_location, 'yahoo_answers_csv', mode+'.csv'), \n header=None, names=['labels', 'title', 'content', 'answer'])\n df.dropna(subset=['content'], inplace=True)\n # df.dropna(subset=['title'], inplace=True)\n df.dropna(subset=['answer'], inplace=True)\n df.loc[:, 'content'] = df.content.swifter.apply(preprocess)\n # filter rows with length greater than 20 (2 words including spaces on average)\n df.drop(df[df['content'].map(len) < 20].index, inplace=True)\n # shuffle and sample \n df = df.sample(n = max_samples)\n dataset['labels'].extend(list(df.labels[:max_samples]))\n # dataset['content'].extend( [title + \"[SEP]\"+ content + \"[SEP]\" +answer for title,content,answer in zip(list(df.title[:max_samples]),list(df.content[:max_samples]),list(df.answer[:max_samples]))])\n dataset['content'].extend( [content + \"[SEP]\" +answer for content,answer in zip(list(df.content[:max_samples]),list(df.answer[:max_samples]))])\n\n elif data_name == 'dbpedia':\n df = pd.read_csv(os.path.join(base_location, 'dbpedia_csv', mode+'.csv'), \n header=None, names=['labels','title','content'])\n\n df.dropna(subset=['content'], inplace=True)\n # df.dropna(subset=['title'], inplace=True)\n df.loc[:, 'content'] = df.content.swifter.apply(preprocess)\n # filter rows with length greater than 20 (2 words including spaces on average)\n df.drop(df[df['content'].map(len) < 20].index, inplace=True)\n # shuffle and sample \n df = df.sample(n = max_samples)\n\n dataset['labels'].extend(list(df.labels[:max_samples]))\n # dataset['content'].extend( [title + \"[SEP]\"+ content for title,content in zip(list(df.title[:max_samples]),list(df.content[:max_samples]))])\n dataset['content'].extend(list(df.content[:max_samples]))\n \n else:\n df = pd.read_csv(os.path.join(base_location, 'ag_news_csv', mode+'.csv'), \n header=None, names=['labels','title','content'])\n 
df.dropna(subset=['content'], inplace=True)\n # df.dropna(subset=['title'], inplace=True)\n df.loc[:, 'content'] = df.content.swifter.apply(preprocess)\n # filter rows with length greater than 20 (2 words including spaces on average)\n df.drop(df[df['content'].map(len) < 20].index, inplace=True)\n # shuffle and sample \n df = df.sample(n = max_samples)\n dataset['labels'].extend(list(df.labels[:max_samples]))\n # dataset['content'].extend( [title + \"[SEP]\"+ content for title,content in zip(list(df.title[:max_samples]),list(df.content[:max_samples]))])\n dataset['content'].extend(list(df.content[:max_samples]))\n\n return dataset['labels'],dataset['content']", "def train(self, ):\n raise NotImplementedError", "def main(FLAGS):\n if FLAGS.format == 'tfrecords':\n raise NotImplementedError\n else:\n # get the names of the train image files\n train_files = txt2list(FLAGS.train_file_names)\n train_limit = floor(FLAGS.train_fraction * FLAGS.n_train)\n train_count = 0\n train_full = False\n\n # get the names of the validation image files\n valid_files = txt2list(FLAGS.valid_file_names)\n valid_limit = floor(FLAGS.valid_fraction * FLAGS.n_valid)\n valid_count = 0\n valid_full = False\n\n # get the names of the test image files\n test_files = txt2list(FLAGS.test_file_names)\n test_limit = floor(FLAGS.test_fraction * FLAGS.n_test)\n test_count = 0\n test_full = False\n\n # accumulators for the image and annotation pairs\n train_windows_with = []\n valid_windows_with = []\n test_windows_with = []\n train_windows_without = []\n valid_windows_without = []\n test_windows_without = []\n train_locations = []\n valid_locations = []\n test_locations = []\n\n # directories of sensor data and annotations\n sub_dirs = glob(os.path.join(FLAGS.satnet_data_dir, '*'))\n\n # go through each sensor collection from each site and prepare\n # the training, validation, and testing sub-windows\n for dir in sub_dirs:\n if train_full and valid_full and test_full:\n pass\n else:\n img_files = glob(os.path.join(dir, 'ImageFiles', '*.fits'))\n json_files = glob(os.path.join(dir, 'Annotations', '*.json'))\n\n # get only the name of the .json file w/o extension\n json_names = [file.split(\"\\\\\")[-1] for file in json_files]\n json_names = [name.split(\".json\")[0] for name in json_names]\n\n # get only the name of the .fits file w/o extension\n img_names = [file.split(\"\\\\\")[-1] for file in img_files]\n img_names = [name.split(\".fits\")[0] for name in img_names]\n\n # in case some annotations/images aren't paired, find the\n # common .json and .fits files names\n similar_files = set(img_names).intersection(json_names)\n\n # prepare the new images and annotations via the sliding-window\n # algorithm\n for file in similar_files:\n if train_full and valid_full and test_full:\n pass\n else:\n # load SatNet image and its corresponding annotations\n img_path = os.path.join(dir, 'ImageFiles', file + '.fits')\n anno_path = os.path.join(dir, 'Annotations', file + '.json')\n image = SatelliteImage(img_path)\n anno = ImageAnnotations(anno_path)\n\n # find the data partition this example belongs to and add\n # that data to the accumulators\n comp_name = '_'.join([anno.directory, anno.name])\n\n # pull all object centroids in the image and store in a list\n centroids = []\n [centroids.append([obj.y_c, obj.x_c]) for obj in anno.objects]\n\n # run sliding window algorithm across the image\n sw = SatNetSubWindows(img=image.image,\n centroids=centroids,\n window_size=FLAGS.window_size,\n stride=FLAGS.stride,\n padding=FLAGS.padding,\n 
img_width=FLAGS.width,\n img_height=FLAGS.height)\n sw.get_obj_windows()\n\n # find how many background windows to include from the image\n # and generate that many number of random indices to pull\n # them\n if sw.windows_with is not None:\n n_with = sw.windows_with.shape[0]\n n_without = int(FLAGS.bg2sat_ratio * n_with)\n else:\n n_without = int(FLAGS.bg2sat_ratio)\n inds = np.random.permutation(sw.windows_without.shape[0])\n inds = inds[:n_without]\n\n # determine the status of the accumulators\n if train_count >= train_limit:\n train_full = True\n if valid_count >= valid_limit:\n valid_full = True\n if test_count >= test_limit:\n test_full = True\n\n # accumulate sub-windows into the three data\n # partitions\n if comp_name in train_files and not train_full:\n if sw.windows_with is not None:\n train_windows_with.append(sw.windows_with)\n train_locations.append(sw.object_location_with)\n train_windows_without.append(sw.windows_without[inds, :, :])\n train_count += 1\n elif comp_name in valid_files and not valid_full:\n if sw.windows_with is not None:\n valid_windows_with.append(sw.windows_with)\n valid_locations.append(sw.object_location_with)\n valid_windows_without.append(sw.windows_without[inds, :, :])\n valid_count += 1\n elif comp_name in test_files and not test_full and FLAGS.save_test:\n if sw.windows_with is not None:\n test_windows_with.append(sw.windows_with)\n test_locations.append(sw.object_location_with)\n test_windows_without.append(sw.windows_without[inds, :, :])\n test_count += 1\n else:\n print('Windows belong to a filled accumulator... skipped them.')\n pass\n print('Accumulators: train - {}% , valid - {}% , test - {}%'.format(\n int(train_count / train_limit * 100),\n int(valid_count / valid_limit * 100),\n int(test_count / test_limit * 100)))\n\n # combine all of the sub-windows and annotations for each data\n # partition\n train_windows_with = np.concatenate(train_windows_with)\n train_windows_without = np.concatenate(train_windows_without)\n train_locations = np.concatenate(train_locations)\n train_annos_with = np.ones(train_windows_with.shape[0])\n train_annos_without = np.zeros(train_windows_without.shape[0])\n valid_windows_with = np.concatenate(valid_windows_with)\n valid_windows_without = np.concatenate(valid_windows_without)\n valid_locations = np.concatenate(valid_locations)\n valid_annos_with = np.ones(valid_windows_with.shape[0])\n valid_annos_without = np.zeros(valid_windows_without.shape[0])\n\n if FLAGS.save_test:\n test_windows_with = np.concatenate(test_windows_with)\n test_windows_without = np.concatenate(test_windows_without)\n test_locations = np.concatenate(test_locations)\n test_annos_with = np.ones(test_windows_with.shape[0])\n test_annos_without = np.zeros(test_windows_without.shape[0])\n\n train_windows = np.concatenate((train_windows_with, train_windows_without))\n train_annos = np.concatenate((train_annos_with, train_annos_without))\n valid_windows = np.concatenate((valid_windows_with, valid_windows_without))\n valid_annos = np.concatenate((valid_annos_with, valid_annos_without))\n\n if FLAGS.save_test:\n test_windows = np.concatenate((test_windows_with, test_windows_without))\n test_annos = np.concatenate((test_annos_with, test_annos_without))\n\n path_append = '_seedNet2satNet_windowsize_{}_stride_{}_padding_{}_ratio_{}_trainfraction_{}.h5'.format(FLAGS.window_size, FLAGS.stride, FLAGS.padding, FLAGS.bg2sat_ratio, FLAGS.train_fraction)\n train_c_windows_path = os.path.join(FLAGS.save_data_dir, 'train_classification_windows' + 
path_append)\n train_c_labels_path = os.path.join(FLAGS.save_data_dir, 'train_classification_labels' + path_append)\n train_l_windows_path = os.path.join(FLAGS.save_data_dir, 'train_localization_windows' + path_append)\n train_l_labels_path = os.path.join(FLAGS.save_data_dir, 'train_localization_labels' + path_append)\n valid_c_windows_path = os.path.join(FLAGS.save_data_dir, 'valid_classification_windows' + path_append)\n valid_c_labels_path = os.path.join(FLAGS.save_data_dir, 'valid_classification_labels' + path_append)\n valid_l_windows_path = os.path.join(FLAGS.save_data_dir, 'valid_localization_windows' + path_append)\n valid_l_labels_path = os.path.join(FLAGS.save_data_dir, 'valid_localization_labels' + path_append)\n\n if FLAGS.save_test:\n test_c_windows_path = os.path.join(FLAGS.save_data_dir, 'test_classification_windows' + path_append)\n test_c_labels_path = os.path.join(FLAGS.save_data_dir, 'test_classification_labels' + path_append)\n test_l_windows_path = os.path.join(FLAGS.save_data_dir, 'test_localization_windows' + path_append)\n test_l_labels_path = os.path.join(FLAGS.save_data_dir, 'test_localization_labels' + path_append)\n\n write_hdf5(train_c_windows_path, train_windows)\n write_hdf5(train_c_labels_path, train_annos)\n write_hdf5(train_l_windows_path, train_windows_with)\n write_hdf5(train_l_labels_path, train_locations)\n write_hdf5(valid_c_windows_path, valid_windows)\n write_hdf5(valid_c_labels_path, valid_annos)\n write_hdf5(valid_l_windows_path, valid_windows_with)\n write_hdf5(valid_l_labels_path, valid_locations)\n\n if FLAGS.save_test:\n write_hdf5(test_c_windows_path, test_windows)\n write_hdf5(test_c_labels_path, test_annos)\n write_hdf5(test_l_windows_path, test_windows_with)\n write_hdf5(test_l_labels_path, test_locations)", "def ensemble_001():\n n_centroids = 3000\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_ensemble_001',\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n X = kmeans_generator.transform(images, save_to_file='data/data_ensemble_001.npy', memmap=True)\n Y = classes.train_solutions.data\n\n # Unload some objects\n del images\n gc.collect()\n\n # Get the input for the RF so that we can split together\n sampler = SampleTransformer(training=True, steps=2, step_size=20, n_jobs=-1)\n pX = sampler.transform()\n\n # manual split of train and test\n train_x, test_x, ptrain_x, ptest_x, train_y, test_y = train_test_split(X, pX, Y, test_size=0.5)\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 500}, n_jobs=-1)\n wrapper.fit(train_x, train_y)\n kmeans_preds = wrapper.predict(test_x)\n\n pWrapper = ModelWrapper(RandomForestRegressor, {'n_estimators': 500, 'verbose': 3}, n_jobs=-1)\n pWrapper.fit(ptrain_x, train_y)\n pixel_preds = pWrapper.predict(ptest_x)\n\n logger.info('Kmeans')\n classes.colwise_rmse(kmeans_preds, test_y)\n classes.rmse(kmeans_preds, test_y)\n logger.info('Pixel RF')\n classes.colwise_rmse(pixel_preds, test_y)\n classes.rmse(pixel_preds, test_y)\n\n logger.info(\"Ensembling predictions\")\n 
etrain_x = np.hstack((wrapper.predict(train_x), pWrapper.predict(ptrain_x)))\n etest_x = np.hstack((kmeans_preds, pixel_preds))\n eWrapper = ModelWrapper(RandomForestRegressor, {'n_estimators': 500, 'verbose': 3}, n_jobs=-1)\n eWrapper.fit(etrain_x, train_y)\n ensemble_preds = eWrapper.predict(etest_x)\n classes.colwise_rmse(ensemble_preds, test_y)\n classes.rmse(ensemble_preds, test_y)", "def classification(trainData, trainLabels, testData, method):\n\n nClass = 2\n classLabels = [0,1]\n\n trainLabelsUnqArr = np.unique(trainLabels)\n\n if method == 'NaiveBayes':\n classifier = GaussianNB()\n model = classifier.fit(trainData, trainLabels)\n result = model.predict(testData)\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n elif method == 'knnVoting':\n\n classifier = KNeighborsClassifier(5)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'RandomForests':\n\n classifier = RandomForestClassifier(max_depth=10, random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'SVM':\n\n classifier = svm.SVC(C=3, gamma=0.003, probability=True)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'AdaBoost':\n\n classifier = AdaBoostClassifier()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'NeuralNetwork':\n classifier = MLPClassifier(alpha=1)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LogisticRegression':\n classifier = LogisticRegression()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LinearSVM':\n classifier = LinearSVC(random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n ############################################\n importances = model.coef_\n # std = np.std([tree.feature_importances_ for tree in model.estimators_],\n plt.plot(importances.shape[1])\n plt.ylabel('some numbers')\n plt.show()\n elif method == 'kNN':\n\n # logger.info(model.coef_)\n # proba = model.predict_proba(testData)\n # proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n # probaDf = pd.DataFrame(data=proba, columns=classLabels)\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(trainData, trainLabels)\n\n result=neigh.predict(testData)\n probaDf=neigh.predict_proba(testData)\n\n # logger.info(method)\n\n return result, probaDf", "def form_data(stocks, init_param):\r\n \r\n rs = stocks[1].rsi\r\n ts = stocks[1].tsi\r\n a = 1\r\n \r\n \r\n for date in init_param.train_dates:\r\n try:\r\n training_data\r\n except NameError:\r\n training_data = LearningData()\r\n training_data.construct(stocks, date, init_param.future_day, init_param.features)\r\n else:\r\n training_data.append(stocks, date, init_param.future_day, init_param.features)\r\n \r\n for date in init_param.test_dates:\r\n try:\r\n test_data\r\n except NameError:\r\n test_data = LearningData()\r\n test_data.construct(stocks, date, init_param.future_day, init_param.features)\r\n else:\r\n test_data.append(stocks, date, init_param.future_day, init_param.features)\r\n \r\n #reference_date = dateutl.days_since_1900('1991-01-01')\r\n #test_data.construct(stocks,[reference_date, day_history, init_param.future_day])\r\n \r\n return training_data, test_data", "def finetune_learningrate_createData():\n acc,auc = [],[]\n for i in tqdm([j*0.005 for j in range(1,31)],desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n X_train, X_test, y_train, y_test, X, y_binary = initializing()\n XGBCla = get_XGBmodel(lr=i)\n XGBCla = XGBCla.fit(X_train, y_train)\n acc.append(accuracy_score(XGBCla.predict(X_test),y_test))\n auc.append(roc_auc_score(XGBCla.predict(X_test),y_test))\n np.save(\"npy-data/result_learningrate_tuning_acc_auc_crossval_train\",acc+auc)", "def train(train_set, test_set, train_label, test_label, data_name, test_filenames, dimension_reduce=False,\n distribute_training=False):\n train_set = np.array(train_set)\n test_set = np.array(test_set)\n\n print(\"The shape of training set before dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set 
before dimension reduction is {0}\".format(test_set.shape))\n print('Use distribute training ? >> {0}'.format(distribute_training))\n reg = linear_model.BayesianRidge()\n\n if dimension_reduce:\n pca = PCA(n_components=128)\n train_set = pca.fit_transform(train_set)\n test_set = pca.fit_transform(test_set)\n\n print(\"The shape of training set after dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set after dimension reduction is {0}\".format(test_set.shape))\n\n if not distribute_training:\n reg.fit(train_set, train_label)\n else:\n train_set, test_set, train_label, test_label = da.array(train_set), da.array(test_set), da.array(\n train_label), da.array(test_label)\n reg.fit(train_set, train_label)\n\n predicted_label = reg.predict(test_set)\n mae_lr = round(mean_absolute_error(test_label, predicted_label), 4)\n rmse_lr = round(math.sqrt(mean_squared_error(test_label, predicted_label)), 4)\n pc = round(np.corrcoef(test_label, predicted_label)[0, 1], 4)\n print('===============The Mean Absolute Error of Model is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of Model is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of Model is {0}===================='.format(pc))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, './model/BayesRidge_%s.pkl' % data_name)\n print('The regression model has been persisted...')\n\n mkdirs_if_not_exist('./result')\n\n out_result(test_filenames, predicted_label, test_label, None, path='./result/Pred_GT_{0}.csv'.format(data_name))\n\n df = pd.DataFrame([mae_lr, rmse_lr, pc])\n df.to_csv('./result/%s.csv' % data_name, index=False)\n print('The result csv file has been generated...')", "def train():\n pass", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def create_models(self):\r\n self.all_ratings = AllRatingsWithCommon(\r\n experts=self.users,\r\n objects=self.videos,\r\n output_features=self.features,\r\n name=\"prod\",\r\n )\r\n\r\n print_memory(stage=\"DPLF:ratings_nodata_created\")\r\n\r\n # creating models\r\n self.user_to_model = {\r\n user: FeaturelessPreferenceLearningModel(\r\n expert=user, all_ratings=self.all_ratings\r\n )\r\n for user in self.users\r\n }\r\n\r\n print_memory(stage=\"DPLF:models_created\")\r\n\r\n # before creating the aggregator, filling models with data\r\n self.user_to_size = {\r\n user: self.fill_model_data(self.user_to_model[user], user)\r\n for user in tqdmem(self.users, desc=\"fill_data\")\r\n }\r\n\r\n # virtual 'common' data\r\n fplm_common = FeaturelessPreferenceLearningModel(\r\n expert=AllRatingsWithCommon.COMMON_EXPERT, all_ratings=self.all_ratings\r\n )\r\n fplm_common.on_dataset_end()\r\n\r\n print_memory(stage=\"DPLF:data_filled\")\r\n\r\n # resetting the model given the data\r\n self.all_ratings.reset_model()\r\n\r\n print_memory(stage=\"DPLF:model_reset_ok\")\r\n\r\n # aggregating models\r\n self.aggregator = FeaturelessMedianPreferenceAverageRegularizationAggregator(\r\n models=[self.user_to_model[u] for u in self.users]\r\n )\r\n self.aggregator.certification_status = self.user_certified\r\n\r\n print_memory(stage=\"DPLF:aggregator_created\")", "def getRegressionOutput(self):\n\n # Construct train data\n X_tmp = np.empty(shape=(0, 14))\n for flightNum in 
range(len(self.routes)):\n # concatenate the buy or wait info to get the total datas\n y_train = self.y_train.reshape((self.y_train.shape[0],1))\n y_train_price = self.y_train_price.reshape((self.y_train_price.shape[0],1))\n\n X_train = np.concatenate((self.X_train, y_train, y_train_price), axis=1)\n\n # choose one route datas\n X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]\n\n # remove dummy variables\n # feature 8: departure date; feature 9: observed date state\n # feature 10: minimum price; feature 11: maximum price\n # feature 12: prediction(buy or wait); feature 13: current price\n X_train = X_train[:, 0:14]\n\n # group by the feature: departure date\n departureDates_train = np.unique(X_train[:, 8])\n\n # get the final datas, the observed data state should be from large to small(i.e. for time series)\n for departureDate in departureDates_train:\n indexs = np.where(X_train[:, 8]==departureDate)[0]\n datas = X_train[indexs, :]\n minPrice = min(datas[:, 10])\n datas[:, 12] = minPrice\n \"\"\"\n print departureDate\n print minPrice\n print datas\n \"\"\"\n X_tmp = np.concatenate((X_tmp, datas), axis=0)\n\n X_train = X_tmp[:, 0:12]\n y_train = X_tmp[:, 12]\n y_train_price = X_tmp[:, 13]\n y_train = y_train.reshape((y_train.shape[0], 1))\n y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))\n\n\n X_train = np.concatenate((X_train, y_train_price), axis=1)\n np.save('inputReg/X_train', X_train)\n np.save('inputReg/y_train', y_train)\n np.save('inputReg/y_train_price', y_train_price)\n\n\n # Construct test data\n X_tmp = np.empty(shape=(0, 14))\n for flightNum in range(len(self.routes)):\n # concatenate the buy or wait info to get the total datas\n y_test = self.y_test.reshape((self.y_test.shape[0],1))\n y_test_price = self.y_test_price.reshape((self.y_test_price.shape[0],1))\n\n X_test = np.concatenate((self.X_test, y_test, y_test_price), axis=1)\n\n # choose one route datas\n X_test = X_test[np.where(X_test[:, flightNum]==1)[0], :]\n\n # remove dummy variables\n # feature 8: departure date; feature 9: observed date state\n # feature 10: minimum price; feature 11: maximum price\n # feature 12: prediction(buy or wait); feature 13: current price\n X_test = X_test[:, 0:14]\n\n # group by the feature: departure date\n departureDates_test = np.unique(X_test[:, 8])\n\n # get the final datas, the observed data state should be from large to small(i.e. 
for time series)\n for departureDate in departureDates_test:\n indexs = np.where(X_test[:, 8]==departureDate)[0]\n datas = X_test[indexs, :]\n minPrice = min(datas[:, 10])\n datas[:, 12] = minPrice\n \"\"\"\n print departureDate\n print minPrice\n print datas\n \"\"\"\n X_tmp = np.concatenate((X_tmp, datas), axis=0)\n\n X_test = X_tmp[:, 0:12]\n y_test = X_tmp[:, 12]\n y_test_price = X_tmp[:, 13]\n y_test = y_test.reshape((y_test.shape[0], 1))\n y_test_price = y_test_price.reshape((y_test_price.shape[0], 1))\n X_test = np.concatenate((X_test, y_test_price), axis=1)\n np.save('inputReg/X_test', X_test)\n np.save('inputReg/y_test', y_test)\n np.save('inputReg/y_test_price', y_test_price)", "def get_dataset(FOLD, AR_PERCENTAGE, d_type='yelp', AUTHOR='inf', POST='inf'):\n global AR_TYPE\n\n # dataset = loader.load(d_type, AUTHOR, POST)\n first_dataset = loader.unimportant_load(AUTHOR, POST * FOLD, AR_TYPE)\n datasets = first_dataset.fold_to(FOLD)\n \n for i in range(0, len(datasets)):\n dataset = datasets[i]\n dataset.divide_ar_ir(AR_PERCENTAGE)\n texts = []\n\n # check if we have this dataset already calculated.\n \n ir_filename = 'processed/' + get_ir_identifier(d_type, i, AUTHOR, POST)\n ar_filename = 'processed/' + get_ar_identifier(d_type, i, AUTHOR, POST)\n\n ir_features = None\n if os.path.isfile(ir_filename):\n print '@get: we have the file', ir_filename, 'and going to load it.'\n with open(ir_filename, 'rb') as fp:\n ir_features = pickle.load(fp)\n \n ar_features = None\n if os.path.isfile(ar_filename):\n print '@get: we have the file', ar_filename, 'and going to load it.'\n with open(ar_filename, 'rb') as fp:\n ar_features = pickle.load(fp)\n\n\n if ir_features is not None:\n for author in dataset.authors:\n dataset.features[author][-1] = ir_features[author]\n\n if ar_features is not None:\n for author in dataset.authors:\n dataset.features[author][:-1] = ar_features[author]\n\n for author in dataset.authors:\n if ar_features is None:\n texts.extend(dataset.get_ars(author))\n if ir_features is None: \n texts.append(dataset.get_ir(author))\n\n print '@getting_features, #dataset'#, index_fold\n pool = Pool(processes=NUMBER_OF_CORES)\n it = pool.imap(get_dataset_features, texts)\n pool.close()\n pool.join()\n\n print '@getting_features FINISHED, adding features to dictionary'\n for author in dataset.authors:\n # for each ar + ir, get back the features\n if ar_features is None:\n for i in range(0, dataset.get_ar_size(author)):\n dataset.put_feature(author, i, it.next())\n if ir_features is None:\n dataset.put_feature(author, dataset.get_ar_size(author), it.next())\n\n if ir_features is None:\n print '@get: we DONOT have the file', ir_filename, 'is going to be created and saved.'\n with open(ir_filename, 'wb') as fp:\n tmp = dict()\n for key, value in dataset.features.iteritems():\n tmp[key] = value[-1]\n pickle.dump(tmp, fp)\n\n if ar_features is None:\n print '@get: we DONOT have the file', ar_filename, 'is going to be created and saved.'\n with open(ar_filename, 'wb') as fp:\n tmp = defaultdict(list)\n for key, value in dataset.features.iteritems():\n tmp[key] = value[:-1]\n pickle.dump(tmp, fp)\n\n return datasets", "def prediction_data(median_split,mean_split,std_split,degrees_split,weight_split,export_file):\n DATA_TEST_PATH = '../data/test.csv' # Download train data and supply path here \n print('\\nIMPORTING TESTING DATA :',end=\" \")\n y_test, tX_test, ids_test = load_csv_data(DATA_TEST_PATH)\n print('DONE')\n \n #5.a. 
Splitting the testing data\n print('SPLITTING TESTING DATA :',end=\" \")\n y_test_split,tx_test_split,id_test_split = split_dataset(y_test,tX_test,ids_test) \n print('DONE') \n #5.b. prediction on each model\n y_pred = list()\n \n for split,(y_test_s,tx_test_s,id_test_s) in enumerate(zip(y_test_split,tx_test_split,id_test_split)): \n print('PREDICTION FOR TESTING DATA SPLIT NUMBER',split)\n \n #Formatting to the correct datatype\n y_test_s = np.squeeze(y_test_s)\n tx_test_s = np.squeeze(tx_test_s)\n id_test_s = np.squeeze(id_test_s)\n print('Size of the vectors',y_test_s.shape,tx_test_s.shape) \n #Formatting the data themselves\n print('Counting NaN',end='. ')\n tx_test_s = count_NaN(tx_test_s)\n print('Sanitizing',end = ' . ')\n tx_test_s,median_vec = sanitize_NaN(tx_test_s,median_split[split])\n print('Standardizing',end = ' .')\n tx_test_s,mean_te,std_te = standardize(tx_test_s,mean_split[split],std_split[split])\n print('Building polynomial basis') \n tx_test_s = build_poly(tx_test_s, degrees_split[split])\n \n #Prediction\n y_pred.append(predict_labels(np.array(weight_split[split]), np.array(tx_test_s))) \n \n print('MERGING TESTING DATA',end=\"\")\n y_pred_merged, ids_merged = merge_dataset(y_pred,id_test_split)\n print('DONE')\n \n OUTPUT_PATH = 'results/output_sanitized_normalization_'+export_file+'.csv' \n print('EXPORTING TESTING DATA WITH PREDICTIONS :',end=\" \")\n \n create_csv_submission(ids_merged, y_pred_merged, OUTPUT_PATH)\n print('DONE')", "def generate_new_features(data):\n utils.save_log('{0} :: {1}'.format(\n generate_new_features.__module__,\n generate_new_features.__name__))\n\n data = create_feature_is_credit_debit(data)\n data = create_feature_value_category(data)\n data = create_features_from_transaction_timestamp(data)\n data = create_feature_based_on_spent_by_timestamp(data)\n list_of_categories = config.feature_categorical_to_check_spent_value\n data = create_features_avg_ratio_value_by_categories(data,\n list_of_categories)\n return data", "def build_newstest_finetune(self):\n # Note that this function is purposefully similar to build_newscomment_only\n # The two datasets have very similar structure and it would just be more\n # confusing to refactor code, creating multiple overlapping paths.\n logging.info('Building newstest finetune dataset')\n logging.info(self.configs[NEWSTEST])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[NEWSTEST],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec()\n logging.info('Training on TFDS dataset %s with split %s',\n WMT_BASE_DATASET_NAME, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=self.shuffle_train_files)\n eval_data = self.default_eval_builder(builder, shard_spec)\n return train_data, eval_data", "def preprocess():\n #get a list of all sentinel-image filenames\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n #read in a csv-file with information about the cluster\n csvpath = os.path.abspath(os.path.join(os.path.abspath(__file__),\"../../dataResearch/Data_with_Pooled.csv\"))\n df = pd.read_csv(csvpath)\n #get the min and max values per band \n minmaxlist = minmax()\n timelist = []\n print(\"STEP 2/2\")\n print(\"CREATING TFRECORDS\")\n for i in s2files:\n start = time.time()\n s2file = s2path + \"/\" + i\n #Get Features out of the Dataframe\n #get the name of the label (equals the SurveyID in the data)\n labelname = i.replace(\".tif\",\"\")\n #get the index 
of the entry to get the information out of the dataframe\n index = df.ID[df.ID == labelname].index\n wealthpooled = float(df['wealthpooled'].loc[index].max().replace(\",\",\".\"))\n wealthpooled5country = float(df['wealthpooled5country'].loc[index].max().replace(\",\",\".\"))\n country = bytes(df['country'].loc[index].max(), 'utf-8')\n urbanrural = bytes(df['URBAN_RURA'].loc[index].max(), 'utf-8')\n csvlat = float(df['LATNUM'].loc[index].max().replace(\",\",\".\"))\n csvlon = float(df['LONGNUM'].loc[index].max().replace(\",\",\".\"))\n year = int(df['year'].loc[index].max())\n wealth = float(df['wealth'].loc[index].max().replace(\",\",\".\"))\n #Get all Bands out of the GEOTIFF File\n s2raster = gdal.Open(s2file)\n bandlist = []\n for n in range(s2raster.RasterCount):\n f = n+1\n if n not in [13,14,15]:\n s2band = s2raster.GetRasterBand(f)\n s2band = s2band.ReadAsArray()\n s2band = np.resize(s2band,(1050,1050)).flatten()\n min = minmaxlist[n][0]\n max = minmaxlist[n][1]\n s2band = (s2band-min)/(max-min)\n bandlist.append(s2band.flatten())\n #get the Nightlight Band out of the GEOTIFF File\n nlfile = nlpath + \"/\" + i\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n nlband = nlband.ReadAsArray()\n nlband = np.resize(nlband,(1050,1050)).flatten()\n min = minmaxlist[13][0]\n max = minmaxlist[13][1]\n nlband = (nlband-min)/(max-min)\n bandlist.append(nlband)\n #create a TFRecords-File with the TFRecordWriter\n with tf.io.TFRecordWriter(exportpath + '/' + labelname + '.tfrec') as writer:\n example = serialize_example(B1=bandlist[0],\n B2=bandlist[1],\n B3=bandlist[2],\n B4=bandlist[3],\n B5=bandlist[4],\n B6=bandlist[5],\n B7=bandlist[6],\n B8=bandlist[7],\n B8A=bandlist[8],\n B9=bandlist[9],\n B10=bandlist[10],\n B11=bandlist[11],\n B12=bandlist[12],\n NL=bandlist[13],\n wealth=wealth,\n wealthpooled=wealthpooled,\n wealthpooled5country=wealthpooled5country,\n country=country,\n urbanrural=urbanrural,\n lon_coord=csvlon,\n lat_coord=csvlat,\n year=year)\n writer.write(example)\n end = time.time()\n timelist.append(end-start)\n print(\"Done!\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. 
time left:\",time.strftime('%d:%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))", "def generate_stats(simulation_folder, featured_model=\"RF\", test_version=''):\n\n Start = datetime.now()\n project_directory = os.path.dirname(os.getcwd())\n path_to_data = os.path.join(project_directory, \"Data\", \"Synthetic data\")\n path_to_characteristics_data = os.path.join(path_to_data, simulation_folder,\n \"Characteristics\" + test_version)\n path_to_scenario = os.path.join(project_directory, \"Models\", featured_model,\n simulation_folder, \"Model\" + test_version)\n path_to_stats = os.path.join(path_to_scenario, \"Stats\")\n if not os.path.exists(path_to_stats):\n os.makedirs(path_to_stats)\n path_to_model = os.path.join(path_to_scenario, \"model.sav\")\n path_to_labelencoder = os.path.join(path_to_characteristics_data, 'classes.npy')\n labelencoder = LabelEncoder()\n classes = [x.split(\"_\")[0] for x in np.load(path_to_labelencoder)]\n labelencoder.classes_ = classes\n X_train = np.load(os.path.join(path_to_characteristics_data, \"X_train.npy\"))\n X_test = np.load(os.path.join(path_to_characteristics_data, \"X_test.npy\"))\n y_train = np.load(os.path.join(path_to_characteristics_data, \"y_train.npy\"))\n y_test = np.load(os.path.join(path_to_characteristics_data, \"y_test.npy\"))\n # TODO: fix the save of the data to get variable names from there\n characteristics_data = pd.read_csv(os.path.join(path_to_characteristics_data, \"characteristics.csv\"))\n model = joblib.load(path_to_model)\n data_type = [\"Train\", \"Test\"]\n for dt in data_type:\n X = X_train if dt == \"Train\" else X_test\n y = y_train if dt == \"Train\" else y_test\n # Making the Confusion Matrix\n y_pred = model.predict(X)\n cm = confusion_matrix(y, y_pred)\n\n vers = \"no D\" if \"_noD\" in test_version else \"with D\"\n\n # Plot non-normalized confusion matrix\n fig = plt.figure()\n plot_confusion_matrix(cm, classes=labelencoder.classes_, title=featured_model+ \", \" +vers)\n fig.savefig(os.path.join(path_to_stats, \"Confusion_Matrix_NotNormalized_\" + dt + \".pdf\"), dpi=fig.dpi)\n plt.close()\n\n # Plot normalized confusion matrix\n fig = plt.figure()\n plot_confusion_matrix(cm, classes=labelencoder.classes_, normalize=True, title=featured_model+ \", \" +vers)\n fig.savefig(os.path.join(path_to_stats, \"Confusion_Matrix_Normalized_\" + dt + \".pdf\"), dpi=fig.dpi)\n plt.close()\n\n # class report\n print(\"class report\")\n report = classification_report(y, y_pred, target_names=labelencoder.classes_, digits=3)\n report = pandas_classification_report(report)\n report.to_csv(os.path.join(path_to_stats, \"Classification_Report_\" + dt + \".csv\"))\n\n # accuracy\n print(\"acc\")\n acu = accuracy_score(y, y_pred)\n df = pd.DataFrame({'acc': [acu]})\n df.to_csv(os.path.join(path_to_stats, \"Accuracy_\" + dt + \".csv\"))\n\n # feature importances\n importances = model.feature_importances_\n if \"_noD\" in test_version:\n column_names = characteristics_data.drop([\"file\", \"motion\", \"diff_type\", \"D\"], axis=1).columns.values\n else:\n column_names = characteristics_data.drop([\"file\", \"motion\", \"diff_type\"], axis=1).columns.values\n df = imp_df(column_names, importances)\n df.to_csv(os.path.join(path_to_stats, \"Feature_importances.csv\"), index=False)\n\n # permutation importances\n X_train_df = pd.DataFrame(X_train, columns=column_names)\n y_train_df = pd.DataFrame(y_train)\n df = permutation_importances(clone(model), X_train_df, y_train_df, accuracy)\n 
df.to_csv(os.path.join(path_to_stats, \"Permutation_fi.csv\"), index=True)\n\n # drop column feature importance\n X_train_df = pd.DataFrame(X_train, columns=column_names)\n df = drop_col_feat_imp(model, X_train_df, y_train)\n df.to_csv(os.path.join(path_to_stats, \"Drop_column_fi.csv\"), index=False)\n\n End = datetime.now()\n ExecutedTime = End - Start\n df = pd.DataFrame({'ExecutedTime': [ExecutedTime]})\n df.to_csv(os.path.join(path_to_stats, \"time_for_stats_generator.csv\"))\n print(ExecutedTime)", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def collect_best_features(self):\n bincsp = self.binary_csp # just to make code shorter\n 
n_folds = len(self.binary_csp.folds)\n n_class_pairs = len(self.binary_csp.class_pairs)\n result_shape = (n_folds, n_class_pairs)\n self.train_feature = np.empty(result_shape, dtype=object)\n self.train_feature_full_fold = np.empty(result_shape, dtype=object)\n self.test_feature = np.empty(result_shape, dtype=object)\n self.test_feature_full_fold = np.empty(result_shape, dtype=object)\n self.selected_filters_per_filterband = np.empty(result_shape, dtype=object)\n for fold_i in range(n_folds):\n for class_pair_i in range(n_class_pairs):\n bin_csp_train_features = deepcopy(bincsp.train_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_train_features_full_fold = deepcopy(\n bincsp.train_feature_full_fold[\n self.selected_filter_inds,\n fold_i, class_pair_i])\n bin_csp_test_features = deepcopy(bincsp.test_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_test_features_full_fold = deepcopy(\n bincsp.test_feature_full_fold[\n self.selected_filter_inds,fold_i, class_pair_i])\n selected_filters_per_filt = self.select_best_filters_best_filterbands(\n bin_csp_train_features, max_features=self.n_features,\n forward_steps=self.forward_steps, \n backward_steps=self.backward_steps,\n stop_when_no_improvement=self.stop_when_no_improvement)\n self.train_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features, selected_filters_per_filt)\n self.train_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features_full_fold, selected_filters_per_filt)\n \n self.test_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features, selected_filters_per_filt)\n self.test_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features_full_fold, selected_filters_per_filt)\n \n self.selected_filters_per_filterband[fold_i, class_pair_i] = \\\n selected_filters_per_filt", "def train(self, training_data):\n pass", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def analysisData(train_data, test_data, users_data, products_data):\n\n #\n\n # how many data are\n dataShape(train_data)\n dataShape(test_data)\n dataShape(users_data)\n dataShape(products_data)\n\n \"\"\" CSV shape \n Train: 416147\n Test: 46239\n Usuario: 68819\n Producto: 309961\n \"\"\"\n\n # Users, products and tag are unique:\n removeDuplicated(train_data) # obtenemos: tag_id column of your data is not duplicated\n removeDuplicated(users_data) # obtenemos: user_id column of your data is not duplicated\n removeDuplicated(products_data) #\n\n # Null values\n checkNulls(train_data, remove= False) # No null values\n checkNulls(users_data, remove= False) # No null values\n checkNulls(products_data, remove= False) # product_info: 443 null values and description: 81381 null values\n\n\n # cluck count description\n dataDescription(train_data, 'click_count')\n\n # click count distribution\n countClickIntervals(train_data) # Most of tag id have only 1 click, then 0 clicks\n # It is not possible detecting outliers by the moment\n\n # analize the data tange in users and train\n dateAnalyzing(users_data, train_data)\n\n \"\"\"\n USERS: maxim date:, 2016-01-21 minim date: 2017-06-30 unique dates 522\n TRAIN: maxim date:, 2017-04-23 minim date:: 2017-06-19 unique dates 58\n \n Number of users increases with the time. 
las few dates i where the number of new users is widely higher \n \"\"\"\n\n # Sample distribution\n g = visualization.plotSns(train_data[['tag_id', 'post_id', 'product_id', 'user_id', 'click_count']], diag_kind=\"kde\", markers='+')\n\n # How many unique users are in the training subset?\n print('The number of unique users are:', len(train_data['user_id'].unique()))\n\n return train_data, test_data, users_data, products_data", "def surface_labelled_segmentation(self):\n tic = time.perf_counter()\n\n # Collect the data\n ###########################################\n training_data, dev_data, test_data = {}, {}, {}\n dictionaries = (training_data, dev_data, test_data)\n counter = 0\n for file in self.input_files:\n input_file = open(os.path.join(sys.path[0], file), 'r')\n for line in input_file.readlines():\n content = line.rstrip('\\n').split(\" | \")\n labels = '-'.join(get_labels(content[2]))\n segments = removeLabels(content[2])\n\n # dictionaries[counter][content[0]] = [segments, labels] # word:[[segments],[labels]]\n dictionaries[counter][segments] = labels # segments : labels\n input_file.close()\n counter += 1\n\n toc = time.perf_counter()\n print(\"Data Collected in \" + str(tic - toc.__round__(2)))\n\n # Evaluate Model On the Test Set Using Optimised Model\n #######################################################\n\n best_delta = 8\n best_epsilon = 0.0000001\n best_max_iteration = 160\n best_algo = 'ap'\n\n best_epsilon, best_max_iteration = 0, 0\n maxF1 = 0\n print(\"Beginning Feature Computation and Model Optimisation\")\n tic = time.perf_counter()\n\n '''for epsilon in [0.001, 0.00001, 0.0000001]:\n for max_iterations in [80, 120, 160, 200]:\n X_training, Y_training, words_training = surface_labelled_data_preparation(training_data)\n X_dev, Y_dev, words_dev = surface_labelled_data_preparation(dev_data)\n crf = sklearn_crfsuite.CRF(algorithm='ap', epsilon=epsilon, max_iterations=max_iterations)\n crf.fit(X_training, Y_training, X_dev=X_dev, y_dev=Y_dev)\n\n Y_predict = crf.predict(X_dev)\n # f1 = f1_score(Y_dev, Y_predict, average='micro')\n labels = list(crf.classes_)\n sorted_labels = sorted(labels)\n f1 = metrics.flat_f1_score(Y_dev, Y_predict, average='micro', labels=labels, zero_division=0)\n if f1 > maxF1:\n f1 = maxF1\n best_epsilon = epsilon\n best_max_iteration = max_iterations\n\n print(best_max_iteration)\n print(best_epsilon)'''\n\n X_training, Y_training, words_training = surface_labelled_data_preparation(training_data)\n X_dev, Y_dev, words_dev = surface_labelled_data_preparation(dev_data)\n X_test, Y_test, words_test = surface_labelled_data_preparation(test_data)\n print(\"Data Processed\")\n\n best_epsilon = 1e-07\n best_max_iteration = 280\n best_algo = 'ap'\n\n # crf = sklearn_crfsuite.CRF(algorithm=best_algo, epsilon=best_epsilon, max_iterations=best_max_iteration)\n '''crf = sklearn_crfsuite.CRF(\n algorithm='lbfgs',\n c1=0.1,\n c2=0.1,\n max_iterations=100,\n all_possible_transitions=True\n )'''\n crf = sklearn_crfsuite.CRF(algorithm='ap', epsilon=best_epsilon, max_iterations=best_max_iteration)\n print(\"CRF Initialized\")\n # crf.fit(X_training, Y_training, X_dev=X_dev, y_dev=Y_dev)\n crf.fit(X_training, Y_training)\n print(\"Data Fitted\")\n Y_predict = crf.predict(X_test)\n # print(Y_predict[0])\n # print(Y_test[0])\n labels = list(crf.classes_)\n sorted_labels = sorted(labels)\n return Y_predict, Y_test, sorted_labels" ]
[ "0.66069067", "0.66063356", "0.6386136", "0.63641405", "0.6359857", "0.6340846", "0.63389325", "0.63323617", "0.6254566", "0.62452006", "0.6183691", "0.61397463", "0.61367977", "0.6132934", "0.61155736", "0.6114331", "0.6066086", "0.6050593", "0.60490036", "0.6045469", "0.6035445", "0.60265094", "0.6019195", "0.60063267", "0.59908444", "0.59732574", "0.59713846", "0.596698", "0.5949055", "0.5943352", "0.59403753", "0.59399295", "0.59368247", "0.59307253", "0.5912592", "0.5903168", "0.59030783", "0.5899604", "0.5899032", "0.5898267", "0.5897572", "0.5895105", "0.5893693", "0.58902645", "0.5888489", "0.58638173", "0.5850568", "0.58479154", "0.5843667", "0.5838686", "0.58364326", "0.5826843", "0.5821193", "0.5814437", "0.5807372", "0.5806378", "0.5806378", "0.5806378", "0.5806378", "0.5806378", "0.58051395", "0.5803008", "0.57998437", "0.5799001", "0.5785485", "0.57684946", "0.5764141", "0.5758818", "0.57560766", "0.57552487", "0.575121", "0.57463014", "0.57462305", "0.57461184", "0.5743966", "0.5735317", "0.5732731", "0.5730477", "0.57302254", "0.57137644", "0.57125145", "0.5710647", "0.57079256", "0.5706423", "0.57040954", "0.5698776", "0.5698523", "0.5694096", "0.5690472", "0.5690231", "0.56878", "0.56786656", "0.5677852", "0.56727856", "0.56701016", "0.56668174", "0.5664426", "0.5664019", "0.5661521", "0.5661164" ]
0.6155825
11
Return the serializer instance that should be used for validating and deserializing input, and for serializing output.
def get_serializer_in(self, *args, **kwargs):
    serializer_class = self.get_serializer_class_in()
    kwargs['context'] = self.get_serializer_context()
    return serializer_class(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSerializer():", "def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)", "def serializer_for(self, obj):\n # 1-NULL serializer\n if obj is None:\n return self._null_serializer_adapter\n\n obj_type = type(obj)\n serializer = None\n\n # 2-Default serializers, Dataserializable, Portable, primitives, arrays, String and some helper types(BigInteger etc)\n serializer = self.lookup_default_serializer(obj_type)\n\n # 3-Custom registered types by user\n if serializer is None:\n serializer = self.lookup_custom_serializer(obj_type)\n\n # 4 Internal serializer\n if serializer is None and self._global_serializer_adaptor is None:\n serializer = self.lookup_python_serializer(obj_type)\n\n # 5-Global serializer if registered by user\n if serializer is None:\n serializer = self.lookup_global_serializer(obj_type)\n\n if serializer is not None:\n if self._active:\n raise HazelcastSerializationError(\"There is no suitable serializer for:\" + str(obj_type))\n else:\n raise HazelcastInstanceNotActiveError()\n return serializer", "def get_serializer_class(self):\n assert self.serializer_class is not None, (\n \"'%s' should either include a `serializer_class` attribute, \"\n \"or override the `get_serializer_class()` method.\"\n % self.__class__.__name__\n )\n\n return self.serializer_class", "def get_serializer_class(self):\n return self.serializer_class", "def get_serializer():\n if 'serializer' in _CACHE:\n serializer = _CACHE['serializer']\n else:\n name = getattr(settings, 'DJANGO_NUMERICS_SERIALIZER_BACKEND',\n _DEFAULT_SERIALIZER)\n serializer = import_string(name)()\n _CACHE['serializer'] = serializer\n return serializer", "def get_serializer_class(self):\n if self.request.method == 'POST':\n return my_serializers.UploadedDataPostSerializer\n return self.serializer_class", "def get_serializer_class(self):\n pk_lookup, dataid_lookup = self.lookup_fields\n form_pk = self.kwargs.get(pk_lookup)\n dataid = self.kwargs.get(dataid_lookup)\n fmt = self.kwargs.get(\"format\", self.request.GET.get(\"format\"))\n sort = self.request.GET.get(\"sort\")\n fields = self.request.GET.get(\"fields\")\n if fmt == Attachment.OSM:\n serializer_class = OSMSerializer\n elif fmt == \"geojson\":\n serializer_class = GeoJsonSerializer\n elif fmt == \"xml\":\n serializer_class = DataInstanceXMLSerializer\n elif (\n form_pk is not None\n and dataid is None\n and form_pk != self.public_data_endpoint\n ):\n if sort or fields:\n serializer_class = JsonDataSerializer\n else:\n serializer_class = DataInstanceSerializer\n else:\n serializer_class = super().get_serializer_class()\n\n return serializer_class", "def serializer(self) -> ArchiveSerializer:\n return serializer_from_dict(self.doc.get('serializer'))", "def get_serializer_class(self):\n if self.action == 'create':\n return self.serializer_classes.get('create')\n else:\n return self.serializer_classes.get('default')", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return SillaSerializer\n else:\n return SillaSerializer", "def get_serializer_class(self):\n serializer_map = {\n \"witness\": WitnessServiceSerializer,\n \"review\": ReviewServiceSerializer,\n \"certificate_provider\": LPACertificateServiceSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"service_type\", \"witness\")]", "def force_serializer_instance(serializer):\n if inspect.isclass(serializer):\n assert 
issubclass(serializer, serializers.BaseSerializer), \"Serializer required, not %s\" % serializer.__name__\n return serializer()\n\n assert isinstance(serializer, serializers.BaseSerializer), \\\n \"Serializer class or instance required, not %s\" % type(serializer).__name__\n return serializer", "def get_query_serializer(self):\n query_serializer = self.overrides.get('query_serializer', None)\n if query_serializer is not None:\n query_serializer = force_serializer_instance(query_serializer)\n return query_serializer", "def get_serializer_class(self):\n if self.request.method in ['GET', ]:\n return QuestionSerializer\n return QuestionGroupQuestionSerializer", "def get_deserialization_instance(cls):\n if cls.__orig__ is None:\n return cls()\n else:\n return cls.__orig__()", "def get_serializer(self, *args, **kwargs):\n kwargs['context'] = self.get_serializer_context()\n realm = kwargs['context'].get('realm', None)\n if realm is not None:\n serializer_class = ItemSerializer\n else:\n serializer_class = self.get_serializer_class()\n return serializer_class(*args, **kwargs)", "def get_serializer(name):\n\n dumps = importlib.import_module(name).dumps\n\n # Serializers that handle unicode streams and a are safe against comments\n # can be used directly\n if name == 'json':\n import json\n return json.dumps\n\n def serializer(x):\n # Serialize\n try:\n data = dumps(x)\n except Exception as ex:\n raise SerializationError(ex)\n\n # Transmit with b85 encode: safe characters and no newlines\n return (b'+' + base64.b85encode(data)).decode('ascii')\n\n return serializer", "def get_serializer_class(self):\n if self.action == 'login':\n return UserLoginSerializer\n if self.action == 'signup':\n return UserSignUpSerializer\n if self.action == 'remember_code':\n return RememberCodeSerializer\n return UserModelSerializer", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return EventosSerializer\n else:\n return EventosRegisterSerializer", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.BookDetailSerializer\n elif self.action == 'upload_image':\n return serializers.BookImageSerializer\n\n return self.serializer_class", "def initialize_serializer(self):\n serializer = self.pyleus_config.get('serializer')\n if serializer in SERIALIZERS:\n self._serializer = SERIALIZERS[serializer](\n self._input_stream, self._output_stream)\n else:\n raise ValueError(\"Unknown serializer: {0}\", serializer)", "def get_request_serializer(self):\n body_override = self.overrides.get('request_body', None)\n\n if body_override is not None:\n if body_override is no_body:\n return None\n if self.method not in self.body_methods:\n raise SwaggerGenerationError(\"request_body can only be applied to PUT, PATCH or POST views; \"\n \"are you looking for query_serializer or manual_parameters?\")\n if isinstance(body_override, openapi.Schema.OR_REF):\n return body_override\n return force_serializer_instance(body_override)\n elif self.method in self.body_methods:\n return self.get_view_serializer()\n\n return None", "def get_serializer_class(self):\n return self.serializers.get(self.action,\n self.serializers['default'])", "def get_serializer_class(self):\n return {\"create\": ReportFileSerializer, }.get(self.action, ReportFileSerializer)", "def get_serializer_class(self):\n return self.serializer_classes.get(self.action,\n self.default_serializer_class)", "def get_serializer_class(self):\n\n if self.action in ['list', 'retrieve']:\n return OrderListSerializer\n 
else:\n return OrderSerializer", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return self.serializer_classes.get('retrieve')\n elif self.action == 'create':\n return self.serializer_classes.get('create')\n elif self.action == 'update':\n return self.serializer_classes.get('update')\n else:\n return self.serializer_classes.get('default')", "def get_serializer_class(self):\n if self.action in [\"list\", \"retrieve\"]:\n return OrderSerializer\n return OrderCreateSerializer", "def get_view_serializer(self):\n if not hasattr(self.view, 'get_serializer'):\n return None\n return self.view.get_serializer()", "def get_serializer_class(self):\n if (self.request.method == \"GET\" and\n self.request.query_params.get(\"nested\")):\n return serializers.ReviewNestedSerializer\n return serializers.ReviewSerializer", "def serializer_class(self):", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return UserReadSerializer\n else:\n return UserSerializer", "def get_serializer_class(self):\n if self.action == 'update':\n return UserChangePassword\n elif self.action == 'create':\n return UserVerifyTokenSerializer\n else:\n None", "def get_serializer(self, *args, **kwargs):\n if self.__class__.serializer_class is not None:\n cls = self.__class__.serializer_class\n else:\n if self.action == 'list' and hasattr(self.__class__,\n 'list_serializer_class'):\n cls = self.__class__.list_serializer_class\n elif hasattr(self.__class__, 'detail_serializer_class'):\n cls = self.__class__.detail_serializer_class\n else:\n # error handling\n return super().get_serializer(*args, **kwargs)\n\n # default the context\n kwargs['context'] = self.get_serializer_context()\n\n return cls(*args, **kwargs)", "def get_query_serializer(self):\n serializer = super().get_query_serializer()\n serializer_class = getattr(self.view, 'request_serializer_class', None)\n\n if not serializer and serializer_class:\n serializer = serializer_class()\n\n return serializer", "def serialize(serializer_class, instance, data=None, **kwargs):\n\n if data is None:\n serializer = serializer_class(instance, **kwargs)\n else:\n serializer = serializer_class(instance, data=data, **kwargs)\n serializer.is_valid(raise_exception=True)\n\n return serializer", "def get_serializer_class(self):\n #overide function this is a fun that called to retrive the serailizer class\n #for perticular request\n #this fun are used for wanted to chang the serailzer class for the different action\n #that are available on the recip0e viewset\n if self.action == 'retrieve':\n print('okkkkkkkkkkkkw')\n return serializers.RecipeDetailSerializer\n elif self.action == 'upload_image':\n print('okkkkkkkkkkkkkkkkk')\n return serializers.RecipeImageSerailzer\n\n return self.serializer_class", "def get_serializer_class(self):\n serializer_map = {\n \"RealEstate\": RealEstateSerializer,\n \"BankAccount\": BankAccountSerializer,\n \"Insurance\": InsuranceSerializer,\n \"Investment\": InvestmentSerializer,\n \"Company\": CompanySerializer,\n \"Residual\": ResidualSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"asset_type\", \"RealEstate\")]", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.OperationDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.AccountDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return 
serializers.ProductDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self, *args, **kwargs):\n\n if self.request.method in ['GET', 'POST']:\n serializer_class = SearchSerialzer\n\n elif self.action == 'destroy':\n serializer_class = SearchNotRequiredSerializer\n\n elif self.action == 'destroy_all':\n serializer_class = SearchDeleteAllSerializer\n\n return serializer_class", "def get_serializer_class(self) -> serializers.ModelSerializer:\n if self.request.user.rank == 'Management':\n return employee_serializers.ManagerSerializer\n return employee_serializers.EmployeeSerializer", "def get_serializer_class(self):\n\n if self.action == 'create':\n return CreateRideSerializer\n\n if self.action == 'join':\n return JoinRideSerializer\n\n if self.action == 'finish':\n return EndRideSerializer\n\n if self.action == 'qualify':\n return QualifyRideSerializer\n\n return RideModelSerializer", "def default_serializer(_cls: Type[Any], obj: Any) -> Any:", "def get_user_serializer_class(self):\n return durin_settings.USER_SERIALIZER", "def lookup_serializer(encoding: str) -> Serializer:\n try:\n return _SERIALIZERS[encoding]\n except KeyError:\n raise ValueError(f\"Unregistered encoding {encoding!r}\")", "def get_serializer_class(self):\n group = self.request.query_params.get('type_group')\n return self.serializer_lookup.get(group, serializers.MeasurementTypeSerializer)", "def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(many=True, *args, **kwargs)", "def serializers(self, **kwargs):\n return serializers.serializers(self._host, self._session, **kwargs)", "def get_serializer_class(self):\n if self.action in ('retrieve', 'list', 'update', 'partial_update'):\n return ListaPedidoSerializer\n return PedidoSerializer", "def get_proto_serializer():\n def _serialize_proto(proto):\n return proto.SerializeToString()\n return _serialize_proto", "def get_serializer_class(self, *args, **kwargs):\n if self.action == 'list':\n return self.serializer_list_class\n else:\n return self.serializer_class", "def by_extension_and_format(cls, extension: str, ser_format: str):\n if cls._format_to_serializer is None:\n cls._register_subclasses()\n if ser_format == 'auto':\n serializer = cls._extension_to_serializer.get(extension.lstrip('.'))\n else:\n serializer = cls._format_to_serializer.get(ser_format)\n\n if serializer is None:\n raise InvalidExtensionOrFormat(\n 'Cannot find serializer for format: %s and extension %s' % (\n ser_format, extension))\n return serializer", "def serialize(self):\n serialization = self._weaver.Serialize()\n if not serialization:\n raise AssertionError(\n 'Weaver Serialization failed: %s' % self._weaver.error_string())\n return serialization", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return ContaRetrieveSerializer\n\n return ContaSerializer", "def get_serializer_class(self):\n if self.request is None or self.request.method == \"POST\":\n return serializers.ProfileItemDetailSerializer\n\n return serializers.ProfileItemListSerializer", "def get_serializer(self, *args, **kwargs):\n try:\n params = self.request.query_params\n\n for key in ['part_detail', 'location_detail', 'supplier_part_detail', 'tests']:\n kwargs[key] = str2bool(params.get(key, False))\n except AttributeError:\n pass\n\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def get_serializer_class(self):\n profile 
= self.get_object()\n\n # Owner of the profile\n if self.request.user == profile.user:\n if profile.filled_out or self.request.data.get('filled_out'):\n return self.serializer_class_filled_out\n else:\n return self.serializer_class_owner\n # Staff or instructor is looking at profile\n elif not self.request.user.is_anonymous and self.request.user.role_set.filter(\n role__in=(Staff.ROLE_ID, Instructor.ROLE_ID),\n program__programenrollment__user__profile=profile,\n ).exists():\n return self.serializer_class_staff\n # Profile is public\n elif profile.account_privacy == Profile.PUBLIC:\n return self.serializer_class_limited\n # Profile is public to mm verified users only\n elif profile.account_privacy == Profile.PUBLIC_TO_MM:\n return self.serializer_class_limited\n # this should never happen, but just in case\n return self.serializer_class_limited", "def get_serializer_class(self):\n if self.request.method == \"POST\":\n return VideoUsersCreationSerializer\n else: \n return VideoUserSerializer", "def getDeserializer():", "def get_serializer(self, *args, **kwargs):\n kwargs['part_detail'] = True\n kwargs['location_detail'] = True\n kwargs['supplier_part_detail'] = True\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def _get_serializer_for_value(value, serializing):\n _init_serialization()\n\n cls = type(value)\n is_class = inspect.isclass(value)\n\n serialization_cls = None\n\n if inspect.isclass(value):\n if cls in _serialization_map:\n serialization_cls = _serialization_map[cls]\n elif is_class:\n serialization_cls = ClassSerialization\n else:\n if cls in _deconstructed_serialization_map:\n serialization_cls = _deconstructed_serialization_map[cls]\n elif (Enum is not None and\n (serializing and issubclass(cls, Enum)) or\n (not serializing and\n cls is dict and\n value.get('_enum') is True)):\n serialization_cls = EnumSerialization\n elif serializing and hasattr(value, 'deconstruct'):\n serialization_cls = DeconstructedSerialization\n elif (not serializing and\n cls is dict and\n value.get('_deconstructed') is True):\n serialization_cls = DeconstructedSerialization\n elif isinstance(value, BasePlaceholder):\n serialization_cls = PlaceholderSerialization\n elif cls in _serialization_map:\n serialization_cls = _serialization_map[cls]\n\n return serialization_cls", "def get_serializer_class(self, *args, **kwargs):\n\n if self.request.method == 'GET':\n serializer_class = FavoriteModelSerializer\n\n elif self.request.method == 'POST':\n serializer_class = FavoriteCreateSerializer\n\n elif self.action == 'destroy':\n serializer_class = FavoriteDestorySerializer\n\n elif self.action == 'destroy_all':\n serializer_class = FavoriteDestroyAllSerializer\n\n return serializer_class", "def serializer_from_settings():\n if settings.PROFILE_SERIALIZER:\n return import_string(settings.PROFILE_SERIALIZER)\n\n return UserProfileSerializer", "def __init__(self, serializer=None):\r\n self.client = Client()\r\n self.serializer = serializer\r\n\r\n if not self.serializer:\r\n self.serializer = Serializer()", "def deserialize(serializer_class, data, **kwargs):\n\n serializer = serializer_class(data=data, **kwargs)\n serializer.is_valid(raise_exception=True)\n\n return serializer", "def get_serializer_class(self):\n if self.action in (\"list\",):\n return serializers.NotesGroupListSerializer\n\n return serializers.NotesGroupDetailSerializer", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def get_serializer_class(self):\n if 
self.request is None or self.request.method == \"POST\":\n return serializers.ProfileTopicDetailSerializer\n\n return serializers.ProfileTopicListSerializer", "def get_serializer_class(self):\n if self.action == \"list_attendances\":\n return serializers.LiveAttendanceGraphSerializer\n return super().get_serializer_class()", "def serialize(self, value, **kwargs):\n kwargs.update({'include_class': kwargs.get('include_class', True)})\n if self.serializer is not None:\n return self.serializer(value, **kwargs)\n if value is None:\n return None\n if isinstance(value, HasProperties):\n return value.serialize(**kwargs)\n return self.to_json(value, **kwargs)", "def serialize(cls, *args, **kwargs):\n return serialize_cls(cls)(*args, **kwargs)", "def register_serializer(cls, *, serializer, deserializer):\n context = ray.worker.global_worker.get_serialization_context()\n context._register_cloudpickle_serializer(cls, serializer, deserializer)", "def _ReadSerializerStream(self):\n stream_name = 'serializer.txt'\n if not self._HasStream(stream_name):\n return\n\n serialization_format = self._ReadStream(stream_name)\n if serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n raise ValueError(\n 'Unsupported stored serialization format: {0:s}'.format(\n serialization_format))\n\n return serialization_format", "def get_serializer(self, *args, **kwargs):\n try:\n kwargs['user_detail'] = str2bool(self.request.query_params.get('user_detail', False))\n except Exception:\n pass\n\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def serialize(self, format, queryset, **options):\n s = get_serializer(format)() # noqa\n s.serialize(queryset, **options)\n return s.getvalue()", "def get_serializer_class(self):\n try:\n return self.serializer_action_classes[self.action]\n except (KeyError, AttributeError):\n return super(\n MultiSerializerViewSetMixin, self).get_serializer_class()", "def transparent_serialize(cls):\n return _create_wrapper_cls(cls, store_init_parameters=False)", "def _init_serialization():\n global _deconstructed_serialization_map, _serialization_map\n\n if _deconstructed_serialization_map or _serialization_map:\n return\n\n _deconstructed_serialization_map = {\n Q: QSerialization,\n }\n\n if CombinedExpression is not None:\n _deconstructed_serialization_map[CombinedExpression] = \\\n CombinedExpressionSerialization\n\n _serialization_map = {\n # String-based\n bytes: StringSerialization,\n six.text_type: StringSerialization,\n\n # Dictionary-based\n OrderedDict: DictSerialization,\n dict: DictSerialization,\n\n # Primitives\n bool: PrimitiveSerialization,\n float: PrimitiveSerialization,\n int: PrimitiveSerialization,\n type(None): PrimitiveSerialization,\n\n # Iterables\n list: ListSerialization,\n set: SetSerialization,\n tuple: TupleSerialization,\n\n # Class references\n type: ClassSerialization,\n }\n\n if six.PY2:\n _serialization_map.update({\n long: PrimitiveSerialization,\n })", "def _register_serializers(self):\n import ray.util.serialization_addons\n from ray.util.serialization import StandaloneSerializationContext\n\n ctx = StandaloneSerializationContext()\n ray.util.serialization_addons.apply(ctx)", "def serialize_cls(cls):\n return _create_wrapper_cls(cls)", "def get_deserializer(name):\n\n loads = importlib.import_module(name).loads\n\n # Serializers that handle unicode streams and a are safe against comments\n # can be used directly\n if name == 'json':\n import json\n return json.loads\n\n def deserializer(x):\n # Load 
base85 bytes data\n x = x[1:].encode('ascii')\n x = base64.b85decode(x)\n try:\n return loads(x)\n except Exception as ex:\n raise SerializationError(ex)\n\n return deserializer", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls", "def serialize(self, obj):\n return obj", "def encoder(self) -> json.JSONEncoder:\n return encoder_from_string(self.doc.get('encoder'))", "def get_serialization_instance(cls, value):\n\n # if the instance is a list, convert it to a cls instance.\n # this is only useful when deserializing method arguments for a client\n # request which is the only time when the member order is not arbitrary\n # (as the members are declared and passed around as sequences of\n # arguments, unlike dictionaries in a regular class definition).\n if isinstance(value, list) or isinstance(value, tuple):\n assert len(value) <= len(cls._type_info)\n\n cls_orig = cls\n if cls.__orig__ is not None:\n cls_orig = cls.__orig__\n inst = cls_orig()\n\n keys = cls._type_info.keys()\n for i in range(len(value)):\n setattr(inst, keys[i], value[i])\n\n elif isinstance(value, dict):\n inst = cls()\n\n for k in cls._type_info:\n setattr(inst, k, value.get(k, None))\n\n else:\n inst = value\n\n return inst", "def serialize(self, request, content_type, default_serializers=None):\n\n if self.serializer:\n serializer = self.serializer\n else:\n _mtype, _serializer = self.get_serializer(content_type,\n default_serializers)\n serializer = _serializer()\n\n response = webob.Response()\n response.status_int = self.code\n for hdr, value in self._headers.items():\n response.headers[hdr] = str(value)\n response.headers['Content-Type'] = content_type\n if self.obj is not None:\n response.body = serializer.serialize(self.obj)\n\n return response", "def get_serializer(self, content_type, default_serializers=None):\n\n default_serializers = default_serializers or {}\n\n try:\n mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)\n if mtype in self.serializers:\n return mtype, self.serializers[mtype]\n else:\n return mtype, default_serializers[mtype]\n except (KeyError, TypeError):\n raise exception.InvalidContentType(content_type=content_type)", "def serialize(self):\n raise NotImplementedError(\n \"Subclasses of Serializable must implement serialize\"\n )", "def test_get_serializer_class():\n view = views.PasswordResetRequestView()\n expected = serializers.PasswordResetRequestSerializer\n\n assert view.get_serializer_class() == expected", "def _serialize(\n obj: object,\n to_proto: bool = True,\n to_bytes: bool = False,\n) -> Union[str, bytes, Message]:\n\n is_serializable: Serializable\n if not isinstance(obj, Serializable):\n if hasattr(obj, \"serializable_wrapper_type\"):\n is_serializable = obj.serializable_wrapper_type(value=obj) # type: ignore\n else:\n traceback_and_raise(\n Exception(f\"Object {type(obj)} has no serializable_wrapper_type\")\n )\n else:\n is_serializable = obj\n\n serialize_method = getattr(is_serializable, \"sy_serialize\", None)\n if serialize_method is None:\n serialize_method = getattr(is_serializable, \"serialize\", None)\n if serialize_method is None:\n raise Exception(f\"Object {type(obj)} has no serialize method\")\n\n return serialize_method(to_proto=to_proto, to_bytes=to_bytes)", "def test_get_serializer_class():\n view = views.EmailVerificationView()\n expected = serializers.EmailVerificationSerializer\n\n assert view.get_serializer_class() == expected", "def set_serializer(self, serializer):\n 
self._serializer = serializer\n return self", "def test_get_serializer_class():\n view = views.UserCreateView()\n expected = serializers.UserCreationSerializer\n\n assert view.get_serializer_class() == expected", "def register_serializer(\n encoding: Union[str, Tuple[str, ...]], serializer: Serializer = None\n):\n\n def wrapper(serializer):\n if isinstance(encoding, tuple):\n for e in encoding:\n register_serializer(e, serializer)\n else:\n _SERIALIZERS[encoding] = serializer\n return serializer\n\n return wrapper(serializer) if serializer is not None else wrapper", "def get_serializer(self, *args, **kwargs):\n try:\n kwargs['item_detail'] = str2bool(self.request.query_params.get('item_detail', False))\n except Exception:\n pass\n\n try:\n kwargs['user_detail'] = str2bool(self.request.query_params.get('user_detail', False))\n except Exception:\n pass\n\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def serialize(self, data, format='application/json'):\r\n return self.serializer.serialize(data, format=format)", "def serialize(self):\n return self.instantiate_queue()" ]
[ "0.72009987", "0.7115752", "0.69366866", "0.6924428", "0.68449354", "0.6820688", "0.67195165", "0.6692233", "0.6686074", "0.65120816", "0.6473344", "0.6473284", "0.6444412", "0.6443525", "0.6418904", "0.63524395", "0.6348317", "0.63381505", "0.6311651", "0.63109744", "0.62789714", "0.62647057", "0.6243169", "0.6240013", "0.623506", "0.62324035", "0.61878854", "0.6182841", "0.617552", "0.6173757", "0.61729735", "0.614281", "0.61395276", "0.6133147", "0.6128442", "0.61279124", "0.61093885", "0.6107114", "0.6105589", "0.6101638", "0.60992885", "0.6092012", "0.6079949", "0.6061221", "0.6045427", "0.6022098", "0.5991614", "0.5989082", "0.595159", "0.5938331", "0.5910282", "0.58833855", "0.5866978", "0.57846475", "0.57676476", "0.5748564", "0.5727291", "0.5686052", "0.5667387", "0.56470567", "0.5640231", "0.5635826", "0.5543641", "0.54997194", "0.5493936", "0.5413152", "0.5407627", "0.5401495", "0.53956974", "0.5393255", "0.53637904", "0.5354623", "0.5354557", "0.5345471", "0.5341087", "0.53392893", "0.5339143", "0.5332332", "0.533047", "0.53259933", "0.5322456", "0.53021425", "0.52999425", "0.5275463", "0.52374655", "0.52361417", "0.522915", "0.5228819", "0.52153224", "0.5211377", "0.51908773", "0.5172133", "0.5170512", "0.5152825", "0.5142171", "0.5128", "0.51180595", "0.5101454", "0.5080314", "0.50775915" ]
0.67095476
7
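The snippets in the row above all revolve around choosing a serializer class per request or per action. A minimal sketch of that pattern, assuming Django REST Framework (the ProfileTopic* serializer names echo the first snippet in the row; their fields and the viewset itself are illustrative assumptions, not part of the dataset):

# Hedged sketch of per-action serializer selection in a DRF viewset.
from rest_framework import serializers, viewsets


class ProfileTopicListSerializer(serializers.Serializer):
    # Assumed fields, only to make the sketch self-contained.
    title = serializers.CharField()


class ProfileTopicDetailSerializer(ProfileTopicListSerializer):
    body = serializers.CharField()


class ProfileTopicViewSet(viewsets.GenericViewSet):
    def get_serializer_class(self):
        # Return the heavier detail serializer for write actions and the
        # lighter list serializer everywhere else.
        if self.action in ("create", "update", "partial_update"):
            return ProfileTopicDetailSerializer
        return ProfileTopicListSerializer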
Do login action here. For example, in case of session authentication, store the session in cookies.
def do_login(self, backend, user):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login(self):\n\n self.__login_if_required()", "def login(self):\n if self._cookie_cached(self.login_email):\n self.cookie_login(self.login_email)\n else:\n self.new_login(self.login_email, self.login_pass)", "def login_perform():\n try:\n user_name = request.values['user_name']\n user_password = request.values['user_password']\n except KeyError:\n pass\n else:\n session = Session()\n user = session.query(User).filter_by(user_name=user_name).first()\n if not user or not user.password == hashlib.sha1(user_password).hexdigest():\n flash(\"Invalid credentials\", \"alert\")\n return redirect(url_for(\".login\"))\n try:\n remember = request.values['remember'].lower() == \"on\"\n except KeyError:\n remember = False\n login_user(user, remember=remember)\n return redirect(request.args.get(\"next\") or url_for(\"home.index\"))", "def login():", "def login():", "def _login(self, *args, **kwargs):\n pass", "def do_login(request):\n distinct_id = request.session.pop('distinct_id')\n user = User.objects.get(id=distinct_id)\n login(request, user)\n return redirect_to_user_settings()", "def login(self):", "def login(self):\n\t\treturn", "def login():\n if session['state'] != request.args['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n authorization_response = request.url\n FLOW.fetch_token(authorization_response=authorization_response)\n auth_session = FLOW.authorized_session()\n userinfo = auth_session.get(USERINFO_URL).json()\n session['userinfo'] = {\n 'name': userinfo['name'],\n 'email': userinfo['email']}\n sqlsession = SQLSESSION()\n user = User(name=userinfo['name'], email=userinfo['email'])\n try:\n sqlsession.add(user)\n sqlsession.commit()\n except IntegrityError:\n # user already exists in DB\n pass\n if 'target' not in session.keys():\n return redirect(\"/\")\n return redirect(session['target'])", "def login():\n error = None\n \n if request.method == 'POST':\n if not db.login(request.form['username'], request.form['password']):\n error = 'Invalid username or password. 
Please try again!'\n else:\n resp = make_response(redirect(url_for('main')))\n resp.set_cookie('username', request.form['username'])\n resp.set_cookie('password', request.form['password'])\n return resp\n return render_template('login.html', error = error)", "def do_login(self, password):\n # Creating JSON string with authentication credentails.\n in_data = ('{{ \"username\":\"{username}\",'\n '\"password\":\"{password}\" }}'\n ).format(\n username=self.pub_user,\n password=password\n )\n\n url = self.base_url + \"/oasis/login\"\n response = self.do_request(url, in_data)\n json_response = json.loads(response.content)\n\n if json_response[\"success\"] == False:\n print(\"Invalid user id or password\")\n else:\n self.cookies = dict(sessionid=response.cookies['sessionid'])\n print(\"You are logged into Mid-tier\")\n\n logger.info( 'Log in response ' + str(response.content))", "def sign_in():\n if request.method == 'POST':\n result = login(request.form['username'], request.form['password'])\n if result == \"Login successful\":\n session['username'] = request.form['username']\n return redirect(url_for('categories'))\n flash(result, 'warning')\n return render_template('login.html')", "def do_login_login():\n print(inspect.stack()[1][3])\n print(request.form)\n query = select([User]).where(and_(User.columns.email == request.form['email'],User.columns.password==request.form['password'] ))\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if ResultSet:\n session['logged_in'] = True\n else:\n flash('wrong password!')\n # return str(get_flashed_messages())\n return home(result)", "def post(self):\n\n username = self.request.get('username').lower()\n pwd = self.request.get('pwd')\n remember = self.request.get('remember')\n\n user = User.login(username, pwd) # class\n if user:\n self.login(user, remember) # cookie\n self.redirect('/blog')\n else:\n msg = 'Invalid login'\n self.render(\"login.html\", error=msg)", "def do_login(self):\n url = self.get_url('/accounts/login')\n cookies = None\n\n client = requests.session()\n csrf = None\n try:\n csrf = client.get(url).cookies.get('csrftoken')\n except RequestException as e:\n logger.warning('Unable to retrieve csrf: {}'.format(e))\n\n data = {\n 'username': self.auth[0],\n 'password': self.auth[1],\n 'csrfmiddlewaretoken': csrf,\n 'next': '/'\n }\n try:\n response = client.post(url, data=data, headers=dict(Referer=url))\n except RequestException as e:\n logger.warning('Unable to login to {} ({})'.format(self.name, e))\n else:\n if response.status_code == 200:\n cookies = {}\n for cookie in response.request.headers.get('Cookie').split(';'):\n cookie = cookie.strip()\n session = cookie.split('sessionid=')\n if len(session) == 2:\n sessionid = session[-1]\n cookies = dict(sessionid=sessionid)\n break\n return cookies", "def login():\n\n if request.method == \"GET\":\n # Check if the user is auth'd\n user = auth_user_session()\n if user:\n # Send to homepage if they are auth'd\n return redirect(\"/\")\n else:\n # Otherwise send back to login\n return render_template(\"login.html\")\n\n if request.method == \"POST\":\n # Get values submitted through POST\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n\n # Find the user in the database\n user = User.query.filter(User.username == username).first()\n if user:\n if user.check_password(password):\n # Update their cookie and commit\n cookie = update_session(user)\n db.session.add(user)\n db.session.commit()\n\n # Send cookie back in response\n response 
= make_response(redirect(\"/\"))\n response.set_cookie(\"session_cookie\", cookie)\n response.set_cookie(\"user\", f\"{user.id}\")\n\n # Return\n return response\n return render_template(\"loginfailure.html\")", "def login():\n form = LoginForm()\n if request.method == \"GET\":\n return render_template('login.html', title='Sign In', form=form)\n if request.method == \"POST\":\n if 'loggedin' in session:\n return redirect(url_for('home'))\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n account = db.check_item(\"username\", username)\n if account is None:\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n if check_password_hash(str(account['password_hash']), password):\n session['loggedin'] = True\n session['username'] = account['username']\n session['admin_auth'] = account['admin_auth']\n flash('Login successfully!')\n return redirect(url_for('home'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n return redirect(url_for('login'))", "def login(self):\n backend = self.backend\n self.session[backend.session_id_key] = self[\"id\"]\n self.session[backend.session_backend_key] = backend.session_backend_val\n self.session[backend.session_hash_key] = self._get_session_hash(\n self[\"password\"]\n )", "def login():\n if request.method == 'POST' and not session.get('logged_in'):\n db = get_db()\n cur = db.execute('select id, username, password from users where \\\n username = ? and password = ?',\n [request.form['username'], request.form['password']])\n rows = cur.fetchall()\n if len(rows) == 1:\n session['logged_in'] = True\n else:\n flash('Invalid username or password', 'error')\n return redirect(url_for('show_entries'))", "def login(request, *args, **kwargs):\n\tif request.method == 'POST':\n\t\tif not request.POST.get('remember_me', None):\n\t\t\trequest.session.set_expiry(0)\n\treturn auth_views.login(request, *args, **kwargs)", "def __login(self):\r\n # Validate email and get user from db\r\n email = self.request.get(constants.VAR_NAME_EMAIL)\r\n logging.info('User logging in: ' + str(email))\r\n if not User.isEmailValid(email) or not User.isAlreadyRegistered(email):\r\n logging.error('Email mismatched or not registered')\r\n self.set_error(constants.STATUS_BAD_REQUEST,\r\n self.gettext('LOGIN_ERROR'), url=self.request.url)\r\n return\r\n user = User.getUser(email.lower())\r\n\r\n # Calculate password hash\r\n password = self.request.get(constants.VAR_NAME_PASSWORD)\r\n if not User.isPasswordValid(password):\r\n logging.error('Invalid password')\r\n self.set_error(constants.STATUS_BAD_REQUEST,\r\n self.gettext('LOGIN_ERROR'), url=self.request.url)\r\n return\r\n key = CryptoUtil.getKey(password, user.salt)\r\n\r\n # Validate password\r\n if not user.password == key:\r\n logging.error('Incorrect password for email')\r\n self.set_error(constants.STATUS_BAD_REQUEST,\r\n self.gettext('LOGIN_ERROR'), url=self.request.url)\r\n return\r\n\r\n # Check remember me\r\n remember_string = self.request.get('remember').lower()\r\n remember = remember_string != '' and remember_string != 'false'\r\n if remember:\r\n token_id = LoginToken.generate_id()\r\n token = LoginToken()\r\n token.tokenid = token_id\r\n token.ip = self.request.remote_addr\r\n token.user = email\r\n token.put()\r\n cookie_value = token.get_cookie_value()\r\n delta = timedelta(days=constants.PERSISTENT_LOGIN_LIFETIME_DAYS)\r\n self.response.set_cookie(constants.PERSISTENT_LOGIN_NAME,\r\n cookie_value,\r\n 
expires=datetime.utcnow() + delta,\r\n path=\"/\", httponly=True, secure=True)\r\n\r\n # Log in user\r\n if user.verified:\r\n user.login(self.request.remote_addr)\r\n session = get_current_session()\r\n url = session.pop(constants.VAR_NAME_REDIRECT)\r\n if url is None:\r\n url = \"/\"\r\n self.ok(url)\r\n else:\r\n logging.error('User unverified')\r\n self.set_error(constants.STATUS_FORBIDDEN,\r\n self.gettext('UNVERIFIED_PRE') +\r\n ' <a href=\\\"/User/Verify\">' +\r\n self.gettext('UNVERIFIED_HERE') +\r\n '</a> ' +\r\n self.gettext('UNVERIFIED_POST'),\r\n url=self.request.url)\r\n return", "def log_in():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n if PLAN.login_user(username, password):\n session['name'] = username\n flash(\"Login success ...\")\n return redirect(url_for('index'))\n flash(\"Login failed ...\")\n return render_template('login.html')\n return render_template('login.html')", "def login(self) -> redirect:\n\t\tif self.is_authorized:\n\t\t\tflash(\"You are already logged in.\")\n\t\t\treturn redirect(url_for(\"index\"))\n\t\telif request.method == \"GET\":\n\t\t\treturn render_template(\"login.jinja2\")\n\t\tsession[\"state\"] = str(uuid4())\n\t\treturn self.oauth.authorize(callback=url_for(\"authorize\", _external=True), state=session[\"state\"])", "def login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user = get_user(username, password)\n\n if not user:\n flash('No such username and/or password', 'alert-danger')\n return redirect(url_for('login'))\n\n session['username'] = user.username\n session['user_id'] = user.id\n session['logged_in'] = True\n session['is_admin'] = user.is_admin\n\n return redirect(url_for('index'))\n\n return render_template('login.html')", "def login_user(self):\r\n self.client.login(username=self.user.username, password=\"password\")", "def custom_login(request, **kwargs):\n if request.user and request.user.is_authenticated():\n return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)\n else:\n return login(request, **kwargs)", "def login():\n form = LoginForm()\n if not 'username' in session:\n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username':form.username.data})\n if user and bcrypt.checkpw(request.form['password'].encode('utf-8'), user['hashed_password']):\n session['username'] = form.username.data\n current_user = session['username']\n flash(f'Welcome back, {current_user}!', 'success')\n return redirect(url_for('dashboard'))\n \n flash('Please check login details.', 'danger')\n return render_template('pages/login.html', title='Login', form=form)\n flash('You are already logged in. 
Did you mean to go to your dashboard instead?', 'info')\n return redirect(url_for('dashboard'))", "def Login():\n bad_login = False\n\n try:\n if request.args.get('logout') == \"1\":\n resp = make_response(render_template('login.html', bad_login=bad_login))\n resp.set_cookie('user_id', '', expires=0)\n resp.set_cookie('user_auth_token', '', expires=0)\n return resp\n except:\n pass\n\n if request.method == 'POST':\n try:\n if request.form['submit'] == \"True\":\n email = request.form['Email']\n password = request.form['Password']\n\n users = Users()\n (success, user_id, user_auth_token) = users.user_login(email, password)\n\n if success:\n expire_date = datetime.datetime.now()\n expire_date = expire_date + datetime.timedelta(hours=1)\n\n resp = make_response(redirect(url_for('configuration.Configuration')))\n resp.set_cookie('user_id', str(user_id), expires=expire_date)\n resp.set_cookie('user_auth_token', user_auth_token, expires=expire_date)\n return resp\n else:\n bad_login = True\n\n except KeyError:\n pass\n\n return render_template('login.html', bad_login=bad_login)", "def login():\n if request.method == \"POST\":\n # Check that username exists\n existing_username = mongo.db.users.find_one(\n {\"username\": re.compile(\n \"^\" + request.form.get(\"username\") + \"$\", re.IGNORECASE)})\n if existing_username:\n # Ensure hashed password matches input\n if check_password_hash(\n existing_username[\"password\"], request.form.get(\n \"password\")):\n # Check if user is an admin\n is_admin = existing_username.get(\"is_admin\", False)\n if is_admin:\n session[\"admin\"] = True\n session[\"user\"] = existing_username[\"username\"]\n flash(Markup(\"Welcome, \") + session[\"user\"],\n category=\"success\")\n return redirect(url_for(\"get_terms\"))\n else:\n # Invalid password entered\n flash(\"Username and/or password incorrect\", category=\"error\")\n return redirect(url_for(\"login\"))\n else:\n # Username doesn't exist\n flash(\"Username and/or password incorrect\", category=\"error\")\n return redirect(url_for(\"login\"))\n\n try:\n if session[\"user\"]:\n flash(\"You are already logged in\",\n category=\"error\")\n return redirect(url_for(\"get_terms\"))\n except KeyError:\n return render_template(\"login.html\")", "def login(self):\n logging.debug(\"login called\")\n\n # Apply settings\n self.localisationsettings.apply_to_upcoming_session()\n self.admin_setting.apply_to_upcoming_session()\n self.macspoof_setting.apply_to_upcoming_session()\n self.network_setting.apply_to_upcoming_session()\n\n self.mainwindow.hide()\n self.gdmclient.do_login()", "def login(request, *args, **kwargs):\n if request.method == 'POST':\n if not request.POST.get('remember_me', None):\n request.session.set_expiry(0)\n return auth_login(request, *args, **kwargs)", "def login_user():\n pass", "def loginView(request):\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n dologin(request, user)\n if isOperator(user): # login as an operator\n return redirect('/operator/map')\n elif isAdmin(user): # login as an admin\n return redirect('/admin/map')\n return HttpResponse('ok')\n else:\n # Return a 'disabled account' error message\n return HttpResponse(\"Disabled account\")\n else:\n # Return an 'invalid login' error message.\n return HttpResponse(\"Invalid login\")", "def submit(self):\n password = self.form_result['password']\n username = self.form_result['username']\n\n if not 
loginhelper.validateUsernamePassword(username, password):\n return render('login.html')\n\n # Mark user as logged\n session['user'] = username\n session.save()\n\n # Send user back to where they originally wanted\n if session.get('path_before_login'):\n redirect(session['path_before_login'])\n else:\n return render('loggedin.html')", "def login():\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n next_var = request.args.get('next')\n user = Users.query.get(form.email.data)\n if user:\n # sets the authenticated parameter which is needed for sessions to recognize the user\n user.authenticated = True\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=True)\n return redirect(next_var or url_for('home'))\n return render_template('login.html', form=form, email=request.cookies.get('email'))", "def on_before(self, controller):\n session_id = controller.get_cookie(self.session_config.cookie_id)\n cookie_id = str(self.session_config.auth_cookie)\n userid = controller.get_secure_cookie(cookie_id)\n user = None\n if userid:\n sname = self.session_config.auth_service\n logger.debug(self.application.models)\n auth_service = self.application.models[sname]\n user = auth_service.auth(userid)\n if user:\n if not session_id:\n session_id = self.gen_session_id(controller)\n setattr(user, 'just_signin', True)\n setattr(user, 'session_id', session_id)\n threadlocal.set_user(user)\n \n if not session_id:\n session_id = self.gen_session_id(controller)\n threadlocal.set_sessionid(session_id)\n threadlocal.set_ip(controller.request.remote_ip)\n if session_id:\n controller.set_cookie(self.session_config.cookie_id, session_id)\n\n if not user and controller.require_auth:\n h = controller.request.headers.get('X-Requested-With', None)\n if h and h == 'XMLHttpRequest':\n raise tornado.web.HTTPError(403, self.__class__.__name__)\n else:\n if controller.request.method in (\"GET\", \"HEAD\"):\n url = controller.get_login_url()\n if \"?\" not in url:\n if urlparse.urlsplit(url).scheme:\n # if login url is absolute, make next absolute too\n next_url = controller.request.full_url()\n else:\n next_url = controller.request.uri\n url += \"?\" + urllib.urlencode(dict(next=next_url))\n controller.redirect(url)\n else:\n raise tornado.web.HTTPError(403, self.__class__.__name__)", "def login(self):\n with self.client.post(\"/login\", {\"username\":self.user.username,\n \"password\":MASTER_PASSWORD},\n catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.cookies.get('token') is not None:\n response.success()\n return\n response.failure(\"login failed\")", "def login_action(request):\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n # Log the user in and redirect him to the simulation manager.\n login(request, user)\n else:\n # Authentication failed.\n return HttpResponseRedirect(\n reverse('metro:login', kwargs={'login_error': True})\n )\n else:\n error = login_form.errors\n # If a problem occured, return to the login page and show the errors.\n context = {\n 'form': login_form,\n 'error': error\n }\n return render(request, 'metro_app/login.html', context)\n return HttpResponseRedirect(reverse('metro:simulation_manager'))", "def do_login(self):\n self.content = self._login()\n if self.with_tags:\n self.rest_content 
= self._login_vapi()", "def login():\n if request.method == \"POST\":\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n\n # check if the user and hash are in the file\n if not is_valid_login(username, password):\n flash(\"Invalid username or password\")\n record_failed_login(username)\n else:\n session[\"username\"] = username\n return redirect(url_for(\"index\"))\n else:\n if \"username\" in session:\n return redirect(url_for(\"index\"))\n\n return render_template(\"login.html\")", "def login():\n pass", "def login_page():\n try:\n if request.method == \"POST\":\n with Database() as database:\n db_password = database.checkPass(request.form['username'])\n if len(db_password) > 0:\n db_password = db_password[0][0]\n if pbkdf2_sha256.verify(request.form['password'], db_password):\n session['logged_in'] = True\n session['id'] = database.getID(request.form['username'])\n session['username'] = request.form['username']\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \\\n session['username']\n return redirect(url_for('index'))\n else:\n flash(\"Invalid credentials, try again!\")\n return render_template(\"login.html\")\n else:\n flash(\"Invalid credentials, try again!\")\n return render_template(\"login.html\")\n return render_template(\"login.html\")\n\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return render_template(\"login.html\")", "def _shared_login(request):\n csession = request.session\n player = request.user\n sesslogin = csession.get(\"logged_in\", None)\n\n # check if user has authenticated to website\n if csession.session_key is None:\n # this is necessary to build the sessid key\n csession.save()\n elif player.is_authenticated():\n if not sesslogin:\n # User has already authenticated to website\n csession[\"logged_in\"] = player.id\n elif sesslogin:\n # The webclient has previously registered a login to this browser_session\n player = PlayerDB.objects.get(id=sesslogin)\n try:\n # calls our custom authenticate in web/utils/backends.py\n player = authenticate(autologin=player)\n login(request, player)\n except AttributeError:\n logger.log_trace()", "def login():\n form = LoginForm()\n\n if form.validate_on_submit():\n login_user(form.user, remember=form.remember.data)\n flash('You are now logged in to the ACM-HSCC site', 'alert-success')\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n return render_template('login.html', form=form)", "def login():\n\n if current_user.is_authenticated == True:\n return redirect(url_for('controller'))\n\n form = LoginForm(request.form)\n if request.method == 'POST'and form.validate():\n check_user = User.query.filter_by(login=form.login.data).first()\n if check_user:\n if check_password_hash(check_user.password, form.password.data):\n login_user(check_user)\n return redirect(url_for('controller'))\n\n return render_template('login.html', form=form)", "def _login(self):\n data = self._send(self.nc_request(action=\"login\", parameters={\"apipassword\": self._api_password}))\n\n self._session_id = data[\"apisessionid\"]\n\n logging.info(f\"logged in successfully with session id {self._session_id}\")", "def login(self):\n # Enter login credentials\n WebDriverWait(self.driver, 120).until(\n EC.element_to_be_clickable(\n (By.ID, \"session_key-login\")\n )\n )\n elem = self.driver.find_element_by_id(\"session_key-login\")\n elem.send_keys(self.username)\n elem = self.driver.find_element_by_id(\"session_password-login\")\n elem.send_keys(self.password)\n # Enter 
credentials with Keys.RETURN\n elem.send_keys(Keys.RETURN)\n # Wait a few seconds for the page to load\n time.sleep(3)", "def login():\n if request.method=='GET':\n # get info and render\n return render_template('login.html')\n else:\n # auth\n username = request.form.get('username')\n password = request.form.get('password')\n\n users = DATABASE[\"users\"]\n\n if username in users:\n if password == users[username][\"Password\"]:\n # success, set session\n session['Name'] = username\n session['Type'] = users[username]['Type']\n\n # get info and redirect\n return redirect(url_for('manage_resources', user=username), 302)\n return Response(\"Incorrect Login Details\", 401)\n\n return \"Incorrect login credentials\"", "def process_login():\n\n # Get form variables\n email = request.form[\"email\"]\n password = request.form[\"password\"]\n\n # printing data from form to BASH\n print \"form password\"\n\n print password\n\n # check user exisit and then asign them variable user\n user = User.query.filter_by(email=email).first()\n\n print \"\\n \\n \\n \", user\n\n # Conditions\n if not user:\n\n flash(\"No such user\")\n\n return redirect(\"/\")\n\n elif user.password != password:\n\n flash(\"Incorrect password\")\n\n return redirect(\"/\")\n else:\n session[\"user_id\"] = user.user_id\n\n flash(\"Logged in\")\n\n return redirect('/decisions')", "def login():\n\n if current_user is not None and current_user.is_authenticated():\n return redirect(url_for(\"user.profile\"))\n\n form = LoginForm(request.form)\n if form.validate_on_submit():\n user, authenticated = User.authenticate(form.login.data,\n form.password.data)\n\n if user and authenticated:\n login_user(user, remember=form.remember_me.data)\n return redirect(request.args.get(\"next\") or\n url_for(\"forum.index\"))\n\n flash((\"Wrong username or password\"), \"danger\")\n return render_template(\"auth/login.html\", form=form)", "def login(self):\n identity = request.environ.get('repoze.who.identity')\n came_from = str(request.GET.get('came_from', '')) or \\\n url('/')\n if identity:\n redirect(url(came_from))\n else:\n c.came_from = came_from\n c.login_counter = request.environ['repoze.who.logins'] + 1\n return render('/forms/login.mako')", "def login(request):\n if request.method == \"GET\":\n return {}\n if request.method == \"POST\":\n if check_credentials(request):\n username = request.POST['username']\n headers = remember(request, username)\n return HTTPFound(location=request.route_url('home_view'), headers=headers) \n return {'error': 'Invalid username or password.'}", "def login(self):\n self._session = requests.Session()\n data = {'login': self.username, 'password': self.password}\n url = self.address + '/login_generic'\n r = self._session.post(url, data=data)\n if 'field-login' in r.text:\n # Response still contains login form\n raise RuntimeError('Login failed.')", "def do_login(user):\n session[CURRENT_USER_KEY] = user.id", "def submit_login():\n \n email = request.form.get('email')\n password = request.form.get('password')\n\n user = User.query.filter_by(email=email).first()\n\n if not user:\n flash(\"Type it in again, foo!\")\n return redirect(\"/login-form\")\n\n if user.password != password:\n flash(\"Stop trying to break in!\")\n return redirect(\"/login-form\")\n\n\n session[\"logged_in\"] = user.email\n flash(\"You have been successfully logged in!\")\n return redirect('/user-page/%d' % user.user_id)", "def login(self):\r\n \r\n # Get the csrf token from the main URL\r\n csrf = self.extract_csrf(API.url_login)\r\n \r\n # 
Construnct the payload\r\n payload = self.cfg['payload']['login'][0]\r\n payload['csrfmiddlewaretoken'] = csrf\r\n\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/login.json', test=payload)\r\n\r\n # Login request \r\n requests.post(API.url_login, payload, headers={'Referer' : API.url_login})", "def login(request):\n\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n return HttpResponseRedirect(\"/\")\n\n else:\n return HttpResponse(\"Invalid login. Please try again.\")\n\n # if not POST then return login form\n return render(request, \"login.html\", {'next': ''})", "def login(self, username, password):\n try:\n (_, content) = self._request(self.url,\n method='POST',\n body={'i': username, 'p': password})\n\n soup = BeautifulSoup(content)\n session_field = soup.find('input', attrs={'name': 's'})\n assert session_field\n\n self.session_key = session_field['value']\n assert self.session_key\n except:\n raise LoginFailure(\"username or password is wrong.\")\n self._soup = soup # update soup.\n self._encoding = self._soup.originalEncoding\n self._check_displaying_main_page_then_trim()", "def do_login():\n\n isTeacher = False\n\n # check if this_user is admin or normal user\n this_user = User.query.filter_by(username=request.form['username']).first()\n \n # is this_user is not student or admin then check teacher table\n if this_user is None:\n this_user = Teacher.query.filter_by(username=request.form['username']).first()\n isTeacher = True\n\n # if this_user is still none -> invalid user\n if this_user is not None:\n if this_user.password == request.form[\"password\"]:\n session['authenticated'] = True\n session['username'] = this_user.username\n session['name'] = this_user.name\n session['isTeacher'] = isTeacher\n if session['username'] == \"admin\":\n session['wasAt'] = \"manageusers\"\n try:\n session['cpi'] = this_user.cpi\n session['grp_size'] = this_user.group_size\n except:\n pass\n else:\n flash(\"Incorrect Password, Please Try Again\") \n else:\n flash(\"Invalid Username, Please Try Again\")\n return home()", "def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n\n # User class login function\n u = User.login(username, password)\n if u:\n self.login(u) # BlogHandler login function\n self.redirect('/blog')\n else:\n msg = 'Invalid login'\n self.render('login-form.html', error = msg)", "def doLogin(self):\n\t\tlogin_data = urllib.urlencode({\n\t\t\t'operatorName' : self.username,\n\t\t\t'password' : self.password,\n\t\t\t'submit' : 'Iniciar+sesi%C3%B3n',\n\t\t})\n\n\t\tresponse = self.opener.open(\"http://172.16.0.2/tdserver/login_deal.jsp\", login_data)\t\t### deberia devolver verdadero o falso segun se logueo o no", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()", "def 
auth_login(request):\n vars = {\n 'body_id':'page_login',\n 'AuthenticationForm': AuthenticationForm( data = request.POST or None ),\n 'nextpage': request.GET.get('next', reverse('start')),\n }\n\n if request.method == 'POST':\n if vars['AuthenticationForm'].is_valid():\n user = vars['AuthenticationForm'].get_user()\n\n if user.is_active:\n login(request, user)\n\n if request.POST.get('keep_session', None):\n request.session.set_expiry(timedelta(days=365))\n else:\n request.session.set_expiry(0)\n return HttpResponseRedirect(vars['nextpage'])\n\n return render(request, 'login.html', vars)", "def login_page():\n form = loginUser()\n\n if \"user\" in session:\n logged_user = session[\"user\"]\n return redirect(f\"users/{logged_user}\")\n\n if form.validate_on_submit():\n username=form.username.data\n password=form.password.data\n\n user = User.authenticate(username=username, password=password)\n\n if user:\n session[\"user\"] = user.username\n\n return redirect(f'/users/{username}')\n else:\n form.password.errors = ['Unable to log in']\n\n return render_template(\"login_form.html\", form=form)", "def login():\n\n form = LoginForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n\n user = User.authenticate(username, password)\n\n if user:\n do_login(user)\n flash(f'Hello, {username}')\n return redirect(f'/cafes')\n\n else:\n form.username.errors = [\"Invalid credentials\"]\n\n return render_template('auth/login-form.html', form=form)", "def login():\n if request.method == 'POST':\n db = database.getdb()\n user = db.execute(\"SELECT * FROM flaskuser WHERE username=?\", (request.form['username'],)).fetchone()\n if check(user[\"pword\"], request.form[\"password\"]):\n session.clear()\n session[\"user_id\"] = 'admin'\n return redirect(url_for('index'))\n return render_template('login.html', title='Log In')", "def test_voluntary_login(self):\n # Going to the login form voluntarily:\n resp = self.app.get('/login', status=200)\n form = resp.form\n # Submitting the login form:\n form['login'] = 'manager'\n form['password'] = 'managepass'\n post_login = form.submit(status=302)\n # Being redirected to the home page:\n ok_(post_login.location.startswith('http://localhost/post_login'))\n home_page = post_login.follow(status=302)\n ok_('authtkt' in home_page.request.cookies,\n 'Session cookie was not defined: %s' % home_page.request.cookies)\n eq_(home_page.location, 'http://localhost/')", "def login_menu(self):\n print(\"\\nPlease enter your email and password\")\n email = self.validate_email()\n password = self.validate_password()\n self.authenticate_user(email, password)", "def login():\n login_page = Login()\n login_page.login_main_page()", "def login_action(login_page, request, driver):\n login_page.login(request.config.getoption(\"--username\"), request.config.getoption(\"--password\"))", "def login():\n if current_user.is_authenticated:\n flash('You are already logged in!')\n return redirect(url_for('index'))\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n user = User.query.filter_by(\n username=form.username.data.lower()).first()\n if user and sha.verify(form.password.data, user.password):\n login_user(user)\n flash('You are logged in as {}!'.format(user.username))\n return redirect(url_for('index'))\n flash('User either does not exist or password is invalid.')\n return render_template('login.html', form=form)", "def login():\n if request.method == \"POST\":\n username = request.form[\"username\"]\n password 
= request.form[\"password\"]\n db = get_db()\n error = None\n user = db.execute(\n \"SELECT * FROM user WHERE username = ?\", (username,)\n ).fetchone()\n\n if user is None:\n error = \"Incorrect username.\"\n elif not check_password_hash(user[\"password\"], password):\n error = \"Incorrect password.\"\n\n if error is None:\n # store the user id in a new session and return to the index\n session.clear()\n session[\"user_id\"] = user[\"id\"]\n return redirect(url_for(\"index\"))\n\n flash(error)\n\n return render_template(\"auth/login.html\")", "def login():\n if request.method == \"POST\":\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n db = get_db()\n error = None\n user = db.execute(\n \"SELECT * FROM user WHERE username = ?\", (username,)\n ).fetchone()\n\n if user is None:\n error = \"Incorrect username.\"\n elif not check_password_hash(user[\"password\"], password):\n error = \"Incorrect password.\"\n\n if error is None:\n # store the user id in a new session and return to the index\n session.clear()\n session[\"user_id\"] = user[\"id\"]\n return redirect(url_for(\"index\"))\n\n flash(error)\n\n return render_template(\"auth/login.html\")", "def login_user():\n\n email = request.form.get('email')\n password = request.form.get('password')\n\n user = crud.check_user_login_info(email, password)\n\n if user:\n session[\"user_id\"] = user.user_id\n session['logged_in'] = True\n fname = user.fname\n flash(f'Welcome {fname}')\n return redirect('/directory')\n\n else:\n flash('Login info is incorrect, try again.')\n return redirect('/signin')", "def login():\n \n # forget any logged in user\n session.clear()\n \n # if user reached via POST\n if request.method == \"POST\":\n \n # ensure credentials entered\n if not request.form.get(\"username\"):\n flash(\"Please enter a username.\", \"error\")\n return redirect(url_for(\"login\"))\n elif not request.form.get(\"password\"):\n flash(\"Please enter a password.\", \"error\")\n return redirect(url_for(\"login\"))\n \n # query database to check for user\n rows = db.execute(\"SELECT * FROM 'users' WHERE username = :username\", username=request.form.get(\"username\"))\n \n if len(rows) != 1 or not pwd_context.verify(request.form.get(\"password\"), rows[0][\"hash\"]):\n flash(\"Username or password is incorrect.\", \"error\")\n return redirect(url_for(\"login\"))\n \n # remember user if login valid\n session[\"user_id\"] = rows[0][\"id\"]\n \n # redirect to home page\n flash(\"You have successfully been logged in.\", \"success\")\n return redirect(url_for(\"index\"))\n \n # if reached via GET\n else:\n return render_template(\"login.html\")", "def log_in():\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n if form.username.data != current_app.config['USERNAME']:\n flash('Invalid username.')\n elif form.password.data != current_app.config['PASSWORD']:\n flash('Invalid password.')\n else:\n session['logged_in'] = True\n flash('You were logged in.')\n\n return redirect(url_for('blog.show_posts'))\n\n return render_template('auth/log_in.html', form=form)", "def userlogin(self, login, password):\n\n payload = {\n 'PASSWORD': password,\n 'path': \"http://www.gamefaqs.com/\",\n 'EMAILADDR': login,\n }\n\n login_url = 'http://www.gamefaqs.com/user/login'\n\n # Grab key ID\n resp = self.session.get(login_url)\n soup = bs(resp.text, 'html.parser')\n payload['key'] = soup.find('input', class_='hidden')['value']\n\n # Login with user payload\n resp = self.session.post(login_url, 
data=payload)\n\n soup = bs(resp.text, 'html.parser')\n\n if soup.find_all(string='There was an error while logging you in: '):\n raise Exception('Login Failed!')\n else:\n logging.debug('{} successfully logged in.'.format(self.login))", "def login():\n form = LoginForm()\n\n state = process_login(form)\n if state == LoginState.SHOW_LOGIN:\n return render_template('user/login.html', form=form)\n elif state == LoginState.SHOW_LOGIN_LOCKED:\n flash('User is locked, Contact Systems Administrator', 'danger')\n return redirect(url_for('user_view.login'))\n elif state == LoginState.SHOW_LOGIN_INCORRECT_PASSWORD:\n flash('Password is Incorrect', 'danger')\n return redirect(url_for('user_view.login'))\n elif state == LoginState.SHOW_LOGIN_EMAIL_NOT_EXIST:\n flash('Email does not exist', 'warning')\n return redirect(url_for('user_view.login'))\n elif state == LoginState.SHOW_DASHBOARD:\n return redirect(url_for('dashboard_view.home'))", "def log_user_in():\n\n print request.form.to_dict()\n user_id = data_manager.get_user_by_email(request.form.to_dict())\n\n if not user_id:\n flash(\"We do not have an account registered with that email. Please make an account.\")\n return redirect(\"/register\")\n\n if user_id == \"Wrong password\":\n flash(\"Wrong password. Please try again.\")\n return redirect(\"/\")\n\n session['user_id'] = user_id\n session['email'] = request.form.get('email')\n\n return redirect(\"/user/%s\" % user_id)", "def login():\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index.index')\n\n if user is None or not user.check_password(form.password.data):\n flash(message='Invalid username or password', category='danger')\n return redirect(url_for('auth.login', next=next_page))\n\n session.clear()\n session['user_id'] = user.id\n return redirect(next_page)\n\n return render_template('auth/login.html', title='Login Page', form=form)", "def login_to_flask(self, form):\n\n flask_login.login_user(self, remember=form.remember_me.data)\n\n # handle 'next' page if we're stopping them en route\n next_page = flask.request.args.get('next')\n if not next_page or werkzeug.urls.url_parse(next_page).netloc != '':\n next_page = flask.url_for('dashboard')\n\n # create a response, inject the token into the cookie and return\n redirect = flask.redirect(next_page)\n response = app.make_response(redirect)\n response.set_cookie('kdm-manager_token', self.token)\n return response", "def process_login():\n\n email = request.form.get('email')\n password = request.form.get('password')\n\n match_user = User.query.filter_by(email=email).first()\n\n\n if not match_user:\n flash(\"No such email address.\")\n return redirect('/login')\n\n\n real_password = User.query.filter_by(email=email).first().password\n\n if password != real_password:\n flash(\"Incorrect password.\")\n return redirect(\"/login\")\n\n session[\"logged_in_customer_email\"] = email\n flash(\"Logged in.\")\n return redirect(\"/\")", "def login(self):\n #raise NotImplementedError(\"This method must be overridden\")", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def login_view(self):\n\n # Authenticate username/email and login authenticated users.\n\n safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_LOGIN_ENDPOINT)\n\n # Immediately redirect already logged in users\n if 
self.call_or_get(current_user.is_authenticated) and self.USER_AUTO_LOGIN_AT_LOGIN:\n return redirect(safe_next_url)\n\n # Initialize form\n login_form = self.LoginFormClass(request.form)\n\n if request.method != 'POST':\n return render_template('login.html')\n\n # if not login_form.validate():\n # print('Invalid form')\n # return render_template('login.html', attempt_failed=True)\n \n # Retrieve User\n # Find user by email (with form.email)\n user, _ = self.db_manager.get_user_and_user_email_by_email(login_form.email.data)\n if user and self.verify_password(login_form.password.data, user.password):\n safe_next_url = self.make_safe_url(login_form.next.data)\n return self._do_login_user(user, safe_next_url, True)\n \n return render_template('login.html', attempt_failed=True)", "def login_post():\n return redirect(url_for('session_api1.session_post'))", "def login():\n\n if \"username\" in session:\n return redirect(f\"/users/{session['username']}\")\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.authenticate(form.data[\"username\"], form.data[\"password\"])\n if user is None:\n if User.query.filter_by(username=form.data[\"username\"]).count() == 0:\n form.username.errors.append(\"Invalid username\")\n else:\n form.password.errors.append(\"Invalid credentials\")\n return render_template(\"login.html\", form=form)\n\n session[\"username\"] = user.username\n return redirect(f\"/users/{user.username}\")\n \n return render_template(\"login.html\", form=form)", "def login(self):\n url = self._root + self._routes[\"login\"]\n self.r = self.reqsession.get(url) \n if self.r.url == 'https://console.zerodha.com/dashboard':\n cookies = self.reqsession.cookies.get_dict('console.zerodha.com')\n self.console_session = cookies['session']\n self.public_token = self.reqsession.cookies['public_token']\n return True\n else:\n raise Exception(\"Login failed or Kite session expired\")", "def login_view(request):\n\n\tif request.user.is_authenticated():\n\t\treturn HttpResponseRedirect('/home')\n\tnext_url = request.GET.get('next', '/home')\n\tif request.method == \"POST\":\n\t\tprint(\"1\")\n\t\tusername = request.POST.get('username')\n\t\tpassword = request.POST.get('password')\n\t\tprint(\"2\")\n\t\tif username and password:\n\t\t\tuser = authenticate(request, username=username, password=password)\n\t\t\tif user:\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn HttpResponseRedirect(next_url)\n\t\t\treturn render(\n\t\t\t\trequest, 'loginPage.html',\n\t\t\t\t{'message': 'Invalid login details'}\n\t\t\t)\n\treturn render(request, \"loginPage.html\", {})", "def login(session):\r\n # TODO: Test downloads with credentials\r\n response = session.get(LOGIN_URL)\r\n response.raise_for_status()\r\n login_form = html.fromstring(response.content).forms[LOGIN_FORM_INDEX]\r\n payload = dict(login_form.fields)\r\n payload[USER_FIELD_NAME] = USERNAME\r\n payload[PASS_FIELD_NAME] = PASSWORD\r\n response = session.post(LOGIN_URL, payload)\r\n response.raise_for_status()", "def process_login():\n\n\temail2 = request.form.get('email')\n\tpassword2 = request.form.get('password')\n\tnote = \"\"\n\n\tprint \"email and pass\", email2, password2\n\t\n\tif email2:\n\t\tuser = model.get_user(email2, password2)\n\t\t#if user is correctly identified in the system\n\t\tif user == True:\n\t\t\tsession['email'] = email2\n\t\t\tnote = \"Welcome %s\" %(email2)\n\t\telse: #bad password\n\t\t\tnote = \"Please make sure you correctly entered your email and password\"\n\n\treturn render_template(\"login.html\")", "def 
login(self, *args, **kwargs):\n serializer = LoginSerializer(data=self.request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.user\n login(self.request, user)\n return Response(status=200)", "def login(self, username, password):\n response = self.call('auth.login', username, password)\n if response[b'result'] == b'success':\n self.sessionid = response[b'token']\n self.authenticated = True", "def handleLogin(self):\n aVar = self.session.getAttribute(self.settings.authenvar)\n self.loggedin = False\n if not aVar:\n self.currenttemplate = self.settings.logintemplate \n self.logger.debug(\"Not logged in, Login-Mask activated.\")\n return\n\n self.loggedin = True\n self.logger.debug('Loged in as: \"{}\"'.format(aVar))", "def login(self):\n self.open(base_url + '/login')\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n self.click('input[type=\"submit\"]')", "def login():\n\n from .forms import LoginForm\n\n form = LoginForm(request.form)\n if form.validate_on_submit():\n username = request.form['username']\n user = User()\n user.id = username\n login_user(user, remember=True)\n logger.info(username + ' successfully logged in.')\n response = redirect(request.args.get(\"next\") or url_for(\"home\"))\n return response\n else:\n return render_template('login.html', form=form)", "def login(request):\n if request.method == 'POST':\n username = request.data.get('username', None)\n password = request.data.get('password', None)\n msg = {}\n if not username:\n msg['username'] = 'This field is required'\n if not password:\n msg['password'] = 'This field is required'\n if len(msg) > 0:\n return Response(msg, status=status.HTTP_400_BAD_REQUEST)\n\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n auth_login(request, user)\n else:\n return Response(\n {'message': 'User Account Disbaled'},\n status=status.HTTP_401_UNAUTHORIZED)\n else:\n return Response(\n {'message': 'User authentication failed'},\n status=status.HTTP_401_UNAUTHORIZED)\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n return Response({'message': 'Logged in'})\n else:\n pass\n request.session.set_test_cookie()\n return Response({})", "def login_form_valid(self, form):\n self.request.session.update({\n 'user_is_none': None,\n 'user_is_active': None\n })\n\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n user = authenticate(email=email, password=password)\n\n if user is None:\n self.request.session['user_is_none'] = True\n return HttpResponseRedirect('/user_account/')\n elif user.active is False:\n self.request.session['user_is_active'] = False\n return HttpResponseRedirect('/user_account/')\n else:\n self.request.session.update({\n 'user_is_none': False,\n 'user_is_active': True\n })\n login(self.request, user)\n return HttpResponseRedirect('/schedule/')" ]
[ "0.7799108", "0.7728997", "0.7510619", "0.7478229", "0.7478229", "0.7477876", "0.74192667", "0.7330238", "0.7261792", "0.72554547", "0.72398543", "0.7227756", "0.7222385", "0.7211491", "0.7187431", "0.71860313", "0.7163313", "0.7160891", "0.7127272", "0.7124372", "0.71183175", "0.7095561", "0.7089203", "0.70858014", "0.7066225", "0.703552", "0.7028472", "0.70157856", "0.7002131", "0.7001458", "0.6995131", "0.69949603", "0.699407", "0.69933534", "0.69924045", "0.6990296", "0.6979848", "0.6976905", "0.6963148", "0.6948666", "0.69475675", "0.6946439", "0.69443476", "0.69192535", "0.691642", "0.69161457", "0.6914448", "0.6910778", "0.6900354", "0.6887656", "0.68828666", "0.6872544", "0.68686676", "0.68610287", "0.6857113", "0.6853591", "0.6851463", "0.68495595", "0.68450904", "0.68428105", "0.68411714", "0.6837275", "0.68315643", "0.68279964", "0.6819173", "0.68081224", "0.6805522", "0.6804343", "0.6800029", "0.6799762", "0.67946464", "0.67874396", "0.678639", "0.678455", "0.678455", "0.6784375", "0.67813075", "0.6778716", "0.67725146", "0.67692953", "0.67653424", "0.6764577", "0.6763801", "0.6758949", "0.67532945", "0.67504907", "0.67444354", "0.67431", "0.67381203", "0.67378795", "0.673636", "0.6732525", "0.67292744", "0.67259264", "0.672433", "0.67178464", "0.6710833", "0.6710734", "0.67101187", "0.67019796" ]
0.7133062
18
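The document string in the row above is only a function signature. A minimal sketch of how such a do_login could satisfy the query's request to keep the session in cookies, assuming a Django-style strategy object that holds the current request (the SessionStrategy class and its request attribute are illustrative assumptions, not part of the dataset):

# Hedged sketch: django.contrib.auth.login records the user id in the session,
# and Django's session middleware then persists that session via a cookie.
from django.contrib.auth import login


class SessionStrategy:
    def __init__(self, request):
        # Assumed: the strategy is constructed with the current HttpRequest.
        self.request = request

    def do_login(self, backend, user):
        # Record the authenticated user in the session so the session cookie
        # identifies them on later requests.
        backend_path = f"{backend.__module__}.{backend.__class__.__name__}"
        login(self.request, user, backend=backend_path)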
auth_data will be used as request_data in strategy
def set_input_data(self, request, auth_data): request.auth_data = auth_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_oauth_data():", "def __init__(self, my_data, my_auth):\n self.user = my_auth.user\n self.password = my_auth.password\n self.my_data = my_data", "def authenticate(self, request):\n auth_data = super().authenticate(request)\n if not auth_data:\n return auth_data\n\n user, auth = auth_data\n\n if amr_claim := auth.data.get(\"amr\"):\n user.token_amr_claim = amr_claim\n\n return user, auth", "def add_auth(self, http_request):\r\n pass", "def auth(self, user):", "def auth_token(self):", "def fake_auth_complete(self, strategy):\r\n args = ()\r\n kwargs = {\r\n 'request': strategy.request,\r\n 'backend': strategy.backend,\r\n 'user': None,\r\n 'response': self.get_response_data(),\r\n }\r\n return strategy.authenticate(*args, **kwargs)", "def request_data(self):\n pass", "def auth_extra_arguments(self):\n extra_arguments = super().auth_extra_arguments()\n extra_arguments[\"p\"] = self.policy or self.data.get(\"p\")\n return extra_arguments", "def set_auth_state(self, data):\n raise NotImplementedError()", "def _get_auth_string(self):", "def update_auth_data(self, auth_data: AuthData) -> None:\n self.auth_data.update(auth_data)\n if \"refresh_id\" in self.auth_data:\n self.set_cookie(COOKIE_NAME, self.auth_data[\"refresh_id\"])\n if self.on_auth_data_changed:\n self.on_auth_data_changed(self.auth_data)", "def authenticate(user, request):", "def get_request_and_strategy(self, auth_entry=None, redirect_uri=None):\r\n request = self.request_factory.get(\r\n pipeline.get_complete_url(self.backend_name) +\r\n '?redirect_state=redirect_state_value&code=code_value&state=state_value')\r\n request.user = auth_models.AnonymousUser()\r\n request.session = cache.SessionStore()\r\n request.session[self.backend_name + '_state'] = 'state_value'\r\n\r\n if auth_entry:\r\n request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry\r\n\r\n strategy = social_utils.load_strategy(backend=self.backend_name, redirect_uri=redirect_uri, request=request)\r\n request.social_strategy = strategy\r\n\r\n return request, strategy", "def __init__(self, auth_class):\n self.url = auth_class.url\n self.ticket = auth_class.ticket\n self.CSRF = auth_class.CSRF", "def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()", "def on_get(self, req, resp):\n data = req.context['auth']\n tenant = dict(id=data.get('domain_id', None), name=data.get('domain_name', None))\n role = dict(name=data.get('roles')[0])\n user = dict(id=data.get('user_id', None), name=data.get('user_name', None), tenant=tenant, role=role)\n data = dict(user=user)\n req.context['result'] = dict(session=data)\n resp.status = HTTP_200", "def post(self):\n\n data = request.get_json()\n # data = request.data\n print(\"data: \", data)\n\n arg_parser = reqparse.RequestParser()\n arg_parser.add_argument(\n \"exp\",\n default=15552000,\n help=\"Parameter must be an integer\",\n type=int\n )\n\n args = arg_parser.parse_args()\n\n print(args)\n\n auth = request.authorization\n print(\"auth req: \", auth)\n if not auth:\n # Try extracting from POST body\n print(\"here\")\n auth = request.get_json()\n print(\"here\")\n print(\"auth: \", auth)\n if not auth or not (\"email\" in auth and \"password\" in auth):\n abort(401, \"Missing authentication credentials\")\n\n # if auth[\"is_driver\"]:\n # # if it is a driver\n # user = Driver.identify(auth[\"email\"])\n # password = auth[\"password\"]\n\n # else:\n # # If it is a restaurant\n # user = Restaurant.identify(auth[\"email\"])\n # password = 
auth[\"password\"]\n\n is_driver = True\n\n user = Driver.identify(auth[\"email\"])\n password = auth[\"password\"]\n\n if not user:\n user = Restaurant.identify(auth[\"email\"])\n is_driver = False\n\n if not user or not user.verify_password(password):\n current_app.logger.warn(\n \"Incorrect credentials for {} from {}\".format(\n auth[\"email\"],\n *request.access_route\n )\n )\n abort(401, \"Incorrect email or password\")\n\n access_token = user.gen_access_token(args[\"exp\"])\n\n current_app.logger.info(\"[AUTH] User {} logged IN from {}\".format(\n user.email,\n *request.access_route\n ))\n\n access_token.update({\n \"is_driver\": is_driver\n })\n\n # return resp, 200\n return access_token", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def init(self, auth_dict=None):\n self.auth_dict = auth_dict", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token)\n data['access_token'] = access_token\n kwargs.update(data)\n kwargs.update({'response': data, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def __init__(self, **kwargs):\n self.data_dict = dict()\n self.data_list = dict()\n self.user_id = kwargs[\"user_id\"]", "def authentication_hook(self):\n pass", "def auth(self):\n return self.api(self.token)", "def get_request_auth_app(self):\n pass", "def __init__(self, auth):\n super(Socrata, self).__init__(auth)\n self.views = Views(auth)\n self.sources = Sources(auth)\n self.configs = Configs(auth)", "def __init__(self):\n self.auth()", "def _get_auth_data(self, storage_type, provider_id='default'):\n if storage_type == 'S3':\n return self.s3_auth.get(provider_id, None)\n elif storage_type == 'MINIO':\n return self.minio_auth.get(provider_id, None)\n elif storage_type == 'ONEDATA':\n return self.onedata_auth.get(provider_id, None)\n elif storage_type == 'WEBDAV':\n return self.webdav_auth.get(provider_id, None)\n return None", "def user_data(self, access_token, *args, **kwargs):\n return self.get_json(\n 'http://sso.rnoep.raccoongang.com/oauth2/access_token/%s/' % access_token,\n params={'access_token': access_token}\n )", "def get_auth(self):\n return {'method': yeti_config.core.auth}", "def inject_data_hook(self, data):\n return data", "def get_post_response_data(self, request, token_obj: \"AuthToken\"):\n UserSerializer = self.get_user_serializer_class()\n data = {\n \"expiry\": self.format_expiry_datetime(token_obj.expiry),\n \"token\": token_obj.token,\n }\n if UserSerializer is not None:\n data[\"user\"] = UserSerializer(request.user, context=self.get_context()).data\n return data", "def __authenticate(self, data):\n if 'token' not in data:\n raise TokenError(\"Invalid Token\")\n if data['token'] != app.config['SLACK_TOKEN']:\n raise TokenError(\"Invalid Token\")", "def set_requests_auth(self):\n self.__auth = OAuth2(token=self.bearer_token)", 
"def authenticate(self):\n #it's weird i have to do this here, but the code makes this not simple\n auth_json={'email':self.user, 'password':self.password}\n #send a post with no auth. prevents an infinite loop\n auth_response = self.post('/auth', data = json.dumps(auth_json), auth =\n None)\n\n _token = auth_response.json['token']\n\n self._token = _token\n self._wrapped.auth = SpringAuth(_token)", "def __init__(self, auth, base_url=ANACODE_API_URL):\n self.auth = auth\n self.base_url = base_url", "async def _token_request(self, data: dict) -> dict:\n session = async_get_clientsession(self.hass)\n\n data[\"client_id\"] = self.client_id\n\n if self.client_secret is not None:\n data[\"client_secret\"] = self.client_secret\n\n headers = {\n \"Authorization\": BasicAuth(self.client_id,\n self.client_secret).encode(),\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n\n resp = await session.post(self.token_url,\n headers=headers,\n data=data)\n resp.raise_for_status()\n return cast(dict, await resp.json())", "def initial(self, request, *args, **kwargs):\n try:\n request.data[\"user\"] = request.auth.user\n except:\n pass\n return super(BoundToUserMixin, self).initial(request, *args, **kwargs)", "def _get_input_auth_data(self, parsed_event):\n storage_type = parsed_event.get_type()\n # Change storage type for dCache events to use WebDav\n if storage_type == 'DCACHE':\n storage_type = 'WEBDAV'\n if storage_type == 'ONEDATA':\n # Check input path and event object_key\n if hasattr(parsed_event, 'object_key'):\n # Get the onedata space from the event object_key\n event_space = parsed_event.object_key.strip('/').split('/', maxsplit=1)[0]\n for input_value in self.input:\n provider_type = StrUtils.get_storage_type(input_value.get('storage_provider'))\n if provider_type == storage_type:\n provider_id = StrUtils.get_storage_id(input_value.get('storage_provider'))\n if self.onedata_auth[provider_id].get_credential('space') == event_space:\n return self._get_auth_data(storage_type, provider_id)\n raise StorageAuthError(auth_type='ONEDATA')\n elif storage_type == 'UNKNOWN':\n return self._get_auth_data(storage_type)\n else:\n return (self._get_auth_data(storage_type), self._get_auth_data(storage_type, parsed_event.provider_id))[parsed_event.provider_id != 'default']", "def _oauth2_process_params(self, request):\n self.in_canvas = (request.REQUEST.get('fb_sig_in_canvas') == '1')\n self.added = (request.REQUEST.get('fb_sig_added') == '1')\n # If app_id is not set explicitly, pick it up from the params\n if not self.app_id:\n self.app_id = request.REQUEST.get('fb_sig_app_id')\n if not self.uid:\n self.uid = request.REQUEST.get('fb_sig_user')", "def authenticate(self, request):\n return None", "def setUp(self):\n super().setUp()\n self.request_factory = RequestFactory()\n self._auth_backend = LTIBackend()", "def _get_user_data(self):\n return {\"key\": self._key}", "def data(self, user=None):\n return {\n \"provider\": self.BACKEND,\n \"access_token\": self.access_token,\n \"client_id\": self.client_id,\n \"honor_code\": \"true\",\n \"country\": \"US\",\n \"username\": user.username if user else \"test_username\",\n \"name\": user.first_name if user else \"test name\",\n \"email\": user.email if user else \"[email protected]\"\n }", "def handle_data(self, data):\n if self._wait_auth:\n if len(data) != self._AUTH_LEN:\n self.force_close()\n return\n data = list(self._cipher_rx.crypt(data))\n authl = list(self._auth_data)\n if data == authl:\n self._wait_auth = False\n self._timeout = None\n 
self.set_number()\n else:\n self.force_close()\n elif self.remote_user:\n self.remote_user.send(self._cipher_rx.crypt(data))", "def pyramid_request(self, pyramid_request, lti_user):\n pyramid_request.params[\"code\"] = \"test_code\"\n pyramid_request.params[\"state\"] = \"test_state\"\n pyramid_request.session[\"oauth2_csrf\"] = \"test_csrf\"\n pyramid_request.lti_user = lti_user\n return pyramid_request", "def _interpret_auth_data(auth_data):\n if isinstance(auth_data, Mapping):\n # upgrade old-style single dict configs to new-style list-of-dicts (with one item)\n auth_data = [auth_data]\n\n auth = []\n for config in auth_data:\n # translate config options\n host = \"\"\n ssl = True\n extra_kwargs = {}\n try:\n # SSL options\n if \"protocol\" in config and config[\"protocol\"] == \"SASL_PLAINTEXT\":\n ssl = False\n elif \"ssl_ca_location\" in config:\n extra_kwargs[\"ssl_ca_location\"] = config[\"ssl_ca_location\"]\n\n # SASL options\n user = config[\"username\"]\n password = config[\"password\"]\n\n if \"hostname\" in config:\n host = config[\"hostname\"]\n\n token_endpoint = config.get(\"token_endpoint\")\n\n if \"mechanism\" in config:\n mechanism = config[\"mechanism\"].replace(\"-\", \"_\")\n elif token_endpoint:\n mechanism = \"OAUTHBEARER\"\n else:\n mechanism = \"SCRAM_SHA_512\"\n\n except KeyError as ke:\n raise RuntimeError(\"configuration file is not configured correctly: \"\n f\"missing auth property {ke}\")\n else:\n auth.append(Auth(user, password, host=host, ssl=ssl, method=SASLMethod[mechanism],\n token_endpoint=token_endpoint, **extra_kwargs))\n return auth", "def __init__(self, request, data):\n self.request = request\n self.data = data", "def auth():\n pass", "def auth():\n pass", "def getUser(self, authenticationToken):\r\n pass", "def _on_signin(self, data, auth_info, provider, extra=None):\n logging.debug('Got user data: %s', data)\n\n auth_id = '%s:%s' % (provider, data['id'])\n\n logging.debug('Looking for a user with id %s', auth_id)\n user = self.auth.store.user_model.get_by_auth_id(auth_id)\n _attrs = self._to_user_model_attrs(data, self.USER_ATTRS[provider])\n\n if user:\n logging.debug('Found existing user to log in')\n # Existing users might've changed their profiile data so we update our\n # local model anyway. This might result in quite inefficient usage\n # of the Datastore, but we do this anyway for demo purposes.\n #\n # In a real app you could compare _attrs with user's properties fetched\n # from the datastore and update local user in case something's changed.\n user.populate(**_attrs)\n user.put()\n self.auth.set_session(self.auth.store.user_to_dict(user))\n\n else:\n # check whether there's a user currently logged in\n # then, create a new user if nobody's signed in,\n # otherwise add this auth_id to currently logged in user.\n\n if self.logged_in:\n logging.debug('Updating currently logged in user')\n\n u = self.current_user\n u.populate(**_attrs)\n # The following will also do u.put(). 
Though, in a real app\n # you might want to check the result, which is\n # (boolean, info) tuple where boolean == True indicates success\n # See webapp2_extras.appengine.auth.models.User for details.\n u.add_auth_id(auth_id)\n\n else:\n logging.debug('Creating a brand new user')\n ok, user = self.auth.store.user_model.create_user(auth_id, **_attrs)\n if ok:\n self.auth.set_session(self.auth.store.user_to_dict(user))\n\n # user settings page\n destination_url = '/settings'\n if extra is not None:\n params = webob.multidict.MultiDict(extra)\n destination_url = str(params.get('destination_url', '/settings'))\n return self.redirect(destination_url)", "def init_context_data(self):\n pass", "def do_request(self, path, method='get', params=None, data=None,\n headers=None, cookies=None, auth=None):\n headers = {'Authorization': self.token}\n return super(FGasesRegistry, self).do_request(path,\n method=method,\n params=params,\n data=data,\n headers=headers,\n cookies=cookies,\n auth=auth)", "def __call__(self, access_token):", "def __init__(self, data):\n self.__authenticate(data)\n self.user_id = self.__parse_user_id(data)\n self.channel_id = self.__parse_channel_id(data)\n self.data = data\n if 'text' in data:\n self.text = data['text'].split(\", \")", "def _check_token_data(self, jwt_token_data):\n try:\n self.user = get_user_model().objects.get(pk=jwt_token_data['id'])\n except (TypeError, KeyError):\n return self.render_api_error_response('Not authenticated - Bad authorization header data', status=401)\n except get_user_model().DoesNotExist:\n return self.render_api_error_response('Not authenticated - User not found', status=401)\n self.jwt_token_data = jwt_token_data\n return None", "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(AuthForm, self).__init__(*args, **kwargs)", "def fhir_enquiry(request, context_override={}):\n\n state = get_state(CLIENT_ID,AUTH_URL)\n code = get_code(CLIENT_ID,AUTH_URL)\n\n # set default context\n context = {}\n context['template'] = \"result.html\"\n context['get_fmt'] = \"json\"\n context['display'] = \"Me\"\n context['code'] = code\n context['state'] = state\n context['ask'] = \"/api/v1/me?_format=json\"\n context['url'] = settings.OAUTH_TEST_INFO['BASE']\n context['headers'] = {'content-type': 'application/x-www-form-urlencoded',\n 'Authorization': \"Bearer \"+ get_code(CLIENT_ID, AUTH_URL)},\n\n # add / overwrite anything in context_override\n context = update_dict(context, context_override)\n\n data = {'code': code,\n 'grant_type': 'authorization_code',\n 'key': 'access_token',\n #'key': 'refresh_token',\n 'access_token': get_access(state),\n 'refresh_token': get_refresh(state),\n 'redirect_uri': REDIRECT_URI}\n\n if settings.DEBUG:\n print(\"Context after update:\", context)\n print(\"Data:\", data)\n\n print(\"SERVICE:\", SERVICE )\n\n # Get access_token\n headers = {}\n print('Context Headers:', dict(context['headers'][0]))\n #headers = {'headers': update_dict(headers, context_override=dict(context['headers'][0]))}\n headers = update_dict(headers, context_override=dict(context['headers'][0]))\n print(\"Headers:\", headers)\n\n kw_to_send = {'data': data, 'headers': headers}\n\n #session = SERVICE.get_auth_session(method=\"POST\",**kw_to_send)\n #session = SERVICE.get_session(get_access(state))\n #session = SERVICE.get_raw_access_token(method=\"POST\", **kw_to_send)\n session = SERVICE.get_raw_access_token(data=data)\n\n #response = SERVICE.get_access_token(method=\"POST\")\n # response = 
SERVICE.get_auth_session(data=data)\n print(\"Auth Session\", session)\n #response = SERVICE.get_raw_access_token(data=data, **headers)\n\n get_text = session.json()\n\n if 'access_token' in get_text:\n print(\"got an access token\")\n access = save_tokens(state,\n get_text['access_token'],\n get_text['refresh_token'])\n\n print(\"RESPONSE:\", get_text)\n # RESPONSE: {\"expires_in\": 36000,\n # \"access_token\": \"h1vY5eDu69JKfV4nPpdu8xEan63hKl\",\n # \"scope\": \"patient/*.read write_consent\",\n # \"token_type\": \"Bearer\",\n # \"refresh_token\": \"6HZnSwhfsGvfr9Aguw5n0e5CoGr8CQ\"}\n\n\n sesn = SERVICE.get_session(get_text['access_token'])\n print(\"SESSION:\", sesn)\n\n r = sesn.get(context['url'] + context['ask'])\n\n if settings.DEBUG:\n print(\"R:\", r.content)\n\n return r", "def lesson_auth(request):", "def process_request(self, req):\n if req.headers.get('X-Identity-Status') == 'Confirmed':\n req.context = self._get_authenticated_context(req)\n elif req.headers.get('X-Auth-Token') is not None:\n req.context = self._get_auth_token_context(req)\n elif CONF.allow_anonymous_access:\n req.context = self._get_anonymous_context()\n else:\n raise webob.exc.HTTPUnauthorized()", "def get_complete_parameters(self, auth_request_params):\n params = super().get_complete_parameters(auth_request_params)\n params.update(\n {\n \"id_token\": sign_id_token(self.get_apple_id_token_payload()),\n \"user\": json.dumps(\n {\n \"email\": \"[email protected]\",\n \"name\": {\n \"firstName\": \"A\",\n \"lastName\": \"B\",\n },\n }\n ),\n }\n )\n return params", "def request_data(self, request_data):\n\n self._request_data = request_data", "def setUp(self):\r\n self._request_factory = RequestFactory()\r\n self._anon_user = AnonymousUser()\r\n self._auth_user = UserFactory.create(\r\n email=\"[email protected]\",\r\n username=\"test\",\r\n profile__name=\"Test User\"\r\n )\r\n # This contains issue_type and course_id to ensure that tags are submitted correctly\r\n self._anon_fields = {\r\n \"email\": \"[email protected]\",\r\n \"name\": \"Test User\",\r\n \"subject\": \"a subject\",\r\n \"details\": \"some details\",\r\n \"issue_type\": \"test_issue\",\r\n \"course_id\": \"test_course\"\r\n }\r\n # This does not contain issue_type nor course_id to ensure that they are optional\r\n self._auth_fields = {\"subject\": \"a subject\", \"details\": \"some details\"}", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)", "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(AuthenticationFormCustom, self).__init__(*args, **kwargs)", "def transform_credentials(self, data: Dict, **kwargs) -> Dict:\r\n name = data.pop(\"name\")\r\n return_data = {name: data}\r\n return return_data", "def __init__(self, cache, user_data):\n self.ctx = {}\n super().__init__(cache, user_data, {})", "async def authenticate(self, request: web.Request) -> Dict[str, Any]:", "def __call__(self, context, callback):\r\n\r\n callback((('authorization', 'Bearer ' + self.token_hash ),), None)", "def __init__(self, authtoken, portal_id):\n self.details = { \n 'authtoken': authtoken\n }\n self.portal_id = portal_id", "def __init__(self, authtoken, portal_id):\n self.details = { \n 'authtoken': authtoken\n }\n self.portal_id = portal_id", "def __init__(self, data):\n self.user_id = data['user_id']\n self.condition_id = data['condition_id']\n self.condition = data['condition']\n self.condition_details = data['condition_details']\n 
self.user_answer = data['user_answer']", "def __init__(self, req, credentials_fn):\n self.req = req\n self.credentials_fn = credentials_fn", "def test_authflow(self):\n response = self.client.post('/auth/signup/', {\n 'first_name': 'John',\n 'last_name': 'Doe',\n 'email': '[email protected]',\n 'password': self.password,\n 'gstin': '11AAAAA1111A1A1',\n 'mobile': self.mobile,\n 'business_name': 'busi_ness',\n 'address': {'address_name':'', 'address_line1': '', 'address_line2': '', 'state': '', 'pincode': '209801', 'country': 'INDIA'}\n })\n\n response_data = response.json()\n\n self.assertListEqual(list(response_data.keys()), ['id', 'otp'])\n\n response = self.client.post('/auth/verify-otp/', response_data)\n\n response_data = response.json()\n self.assertListEqual(list(response_data.keys()), ['token', 'refresh_token', 'session_key'])\n self.assertRegexpMatches(response_data['token'], r'[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+')\n self.assertRegexpMatches(response_data['refresh_token'], r'[0-9A-Za-z]{32}')\n self.assertRegexpMatches(response_data['session_key'], r'[0-9A-Za-z]{32}')\n\n response = self.client.post('/auth/signin/', {'id_field': self.mobile, 'password': self.password})\n auth_data = response.json()\n\n refresh_token = auth_data['refresh_token']\n session_key = auth_data['session_key']\n\n response = self.client.post('/auth/refresh/', {'refresh_token': refresh_token}, HTTP_AUTHORIZATION='JWT ' + auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n refreshed_auth_data = response.json() \n response = self.client.get('/auth/handle-sessions/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n active_sessions = response.json()\n self.assertListEqual(list(active_sessions.keys()), ['token_list'])\n\n acitve_sessions_token_list = active_sessions.get('token_list')\n\n # end all other sessions except your own\n for session_key_iter in acitve_sessions_token_list:\n if session_key_iter != session_key:\n self.client.post('/auth/handle-sessions/', {'session_key': session_key_iter}, HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n # log out from own session\n self.client.get('/auth/signout/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def __init__(self, user_data: dict):\n self.login = user_data['login']\n self.followers = user_data['followers']\n self.following = user_data['following']\n self.site_admin = user_data['site_admin']\n self.name = user_data['name']\n self.company = user_data['company']\n self.blog = user_data['blog']\n self.location = user_data['location']\n self.public_repos = user_data['public_repos']\n self.public_gists = user_data['public_gists']\n self.organizations = self.__get_orgs_len()\n self.git_score = self.get_score()", "def auth_access_token_request(self, auth_access_token_request):\n\n self._auth_access_token_request = auth_access_token_request", "def getauth_process():\n global logger\n\n p = reqparse.RequestParser()\n\n # answer when requested as 
json in a post\n p.add_argument('tool_id' , type=str, location='json')\n p.add_argument('api_key', type=str, location='json')\n p.add_argument('card_id', type=str, location='json')\n\n # answer when requested on url \n #p.add_argument('rdr_id',type=str)\n #p.add_argument('api_key',type=str)\n #p.add_argument('card_id',type=str)\n \n # get passed params \n args = p.parse_args()\n\n #logger.info('getauth ' + 'ip:' + request.remote_addr + ' api_key:' + args['api_key'])\n #logger.info('getauth ' + ' card_id:' + args['card_id'])\n\n args.setdefault('api_key','')\n args.setdefault('card_id','')\n args.setdefault('tool_id','')\n\n return(getauth_from_db(args))", "def initialize(self, request, args, kwargs):\n data = request_data.RequestData(request, args, kwargs)\n mutator = access_checker.Mutator(data)\n if data.is_developer:\n check = access_checker.DeveloperAccessChecker(data)\n else:\n check = access_checker.AccessChecker(data)\n return data, check, mutator", "def for_user(self, a_token, a_secret):\n\t\tself.a_token = a_token\n\t\tself.a_secret = a_secret", "def __init__(\n self,\n uri,\n audience,\n get_token,\n **kwargs\n ):\n super(JWTTokenAuth, self).__init__(uri, audience, kwargs.pop(\"token_type\", TOKEN_TYPE_JWT), get_token)\n self.get_token = get_token", "def _get_auth_info_for_id_or_from_request(\n sub_type=str, user=None, username=None, db_session=None\n):\n db_session = db_session or current_app.scoped_session()\n\n # set default \"anonymous\" user_id and username\n # this is fine b/c it might be public data or a client token that is not\n # linked to a user\n final_user_id = None\n if sub_type == str:\n final_user_id = sub_type(ANONYMOUS_USER_ID)\n final_username = ANONYMOUS_USERNAME\n\n token = \"\"\n try:\n if user:\n final_username = user.username\n final_user_id = sub_type(user.id)\n elif username:\n result = query_for_user(db_session, username)\n final_username = result.username\n final_user_id = sub_type(result.id)\n else:\n token = validate_request(scope={\"user\"}, audience=config.get(\"BASE_URL\"))\n set_current_token(token)\n final_user_id = current_token[\"sub\"]\n final_user_id = sub_type(final_user_id)\n final_username = current_token[\"context\"][\"user\"][\"name\"]\n except Exception as exc:\n logger.info(\n f\"could not determine user auth info from request. setting anonymous user information. Details:\\n{exc}\"\n )\n\n client_id = \"\"\n try:\n if not token:\n token = validate_request(scope=[], audience=config.get(\"BASE_URL\"))\n set_current_token(token)\n client_id = current_token.get(\"azp\") or \"\"\n except Exception as exc:\n logger.info(\n f\"could not determine client auth info from request. setting anonymous client information. 
Details:\\n{exc}\"\n )\n\n if (\n not config.get(\"CLIENT_CREDENTIALS_ON_DOWNLOAD_ENABLED\")\n and final_username == ANONYMOUS_USERNAME\n and client_id != \"\"\n ):\n raise Forbidden(\"This endpoint does not support client credentials tokens\")\n\n return {\n \"user_id\": final_user_id,\n \"username\": final_username,\n \"client_id\": client_id,\n }", "def _get_params(self, params={}, auth_type=None):\n auth_type = auth_type or self.AUTH_TYPE['basic']\n\n params['_user_id'] = self.USER_ID\n params['_api_key'] = self.PUBLIC_KEY\n\n if self.AUTH_TYPE['basic'] == auth_type:\n pass\n elif self.AUTH_TYPE['hmac'] == auth_type:\n # Get current unix timestamp (UTC time).\n params['_timestamp'] = int(time.time())\n params['_hash'] = self._do_hmac(params)\n else:\n raise RuntimeError\n\n return params", "def __call__(self, data, **kwargs):", "def for_authenticate_only(self):\n self.token['type'] = 'auth'\n\n return self", "def auth_user():\n\n logging.info(request.headers)\n validate(request)", "def __call__(self, request):\n request.headers['Authorization'] = f'Token {self.token}'\n return request", "def main_response(self, data):", "def main_response(self, data):", "def denormalize_token_data(self, data):\n if not data:\n return\n\n return {\"oauth_token\": data.get(\"token\"),\n \"oauth_token_secret\": data.get(\"extra\")}", "def validate(self, attrs):\n data = super().validate(attrs)\n\n # Get the fields from the user profile serializer.\n serializer = UserSerializerWithToken(self.user).data\n\n for fields, values in serializer.items():\n data[fields] = values\n\n #print('token:', data)\n\n return data", "def validate(self, data):\n username = data.get(\"username\")\n password = data.get(\"password\")\n\n user = authenticate(\n request=self.context.get(\"request\"), username=username, password=password\n )\n\n if not user:\n msg = _(\"Unable to authenticate with provided credentials\")\n raise serializers.ValidationError(msg, code=\"authentication\")\n\n data[\"user\"] = user\n return data", "def get_user_data(self):\n return self.user_data", "def get_user_data(self):\n return self.user_data", "def get_data():\n pass", "def on_auth_resp(self, jdata):\n LOGGER.debug('on_auth_resp %s', str(jdata))\n self.client_id = jdata['Payload']['ClientId']\n\n self.send_dict_pack(\n MoloSocketHelper.req_tunnel(self.tunnel['protocol'],\n self.tunnel['hostname'],\n self.tunnel['subdomain'],\n self.tunnel['rport'], self.client_id))", "def prepare_data(self):", "def authn_and_authz():\n authentication()\n authorization()" ]
[ "0.6669421", "0.6569925", "0.63427466", "0.61100876", "0.6065976", "0.6054941", "0.6029151", "0.60213995", "0.6012752", "0.5897798", "0.5737002", "0.5730088", "0.56567615", "0.5615505", "0.56064767", "0.55513734", "0.5519616", "0.5500598", "0.5468376", "0.5468376", "0.54406774", "0.54257256", "0.5405854", "0.54002213", "0.53946966", "0.53935367", "0.53932387", "0.5377167", "0.53764945", "0.53530496", "0.53456134", "0.5334704", "0.5330824", "0.53305346", "0.5329766", "0.5303802", "0.5293426", "0.5291898", "0.52820635", "0.52819765", "0.5278442", "0.52553797", "0.5255129", "0.5254057", "0.52521974", "0.5251975", "0.52505136", "0.52492654", "0.52427864", "0.5235236", "0.5235236", "0.5233252", "0.522456", "0.5212514", "0.5207419", "0.52011687", "0.5197467", "0.51967645", "0.5196763", "0.5189754", "0.51896954", "0.5188355", "0.51823294", "0.5178179", "0.5174714", "0.51665586", "0.516109", "0.5156139", "0.5148212", "0.51470804", "0.51413745", "0.5140411", "0.5140411", "0.5121441", "0.51172245", "0.51082563", "0.50985855", "0.5093703", "0.50924", "0.5092382", "0.5091364", "0.50913155", "0.5089426", "0.5087631", "0.5082588", "0.50779927", "0.506877", "0.5068183", "0.50661284", "0.50578254", "0.50578254", "0.50550306", "0.5050094", "0.504743", "0.5035839", "0.5035839", "0.503479", "0.50310636", "0.50240546", "0.502355" ]
0.71807003
0
Tests that only 'admin' can add a product
def test_only_admin_can_create_product(self):
    resp = self.admin_create_user()
    reply = self.attendant_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Unauthorized Access!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n 
content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_admin(self):\n assert(admin)", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_add_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.add_facility'))", "def test_add_admin_to_org(self):\n pass", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n 
prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = 
json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_add_product_view_for_unauthenticated_users(client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 302\n assert response.url == \"/accounts/login/?next=/products/add-product/\"", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_add_admin(self):\n self.test_create_user()\n self.test_create_organization()\n url = reverse('MGA:add_admin')\n data = {'admin id': 1, 'org_id': 1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def 
test_add_product_during_auth(self):\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n # Adding a product here should succeed\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n # Adding a product here should go to a new basket, not the one we're auth'ing\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n # Adding a product here should go to basket2, not basket1\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n 
self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def permission(self):\n return \"core.manage_products\"", "def test_add_role(self):\n pass", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_PRODUCTS_addProduct(browser, url, username, password):\n\t\t\n\t#initialise browser and login with valid credentials\n\tgo_to_admin(browser,url,username,password)\n\n\ttry:\n\t\t# Select \"Add a Product\" link from \"Products\" menu\n\t\tbrowser.find_element_by_link_text('Products').click()\n\t\tbrowser.find_element_by_link_text('Add a Product').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t#Provide Details of the product\n\telement = wait_until_element_present(browser, 'product-name', 'ID')\n\telement.send_keys('Testing')\n\n\ttry:\n\t\tbrowser.find_element_by_id('product-price').send_keys(\"10.45\")\n\t\tbrowser.find_element_by_xpath('//li[@title = \"'+category_name+'\"]/a[text()=\"'+category_name+'\"]').click()\n\t\tbrowser.find_element_by_id('product-weight').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-sku').send_keys(SKU)\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise \t\n\n\ttry:\n\t\tbrowser.execute_script(\"tinyMCE.activeEditor.dom.remove(tinyMCE.activeEditor.dom.select('p'));\")\n\t\tbrowser.execute_script(\"tinymce.activeEditor.execCommand('mceInsertContent', true, \\\"TEST AUTOMATION BANNER\\\");\")\n\texcept WebDriverException:\n\t\tbrowser.find_element_by_id('wysiwyg').clear()\n\t\tbrowser.find_element_by_id('wysiwyg').send_keys('TEST AUTOMATION BANNER')\n\n\ttry:\t\n\t\tbrowser.find_element_by_id('product-width').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-height').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-depth').send_keys(\"1\")\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise \n\t\t\n\tUpload \"Image and Video\" of the 
product\n\ttry:\n\t\tbrowser.find_element_by_link_text('Images & Videos').click()\n\t\tfile = browser.find_element_by_xpath('//input[@class = \"file-upload\"]')\n\t\tfile.send_keys(product_img_path)\n\t\ttime.sleep(15)\n\t\tbrowser.find_element_by_id('product-videos-search-query').send_keys(product_video_url)\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\telement=wait_until_element_present(browser,'product-videos-search','ID')\n\telement.click()\n\n\ttry:\n\t\ttime.sleep(15)\t\n\t\tbrowser.find_element_by_xpath('//label[@for = \"'+product_video_label+'\"]').click()\n\n\t\t#Provide \"Inventory\" detials of the product\n\t\tbrowser.find_element_by_link_text('Inventory').click()\n\t\tbrowser.find_element_by_xpath('//label[@for = \"product-inventory-tracking-1\"]').click()\n\t\tclear_field(browser,'inventory-level')\n\t\tbrowser.find_element_by_id('inventory-level').send_keys(\"123\")\n\t\tclear_field(browser,'inventory-warning-level')\n\t\tbrowser.find_element_by_id('inventory-warning-level').send_keys(\"123\")\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t\t\n\ttry:\t\n\t\t#Select Product Delivery details\n\t\tbrowser.find_element_by_link_text('Delivery/Event Date').click()\n\t\tbrowser.find_element_by_xpath('//label[@for = \"product-event-date-required\"]').click()\n\t\tbrowser.find_element_by_link_text('Details').click()\n\t\tbrowser.find_element_by_name('btn-save').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\n\n\tverify_and_assert_success_message(browser, \"The new product has been added successfully.\", \".alert-success\")\n\n\t# View newly created Product in control panel\n\tbrowser.find_element_by_link_text('Products').click()\n\tbrowser.find_element_by_link_text('View Products').click()\n\telement = wait_until_element_present(browser,'search-query','ID')\n\telement.send_keys(SKU)\n\tbrowser.find_element_by_xpath('//button[@class = \"btn btn-secondary filter-button\"]').click()\n\ttime.sleep(15)\n\tbrowser.find_element_by_xpath(\"//tr[contains(.,'\" + SKU + \"')]\").find_element_by_css_selector('.dropdown-trigger').click()\n\tbrowser.find_element_by_link_text('View').click()\n \n #Switching to cart window \n\tfor handle in browser.window_handles:\n\t\t\t\tbrowser.switch_to_window(handle)\n\n\t#Provide required delivery date\n\ttry:\n\t\twait_until_element_present(browser,'EventDateMonth','ID')\n\t\tselect_dropdown_value(browser, 'EventDateMonth', 'Jan')\n\t\tselect_dropdown_value(browser, 'EventDateDay', '1')\n\t\tselect_dropdown_value(browser, 'EventDateYear', '2013')\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\n\t\t\t\n\telement = wait_until_element_present(browser, '//input[contains(@src,\"AddCartButton.gif\")]', 'XPATH')\n\telement.click()\n\n\t#Proceeding to checkout as guest\n\ttime.sleep(15)\n\twait_until_element_present(browser, '//a[@title = \"Click here to proceed to checkout\"]', 'XPATH').click()\n\telement = wait_until_element_present(browser, 'checkout_type_guest', 'ID')\n\telement.click()\n\telement = wait_until_element_present(browser, 'CreateAccountButton', 'ID')\n\telement.click()\n\twait_until_element_present(browser, 'FormField_1', 'ID')\n\n\t#Provide Billing Details and proceed further\n\ttry:\n\t\tbrowser.find_element_by_id('FormField_1').clear()\n\t\tbrowser.find_element_by_id('FormField_1').send_keys('[email 
protected]')\n\t\tbrowser.find_element_by_id('FormField_4').clear()\n\t\tbrowser.find_element_by_id('FormField_4').send_keys('Virendra')\n\t\tbrowser.find_element_by_id('FormField_5').clear()\n\t\tbrowser.find_element_by_id('FormField_5').send_keys('Brahmbhatt')\n\t\tbrowser.find_element_by_id('FormField_7').clear()\n\t\tbrowser.find_element_by_id('FormField_7').send_keys('234234423234')\n\t\tbrowser.find_element_by_id('FormField_8').clear()\n\t\tbrowser.find_element_by_id('FormField_8').send_keys('George Street')\n\t\tbrowser.find_element_by_id('FormField_10').clear()\n\t\tbrowser.find_element_by_id('FormField_10').send_keys('Sydney')\n\t\tselect_dropdown_value(browser, 'FormField_11', 'Australia')\n\t\tselect_dropdown_value(browser, 'FormField_12', 'New South Wales')\n\t\tbrowser.find_element_by_id('FormField_13').clear()\n\t\tbrowser.find_element_by_id('FormField_13').send_keys('2000')\n\t\tbrowser.find_element_by_css_selector('.Submit .billingButton').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\t\n\n\n\t# Select shipping method\n\telement = wait_until_element_present(browser, \"//input[contains(@id, 'shippingCheck')]\", 'XPATH')\n\telement.click()\n\tbrowser.find_element_by_xpath(\"//div[@class='ML20']/input[@type='submit' and contains(@value,'Continue')]\").click()\n\n\t# Proceed to payment\n\ttry:\n\t\twait_until_element_present(browser, 'bottom_payment_button', 'ID')\n\t\tbrowser.find_element_by_id('bottom_payment_button').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t#Provide Credit Card Details\n\ttry:\n\t\twait_until_element_present(browser,'creditcard_cctype','ID')\n\t\tselect_dropdown_value(browser, 'creditcard_cctype', 'Visa')\n\t\tbrowser.find_element_by_id('creditcard_name').send_keys('test')\n\t\tbrowser.find_element_by_id('creditcard_ccno').send_keys('4242424242424242')\n\t\tselect_dropdown_value(browser, 'creditcard_ccexpm', 'Jan')\n\t\tselect_dropdown_value(browser, 'creditcard_ccexpy', '2014')\n\t\tbrowser.find_element_by_id('creditcard_cccvd').send_keys('234')\n\t\tbrowser.find_element_by_xpath('//input[@value = \"Pay for Order\"]')\n\t\t\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n # Assert the succes message of Order Creation\n\torder_success_msg = 'YOUR ORDER NUMBER IS'\n\tbrowser_success_msg = browser.find_element_by_xpath('//div[@class = \"alert alert-success\"]/p').text\n\n\tif order_success_msg in browser_success_msg:\n\t\tprint \"I found my text\"\n\t\tassert True\n\telse:\n\t\tprint \"No text\"\n\t\tassert False", "def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)", "def test_default_product_stealability(self):\n prod = Product('Test Product')\n 
self.assertEqual(prod.stealability(), \"Kinda stealable.\")", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def test_new_product(self):\n prod = Product(name='New Product', price=100, weight=60,\n flammability=0.9)\n self.assertEqual(prod.explode(), '...BABOOM!!')\n self.assertEqual(prod.stealability(), 'Very stealable!')", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_admin_required(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/required')\n self.assertEqual(b'required', rv.data)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_add_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.add_facility'))", "def test_none_admin_edit(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_add_new_product(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"price\": \"4500.0\",\n \"name\": \"Producto 3\",\n \"description\": \"Descripcion de producto 3\"\n }\n\n response = self.client.post('/api/1.0/products/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertNotEqual(response.data['published_date'], '')\n self.assertEqual(response.data['name'], 'Producto 3')\n self.assertEqual(response.data['description'], 'Descripcion de producto 3')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '4500.0')\n self.assertEqual(response.data['seller']['user']['username'], self.username)\n self.assertEqual(response.data['category']['name'], 'deportes')", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_create__admin_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': True, 'isSiteEditor': True}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertTrue(actual_json['is_site_editor'])\n self.assertTrue(actual_json['is_admin'])\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n 
self.assertEqual('[email protected]', new_appuser.email)\n self.assertTrue(new_appuser.is_admin)\n\n # Clean up\n new_appuser.key.delete()", "def test_admin(self):\r\n from django.contrib import admin\r\n admin.autodiscover()\r\n\r\n from adrest.models import AccessKey\r\n self.assertTrue(AccessKey in admin.site._registry)\r\n\r\n from adrest.models import Access\r\n self.assertTrue(Access in admin.site._registry)", "def test_add_product_view_for_authenticated_users(user_company, client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 200", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_add_role_simple(self):\n pass", "def test_03_admin_featured_apps_as_admin(self):\r\n self.register()\r\n self.signin()\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Manage featured applications\" in res.data, res.data", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_add_role_simple_post(self):\n pass", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_add_category_with_wrong_perms(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=4).save()\n response = self.client.post('/categories/add', {}, follow=True)\n self.assertTemplateUsed(response, 'unauthorized.html')", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n 
resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_attendant_make_a_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)", "def test_unauthorized_add(self):\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 403", "def test_add_single_org_product(client, jwt, session, keycloak_mock): # pylint:disable=unused-argument\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n rv = client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org1),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n rv_products = client.post(f\"/api/v1/orgs/{dictionary.get('id')}/products\",\n data=json.dumps(TestOrgProductsInfo.org_products1),\n headers=headers, content_type='application/json')\n assert rv_products.status_code == http_status.HTTP_201_CREATED\n assert schema_utils.validate(rv_products.json, 'org_product_subscriptions_response')[0]", "def test_23_admin_add_category(self):\r\n self.create()\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n # Anonymous user\r\n url = '/admin/categories'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be added\"\r\n assert \"Category added\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category form validation should work\"\r\n assert \"Please correct the errors\" in res.data, err_msg", "def test_project_administrator(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n return False", "def test_add_category_with_perms(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = 
self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')", "def test_05_admin_featured_apps_as_user(self):\r\n self.register()\r\n self.signout()\r\n self.register()\r\n self.register(name=\"tester2\", email=\"[email protected]\",\r\n password=\"tester\")\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\", res.status", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_try_add_new_book_while_unauthorized(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/reg',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_user)\n response = self.client().post('/api/v1/login', json=self.test_user)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'),\n \"Only Admins are allowed to add 
books\")\n self.assertEqual(response.status_code, 403)", "def test_none_admin_post(self):\n with self.client:\n token = self.customer()\n response = self.client.post('api/v1/meals', data=json.dumps(\n dict(\n meal_name=\"fries\",\n price=10000\n )\n ),\n content_type='application/json',\n headers=({\"token\": token})\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_create_drink_created_by_admin(self):\n self.test_create_admin_user()\n self.test_create_seting_bar()\n user = UserBase.objects.get(username='admin')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.token)\n url = reverse('drink-list')\n data = {\n 'name': 'Testing Drink',\n 'ingredients':'[{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001},{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001}]'\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)", "def test_creating_supply_user(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_add_facility_pt4(self):\n with self.assertRaises(InvalidPermission):\n self.assertFalse(self.learner1.has_perm('auth.add_facility', obj=[]))", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = 
json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_post_product(self):\n with self.client:\n # Register an admin user\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n name='leticia',\n email='[email protected]',\n role='admin',\n username='ticia',\n password='password',\n confirm_password='password'\n )),\n content_type='application/json'\n )\n # Register attendant\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n name='leticia',\n email='[email protected]',\n role='attendant',\n username='leticia',\n password='password',\n confirm_password='password'\n )),\n content_type='application/json'\n )\n\n # login as admin\n login_response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='ticia',\n password='password'\n\n )),\n content_type='application/json'\n )\n result = json.loads(login_response.data)\n token = result\n\n # Login attendant\n login_att_response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='leticia',\n password='password'\n\n )),\n content_type='application/json'\n )\n resultatt = json.loads(login_att_response.data)\n tokenatt = resultatt\n # Test successful post\n response = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=100,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(\"Product with id 100 added successfully\", response_data[\"message\"])\n self.assertEqual(response.status_code, 201)\n # Test post product with existing product id\n responsez = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=100,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_dataz = json.loads(responsez.data)\n self.assertEqual(\"The product Id you entered is being used for another product\", response_dataz[\"message\"])\n self.assertEqual(response.status_code, 201)\n\n # Test empty data\n response1 = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict()\n ),\n content_type='application/json'\n\n )\n response_data1 = json.loads(response1.data)\n self.assertEqual(\"Fields cannot be empty\", response_data1[\"message\"])\n self.assertEqual(response1.status_code, 400)\n # Test missing required fields\n response2 = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=\"\",\n name=\"chunky\",\n category=\"shoes\",\n purchase_price=1000,\n selling_price=\"\",\n quantity=\"\",\n low_limit=\"\",\n description=\"A wide based heel\"\n\n )),\n content_type='application/json'\n\n )\n\n response_data2 = json.loads(response2.data)\n self.assertEqual(\"Some required fields are missing!\", response_data2[\"message\"])\n self.assertEqual(response2.status_code, 206)\n # Test only admin can post products\n responseatt_post = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n 
id=200,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_data_att = json.loads(responseatt_post.data)\n self.assertEqual(\"Product with id 200 added successfully\", response_data_att[\"message\"])\n self.assertEqual(responseatt_post.status_code, 201)", "def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_households_in_admin_unit(self):", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def setUp(self):\n super(PurchaseOrderTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, '[email protected]', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n self.client.force_authenticate(self.user)\n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='[email protected]', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n\n # Create Custom Supply\n # not implemented\n\n # Create Fabric\n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n self.supply1 = self.supply\n \n self.product = Product(supply=self.supply, supplier=self.supplier, cost=base_fabric['unit_cost'],\n purchasing_units='m')\n self.product.save()\n self.supply2 = Fabric.create(**base_fabric2)\n self.supply2.discount = 5\n self.supply2.save()\n self.product2 = Product(supply=self.supply2, supplier=self.supplier, cost=base_fabric['unit_cost'])\n self.product2.save()\n self.supply1.supplier = self.supplier\n self.supply2.supplier = self.supplier\n \n\n #Create supply with no target item\n self.supply3 = Supply.objects.create(description='test supply')\n self.supply3.id = 203\n self.supply3.save()\n\n #Create a project\n self.project = Project()\n self.project.codename = 'MC House'\n self.project.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.order_date = datetime.datetime(2017, 1, 15, 15, 30, 0, 0, tzinfo=timezone('Asia/Bangkok'))\n self.po.order_date = self.order_date\n self.po.receive_date = datetime.datetime.now()\n self.po.save()\n #self.po.create_and_upload_pdf()\n \n self.item = Item.create(supplier=self.supplier, id=1, **base_purchase_order['items'][0])\n self.item.purchase_order = self.po\n self.item.save()\n \n self.po.calculate_total()\n self.po.save()", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = 
self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_create_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Banana',\n 'description': '''\n Bananas are one of the most widely consumed fruits in the\n world for good reason. Eating them could help lower blood\n pressure and reduce the risks of cancer and asthma.\n '''\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_create_with_permissions(self):\n permissions = Permission.objects.filter(name__in=('Can add course mode', 'Can change course mode'))\n for permission in permissions:\n self.user.user_permissions.add(permission)\n\n self.assert_can_create_course()", "def test_stealability(self):\n prod = Product('Test Product', price=100, weight=1)\n self.assertEqual(prod.stealability(), \"Very stealable!\")", "def test_products_ref_users_put(self):\n pass", "def test_add_to_cart(self):\n\n # Log the user in that is not the seller\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Confirm that product title appears in cart\n response = self.client.get(reverse('website:cart'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the cart displays product title, but not the title for product2\n self.assertIn('<h6 class=\"mr-auto p-2\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mr-auto p-2\">Test Product2</h6>'.encode(), response.content)\n\n # Confirm that the post returns a response of 302\n response = self.client.get(reverse(\"website:add_to_cart\", args=(1,)))\n self.assertEqual(response.status_code, 302)", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_07_admin_featured_apps_add_remove_app_non_admin(self, mock):\r\n self.register()\r\n self.signout()\r\n 
self.register(name=\"John2\", email=\"[email protected]\",\r\n password=\"passwd\")\r\n self.new_application()\r\n # The application is in the system but not in the front page\r\n res = self.app.get('/', follow_redirects=True)\r\n err_msg = (\"The application should not be listed in the front page\"\r\n \"as it is not featured\")\r\n assert \"Create an App\" in res.data, err_msg\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg\r\n # Try to add the app to the featured list\r\n res = self.app.post('/admin/featured/1')\r\n err_msg = (\"The user should not be able to POST to this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg\r\n # Try to remove it again from the Featured list\r\n res = self.app.delete('/admin/featured/1')\r\n err_msg = (\"The user should not be able to DELETE to this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg", "def test_admin_cannot_post_sale(self):\n response = self.client.post(\n '/self.base_url/sales',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])" ]
[ "0.7732614", "0.766534", "0.746467", "0.73577404", "0.7302724", "0.7297793", "0.7198699", "0.7088031", "0.706756", "0.69749635", "0.6925475", "0.6893543", "0.6830343", "0.68255603", "0.6805827", "0.68006784", "0.677526", "0.67369896", "0.6678268", "0.66471297", "0.6601341", "0.6589541", "0.6563557", "0.6555914", "0.6552673", "0.6546463", "0.65432596", "0.6537618", "0.6526094", "0.6525832", "0.6507259", "0.648921", "0.64734435", "0.64607424", "0.64511704", "0.6448973", "0.6424691", "0.64239144", "0.64156747", "0.64019966", "0.6400038", "0.6394441", "0.6377176", "0.63683206", "0.63680524", "0.63651097", "0.63450587", "0.63364685", "0.63308775", "0.63264275", "0.63233703", "0.63147855", "0.63144827", "0.630635", "0.6304148", "0.6301944", "0.62903804", "0.62788445", "0.62667346", "0.62637305", "0.6254806", "0.6253878", "0.62435603", "0.62342", "0.6231163", "0.6221278", "0.62058794", "0.61899304", "0.6180356", "0.61800027", "0.6168996", "0.61605614", "0.6156966", "0.61481494", "0.6146622", "0.61380506", "0.61380506", "0.6130546", "0.6123898", "0.6116677", "0.6115562", "0.6115113", "0.61128736", "0.61127317", "0.61105055", "0.61101323", "0.611003", "0.6106681", "0.60995", "0.6098728", "0.6088696", "0.608841", "0.60858446", "0.6083071", "0.6077585", "0.607083", "0.6065316", "0.60643834", "0.60523677", "0.6044855" ]
0.8407797
0
Tests that 'admin' can add a product
def test_admin_create_product(self):
        resp = self.admin_register()
        reply = self.admin_login()
        token = reply['token']
        product = dict(
            prod_name='NY_denims',
            category='denims',
            stock=20,
            price=150
        )
        resp = self.client.post(
            '/api/v1/products',
            content_type='application/json',
            data=json.dumps(product),
            headers={'Authorization': 'Bearer {}'.format(token)}
        )
        reply = json.loads(resp.data.decode())

        self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
        self.assertEqual(resp.status_code, 201)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n 
resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin(self):\n assert(admin)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = 
self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_PRODUCTS_addProduct(browser, url, username, password):\n\t\t\n\t#initialise browser and login with valid credentials\n\tgo_to_admin(browser,url,username,password)\n\n\ttry:\n\t\t# Select \"Add a Product\" link from \"Products\" menu\n\t\tbrowser.find_element_by_link_text('Products').click()\n\t\tbrowser.find_element_by_link_text('Add a Product').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t#Provide Details of the product\n\telement = wait_until_element_present(browser, 'product-name', 'ID')\n\telement.send_keys('Testing')\n\n\ttry:\n\t\tbrowser.find_element_by_id('product-price').send_keys(\"10.45\")\n\t\tbrowser.find_element_by_xpath('//li[@title = \"'+category_name+'\"]/a[text()=\"'+category_name+'\"]').click()\n\t\tbrowser.find_element_by_id('product-weight').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-sku').send_keys(SKU)\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise \t\n\n\ttry:\n\t\tbrowser.execute_script(\"tinyMCE.activeEditor.dom.remove(tinyMCE.activeEditor.dom.select('p'));\")\n\t\tbrowser.execute_script(\"tinymce.activeEditor.execCommand('mceInsertContent', true, \\\"TEST AUTOMATION BANNER\\\");\")\n\texcept WebDriverException:\n\t\tbrowser.find_element_by_id('wysiwyg').clear()\n\t\tbrowser.find_element_by_id('wysiwyg').send_keys('TEST AUTOMATION BANNER')\n\n\ttry:\t\n\t\tbrowser.find_element_by_id('product-width').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-height').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-depth').send_keys(\"1\")\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise \n\t\t\n\tUpload \"Image and Video\" of the product\n\ttry:\n\t\tbrowser.find_element_by_link_text('Images & Videos').click()\n\t\tfile = browser.find_element_by_xpath('//input[@class = \"file-upload\"]')\n\t\tfile.send_keys(product_img_path)\n\t\ttime.sleep(15)\n\t\tbrowser.find_element_by_id('product-videos-search-query').send_keys(product_video_url)\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\telement=wait_until_element_present(browser,'product-videos-search','ID')\n\telement.click()\n\n\ttry:\n\t\ttime.sleep(15)\t\n\t\tbrowser.find_element_by_xpath('//label[@for = \"'+product_video_label+'\"]').click()\n\n\t\t#Provide \"Inventory\" detials of the product\n\t\tbrowser.find_element_by_link_text('Inventory').click()\n\t\tbrowser.find_element_by_xpath('//label[@for = \"product-inventory-tracking-1\"]').click()\n\t\tclear_field(browser,'inventory-level')\n\t\tbrowser.find_element_by_id('inventory-level').send_keys(\"123\")\n\t\tclear_field(browser,'inventory-warning-level')\n\t\tbrowser.find_element_by_id('inventory-warning-level').send_keys(\"123\")\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t\t\n\ttry:\t\n\t\t#Select Product Delivery details\n\t\tbrowser.find_element_by_link_text('Delivery/Event Date').click()\n\t\tbrowser.find_element_by_xpath('//label[@for = 
\"product-event-date-required\"]').click()\n\t\tbrowser.find_element_by_link_text('Details').click()\n\t\tbrowser.find_element_by_name('btn-save').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\n\n\tverify_and_assert_success_message(browser, \"The new product has been added successfully.\", \".alert-success\")\n\n\t# View newly created Product in control panel\n\tbrowser.find_element_by_link_text('Products').click()\n\tbrowser.find_element_by_link_text('View Products').click()\n\telement = wait_until_element_present(browser,'search-query','ID')\n\telement.send_keys(SKU)\n\tbrowser.find_element_by_xpath('//button[@class = \"btn btn-secondary filter-button\"]').click()\n\ttime.sleep(15)\n\tbrowser.find_element_by_xpath(\"//tr[contains(.,'\" + SKU + \"')]\").find_element_by_css_selector('.dropdown-trigger').click()\n\tbrowser.find_element_by_link_text('View').click()\n \n #Switching to cart window \n\tfor handle in browser.window_handles:\n\t\t\t\tbrowser.switch_to_window(handle)\n\n\t#Provide required delivery date\n\ttry:\n\t\twait_until_element_present(browser,'EventDateMonth','ID')\n\t\tselect_dropdown_value(browser, 'EventDateMonth', 'Jan')\n\t\tselect_dropdown_value(browser, 'EventDateDay', '1')\n\t\tselect_dropdown_value(browser, 'EventDateYear', '2013')\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\n\t\t\t\n\telement = wait_until_element_present(browser, '//input[contains(@src,\"AddCartButton.gif\")]', 'XPATH')\n\telement.click()\n\n\t#Proceeding to checkout as guest\n\ttime.sleep(15)\n\twait_until_element_present(browser, '//a[@title = \"Click here to proceed to checkout\"]', 'XPATH').click()\n\telement = wait_until_element_present(browser, 'checkout_type_guest', 'ID')\n\telement.click()\n\telement = wait_until_element_present(browser, 'CreateAccountButton', 'ID')\n\telement.click()\n\twait_until_element_present(browser, 'FormField_1', 'ID')\n\n\t#Provide Billing Details and proceed further\n\ttry:\n\t\tbrowser.find_element_by_id('FormField_1').clear()\n\t\tbrowser.find_element_by_id('FormField_1').send_keys('[email protected]')\n\t\tbrowser.find_element_by_id('FormField_4').clear()\n\t\tbrowser.find_element_by_id('FormField_4').send_keys('Virendra')\n\t\tbrowser.find_element_by_id('FormField_5').clear()\n\t\tbrowser.find_element_by_id('FormField_5').send_keys('Brahmbhatt')\n\t\tbrowser.find_element_by_id('FormField_7').clear()\n\t\tbrowser.find_element_by_id('FormField_7').send_keys('234234423234')\n\t\tbrowser.find_element_by_id('FormField_8').clear()\n\t\tbrowser.find_element_by_id('FormField_8').send_keys('George Street')\n\t\tbrowser.find_element_by_id('FormField_10').clear()\n\t\tbrowser.find_element_by_id('FormField_10').send_keys('Sydney')\n\t\tselect_dropdown_value(browser, 'FormField_11', 'Australia')\n\t\tselect_dropdown_value(browser, 'FormField_12', 'New South Wales')\n\t\tbrowser.find_element_by_id('FormField_13').clear()\n\t\tbrowser.find_element_by_id('FormField_13').send_keys('2000')\n\t\tbrowser.find_element_by_css_selector('.Submit .billingButton').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\t\n\n\n\t# Select shipping method\n\telement = wait_until_element_present(browser, \"//input[contains(@id, 'shippingCheck')]\", 'XPATH')\n\telement.click()\n\tbrowser.find_element_by_xpath(\"//div[@class='ML20']/input[@type='submit' and contains(@value,'Continue')]\").click()\n\n\t# Proceed to 
payment\n\ttry:\n\t\twait_until_element_present(browser, 'bottom_payment_button', 'ID')\n\t\tbrowser.find_element_by_id('bottom_payment_button').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t#Provide Credit Card Details\n\ttry:\n\t\twait_until_element_present(browser,'creditcard_cctype','ID')\n\t\tselect_dropdown_value(browser, 'creditcard_cctype', 'Visa')\n\t\tbrowser.find_element_by_id('creditcard_name').send_keys('test')\n\t\tbrowser.find_element_by_id('creditcard_ccno').send_keys('4242424242424242')\n\t\tselect_dropdown_value(browser, 'creditcard_ccexpm', 'Jan')\n\t\tselect_dropdown_value(browser, 'creditcard_ccexpy', '2014')\n\t\tbrowser.find_element_by_id('creditcard_cccvd').send_keys('234')\n\t\tbrowser.find_element_by_xpath('//input[@value = \"Pay for Order\"]')\n\t\t\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n # Assert the succes message of Order Creation\n\torder_success_msg = 'YOUR ORDER NUMBER IS'\n\tbrowser_success_msg = browser.find_element_by_xpath('//div[@class = \"alert alert-success\"]/p').text\n\n\tif order_success_msg in browser_success_msg:\n\t\tprint \"I found my text\"\n\t\tassert True\n\telse:\n\t\tprint \"No text\"\n\t\tassert False", "def test_new_product(self):\n prod = Product(name='New Product', price=100, weight=60,\n flammability=0.9)\n self.assertEqual(prod.explode(), '...BABOOM!!')\n self.assertEqual(prod.stealability(), 'Very stealable!')", "def test_add_admin_to_org(self):\n pass", "def test_add_new_product(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"price\": \"4500.0\",\n \"name\": \"Producto 3\",\n \"description\": \"Descripcion de producto 3\"\n }\n\n response = self.client.post('/api/1.0/products/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertNotEqual(response.data['published_date'], '')\n self.assertEqual(response.data['name'], 'Producto 3')\n self.assertEqual(response.data['description'], 'Descripcion de producto 3')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '4500.0')\n self.assertEqual(response.data['seller']['user']['username'], self.username)\n self.assertEqual(response.data['category']['name'], 'deportes')", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_add_product_during_auth(self):\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n # Adding a product here should succeed\n res = self.do_add_to_basket(product.id)\n 
basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n # Adding a product here should go to a new basket, not the one we're auth'ing\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n # Adding a product here should go to basket2, not basket1\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)", "def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()", "def test_add_admin(self):\n self.test_create_user()\n self.test_create_organization()\n url = reverse('MGA:add_admin')\n data = {'admin id': 1, 'org_id': 1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n 
reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_create_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Banana',\n 'description': '''\n Bananas are one of the most widely consumed fruits in the\n world for good reason. Eating them could help lower blood\n pressure and reduce the risks of cancer and asthma.\n '''\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_add_new_product(self):\n response=self.add_new_product()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(response.status_code, 201, result['New Product'])", "def test_add_product_view_for_unauthenticated_users(client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 302\n assert response.url == \"/accounts/login/?next=/products/add-product/\"", "def test_add_product_view_for_authenticated_users(user_company, client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 200", "def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)", "def 
test_create_product_success(self):\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertEqual(res.data['supplier_id'], self.user.id)\n self.assertEqual(res.data['name'], PRODUCT_PAYLOAD['name'])\n self.assertEqual(res.data['price'], PRODUCT_PAYLOAD['price'])", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_add_new_product_to_category(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.post(addnew_product_to_category_url,\n data=json.dumps(self.product_data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['message'], 'Product successfully added')\n self.assertEqual(res.status_code, 201)", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def test_add_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.add_facility'))", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_attendant_make_a_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n 
\"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)", "def test_add_single_org_product(client, jwt, session, keycloak_mock): # pylint:disable=unused-argument\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n rv = client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org1),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n rv_products = client.post(f\"/api/v1/orgs/{dictionary.get('id')}/products\",\n data=json.dumps(TestOrgProductsInfo.org_products1),\n headers=headers, content_type='application/json')\n assert rv_products.status_code == http_status.HTTP_201_CREATED\n assert schema_utils.validate(rv_products.json, 'org_product_subscriptions_response')[0]", "def setUp(self):\n super().setUp()\n list_of_product_types = [\n 'default_product_variant',\n 'multiple_product_variants',\n 'ceo_title'\n ]\n self.new_product = eval(f\"get_new_product_with_\" \\\n f\"{list_of_product_types[randint(0, len(list_of_product_types) - 1)]}()\")\n response = ProcessRequest('products.json').send_request(\n 'POST',\n data=self.new_product,\n expected_return_codes=[201],\n )\n self.product_id = response.response['product']['id']", "def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. 
This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))", "def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_add_to_cart(self):\n\n # Log the user in that is not the seller\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Confirm that product title appears in cart\n response = self.client.get(reverse('website:cart'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the cart displays product title, but not the title for product2\n self.assertIn('<h6 class=\"mr-auto p-2\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mr-auto p-2\">Test Product2</h6>'.encode(), response.content)\n\n # Confirm that the post returns a response of 302\n response = self.client.get(reverse(\"website:add_to_cart\", args=(1,)))\n self.assertEqual(response.status_code, 302)", "def test_post_product(self):\n with self.client:\n # Register an admin user\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n name='leticia',\n email='[email protected]',\n role='admin',\n username='ticia',\n password='password',\n confirm_password='password'\n )),\n content_type='application/json'\n )\n # Register attendant\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n name='leticia',\n email='[email protected]',\n role='attendant',\n username='leticia',\n password='password',\n confirm_password='password'\n )),\n content_type='application/json'\n )\n\n # login as admin\n login_response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='ticia',\n password='password'\n\n )),\n content_type='application/json'\n )\n result = json.loads(login_response.data)\n token = result\n\n # Login attendant\n login_att_response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='leticia',\n password='password'\n\n )),\n content_type='application/json'\n )\n resultatt = 
json.loads(login_att_response.data)\n tokenatt = resultatt\n # Test successful post\n response = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=100,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(\"Product with id 100 added successfully\", response_data[\"message\"])\n self.assertEqual(response.status_code, 201)\n # Test post product with existing product id\n responsez = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=100,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_dataz = json.loads(responsez.data)\n self.assertEqual(\"The product Id you entered is being used for another product\", response_dataz[\"message\"])\n self.assertEqual(response.status_code, 201)\n\n # Test empty data\n response1 = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict()\n ),\n content_type='application/json'\n\n )\n response_data1 = json.loads(response1.data)\n self.assertEqual(\"Fields cannot be empty\", response_data1[\"message\"])\n self.assertEqual(response1.status_code, 400)\n # Test missing required fields\n response2 = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=\"\",\n name=\"chunky\",\n category=\"shoes\",\n purchase_price=1000,\n selling_price=\"\",\n quantity=\"\",\n low_limit=\"\",\n description=\"A wide based heel\"\n\n )),\n content_type='application/json'\n\n )\n\n response_data2 = json.loads(response2.data)\n self.assertEqual(\"Some required fields are missing!\", response_data2[\"message\"])\n self.assertEqual(response2.status_code, 206)\n # Test only admin can post products\n responseatt_post = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=200,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_data_att = json.loads(responseatt_post.data)\n self.assertEqual(\"Product with id 200 added successfully\", response_data_att[\"message\"])\n self.assertEqual(responseatt_post.status_code, 201)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n 
'/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_add_new_book(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('message'))\n self.assertEqual(json_data.get('message'), \"Success! Book added\")\n self.assertEqual(response.status_code, 201)", "def test_03_admin_featured_apps_as_admin(self):\r\n self.register()\r\n self.signin()\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Manage featured applications\" in res.data, res.data", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_products_ref_users_put(self):\n pass", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_update_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'product updated!')\n self.assertEqual(resp.status_code, 200)", "def test_add_role(self):\n pass", "def permission(self):\n return \"core.manage_products\"", "def test_list_products(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[1]['description'], 'Descripcion 
producto 1')", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin(self):\r\n from django.contrib import admin\r\n admin.autodiscover()\r\n\r\n from adrest.models import AccessKey\r\n self.assertTrue(AccessKey in admin.site._registry)\r\n\r\n from adrest.models import Access\r\n self.assertTrue(Access in admin.site._registry)", "def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.add\"\n current_page = 1\n search_info = json.dumps({\n 'name': \"可爱的小蓝牙呀\"\n })\n print('start------------------------>add')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n 
self.assertShortURLCreated(response)", "def test_if_app_can_search_for_existing_lists_with_products(self):\n product_to_add = {'product':'nikes', 'Quantity':3, 'Amountspent':5000}\n jsonproduct_to_add = json.dumps(product_to_add)\n add_list = self.client.post('/shoppinglists/',\n data = self.shopllist, \n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n add_product=self.client.post('/shoppinglist/shoes/items/',\n data=jsonproduct_to_add,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforproducts=self.client.get('/searchProduct/?q=nike',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"Success\",str(searchforlists.data))\n self.assertIn(\"Success\",str(searchforproducts.data))\n self.assertEqual(searchforproducts.status_code,200)\n self.assertEqual(searchforlists.status_code,200)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create__admin_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': True, 'isSiteEditor': True}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertTrue(actual_json['is_site_editor'])\n self.assertTrue(actual_json['is_admin'])\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertEqual('[email protected]', new_appuser.email)\n self.assertTrue(new_appuser.is_admin)\n\n # Clean up\n new_appuser.key.delete()", "def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, 
\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_admin_event_admin_add(self):\n response = self.client.get(\"/admin/appointment/event/add/\")\n self.assertEqual(response.status_code, 200)", "def test_add_role_simple_post(self):\n pass", "def test_add_single_org_product_vs(client, jwt, session, keycloak_mock): # pylint:disable=unused-argument\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.public_user_role)\n rv = client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org1),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n rv_products = client.post(f\"/api/v1/orgs/{dictionary.get('id')}/products\",\n data=json.dumps(TestOrgProductsInfo.org_products_vs),\n headers=headers, content_type='application/json')\n assert rv_products.status_code == http_status.HTTP_201_CREATED\n assert schema_utils.validate(rv_products.json, 'org_product_subscriptions_response')[0]\n\n rv_products = client.get(f\"/api/v1/orgs/{dictionary.get('id')}/products\", headers=headers,\n content_type='application/json')\n list_products = json.loads(rv_products.data)\n vs_product = next(prod for prod in list_products if prod.get('code') == 'VS')\n assert vs_product.get('subscriptionStatus') == 'PENDING_STAFF_REVIEW'", "def test_create_product_successful(self):\n \n ProductCategory.objects.create(name=\"test name\", description=\"new name\")\n test_key = ProductCategory.objects.values()[0]\n # print(test_key)\n payload = {\n 'name': 'Test Tag',\n 'product_category_id': test_key.get('id'),\n 'unit_price': 100,\n 'quantity': 12,\n 'description': 'Test description'\n }\n \n res = self.client.post(PRODUCT_ADD_URL, payload)\n\n # print(res.data)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def setUp(self):\n super(PurchaseOrderTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, '[email protected]', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n self.client.force_authenticate(self.user)\n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='[email protected]', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n\n # Create Custom Supply\n # not implemented\n\n # Create Fabric\n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n self.supply1 = self.supply\n \n self.product = Product(supply=self.supply, supplier=self.supplier, cost=base_fabric['unit_cost'],\n purchasing_units='m')\n self.product.save()\n self.supply2 = Fabric.create(**base_fabric2)\n self.supply2.discount = 5\n self.supply2.save()\n self.product2 = Product(supply=self.supply2, supplier=self.supplier, cost=base_fabric['unit_cost'])\n 
self.product2.save()\n self.supply1.supplier = self.supplier\n self.supply2.supplier = self.supplier\n \n\n #Create supply with no target item\n self.supply3 = Supply.objects.create(description='test supply')\n self.supply3.id = 203\n self.supply3.save()\n\n #Create a project\n self.project = Project()\n self.project.codename = 'MC House'\n self.project.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.order_date = datetime.datetime(2017, 1, 15, 15, 30, 0, 0, tzinfo=timezone('Asia/Bangkok'))\n self.po.order_date = self.order_date\n self.po.receive_date = datetime.datetime.now()\n self.po.save()\n #self.po.create_and_upload_pdf()\n \n self.item = Item.create(supplier=self.supplier, id=1, **base_purchase_order['items'][0])\n self.item.purchase_order = self.po\n self.item.save()\n \n self.po.calculate_total()\n self.po.save()", "def test_23_admin_add_category(self):\r\n self.create()\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n # Anonymous user\r\n url = '/admin/categories'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be added\"\r\n assert \"Category added\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category form validation should work\"\r\n assert \"Please correct the errors\" in res.data, err_msg", "def test_add_role_simple(self):\n pass", "def test_households_in_admin_unit(self):", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')", "def test_add_single_org_product_mhr_qualified_supplier(client, jwt, session, keycloak_mock, org_product_info):\n # setup 
user and org\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.public_user_role)\n rv = client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org_premium),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n\n # Create product subscription\n rv_products = client.post(f\"/api/v1/orgs/{dictionary.get('id')}/products\",\n data=json.dumps(org_product_info),\n headers=headers, content_type='application/json')\n assert rv_products.status_code == http_status.HTTP_201_CREATED\n assert schema_utils.validate(rv_products.json, 'org_product_subscriptions_response')[0]\n\n # Fetch org products and validate subscription status\n rv_products = client.get(f\"/api/v1/orgs/{dictionary.get('id')}/products\", headers=headers,\n content_type='application/json')\n list_products = json.loads(rv_products.data)\n mhr_product = next(prod for prod in list_products\n if prod.get('code') == org_product_info['subscriptions'][0]['productCode'])\n assert mhr_product.get('subscriptionStatus') == 'PENDING_STAFF_REVIEW'\n\n # Should show up as a review task for staff\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_role)\n rv = client.get('/api/v1/tasks', headers=headers, content_type='application/json')\n\n item_list = rv.json\n assert schema_utils.validate(item_list, 'paged_response')[0]\n assert rv.status_code == http_status.HTTP_200_OK\n assert len(item_list['tasks']) == 1\n\n task = item_list['tasks'][0]\n assert task['relationshipStatus'] == 'PENDING_STAFF_REVIEW'\n assert task['relationshipType'] == 'PRODUCT'\n assert task['action'] == 'QUALIFIED_SUPPLIER_REVIEW'\n assert task['externalSourceId'] == org_product_info['subscriptions'][0]['externalSourceId']\n\n # Approve task\n rv = client.put('/api/v1/tasks/{}'.format(task['id']),\n data=json.dumps({'relationshipStatus': 'ACTIVE'}),\n headers=headers, content_type='application/json')\n\n task = rv.json\n assert rv.status_code == http_status.HTTP_200_OK\n assert task['relationshipStatus'] == 'ACTIVE'\n assert task['relationshipType'] == 'PRODUCT'\n assert task['action'] == 'QUALIFIED_SUPPLIER_REVIEW'\n assert task['externalSourceId'] == org_product_info['subscriptions'][0]['externalSourceId']", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_product_landing(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create a product\n p = product(save=True)\n\n # Create some topics\n topics = []\n for i in range(11):\n topics.append(topic(save=True))\n\n # Create a document and assign the product and 10 topics.\n doc = revision(is_approved=True, save=True).document\n doc.products.add(p)\n for i in range(10):\n doc.topics.add(topics[i])\n\n self.refresh()\n\n # GET the topic page and verify the content\n url = reverse('products.product', args=[p.slug])\n r = self.client.get(url, follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(10, len(doc('#help-topics li')))", "def 
test_07_admin_featured_apps_add_remove_app_non_admin(self, mock):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"John2\", email=\"[email protected]\",\r\n password=\"passwd\")\r\n self.new_application()\r\n # The application is in the system but not in the front page\r\n res = self.app.get('/', follow_redirects=True)\r\n err_msg = (\"The application should not be listed in the front page\"\r\n \"as it is not featured\")\r\n assert \"Create an App\" in res.data, err_msg\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg\r\n # Try to add the app to the featured list\r\n res = self.app.post('/admin/featured/1')\r\n err_msg = (\"The user should not be able to POST to this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg\r\n # Try to remove it again from the Featured list\r\n res = self.app.delete('/admin/featured/1')\r\n err_msg = (\"The user should not be able to DELETE to this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg", "def test_add_item_using_post(self):\n pass", "def test_create_drink_created_by_admin(self):\n self.test_create_admin_user()\n self.test_create_seting_bar()\n user = UserBase.objects.get(username='admin')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.token)\n url = reverse('drink-list')\n data = {\n 'name': 'Testing Drink',\n 'ingredients':'[{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001},{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001}]'\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def adminAddPrdStore(self, prd, num):\n if not self.__checkProduct(prd, num):\n return False, \"parameter error\", {}\n try:\n ret = vmdata.addPrdStore(prd, num) \n return True, \"Add success, current product store\", ret[1]\n except:\n logging.error(\"Add product error\")", "def test_stealability(self):\n prod = Product('Test Product', price=100, weight=1)\n self.assertEqual(prod.stealability(), \"Very stealable!\")", "def test_add_user(self):\n pass", "def test_create_user_page(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n # Assertions\n self.assertEqual(res.status_code, 200)" ]
[ "0.8125268", "0.78284925", "0.7689276", "0.74345493", "0.7312975", "0.72979146", "0.71163535", "0.709621", "0.70959914", "0.7021732", "0.6964215", "0.6953487", "0.6913336", "0.6887993", "0.6846328", "0.6843517", "0.6843496", "0.68297696", "0.67998624", "0.6782752", "0.67686844", "0.6763103", "0.67531633", "0.6730877", "0.6667929", "0.6656305", "0.6649149", "0.6640939", "0.6637427", "0.6598826", "0.65926135", "0.65804857", "0.656985", "0.65634686", "0.6561752", "0.6534728", "0.65309155", "0.6508378", "0.65039", "0.65037477", "0.64915097", "0.64858073", "0.6453945", "0.64254224", "0.64210737", "0.6420693", "0.6419787", "0.64175636", "0.6405128", "0.63930595", "0.6392946", "0.63919246", "0.63874656", "0.6386185", "0.63845843", "0.6369891", "0.636206", "0.63547105", "0.6343346", "0.63383895", "0.6329529", "0.63169944", "0.63122773", "0.63022405", "0.6300867", "0.62957823", "0.6295289", "0.6292849", "0.6283974", "0.6276983", "0.62698776", "0.6268628", "0.6267989", "0.6257503", "0.62571234", "0.625707", "0.6245794", "0.6229637", "0.6222579", "0.62205434", "0.6219562", "0.62183464", "0.6185442", "0.61819977", "0.6178742", "0.61780494", "0.61742216", "0.61589193", "0.6157993", "0.6147107", "0.61470103", "0.61413354", "0.6135731", "0.61298865", "0.61192375", "0.6108449", "0.60979015", "0.6084448", "0.6071399", "0.6071053" ]
0.79675835
1
Test admin cannot create a product with a blacklisted token
def test_cannot_create_product_with_blacklisted_token(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']

    resp = self.client.delete(
        '/api/v1/logout',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'You are successfully logged out!')
    self.assertEqual(resp.status_code, 200)

    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n 
resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n 
content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n 
self.assertEqual(resp.status_code, 404)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_cannot_create_user_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 
'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_create_without_token(self):\n url = '/api/ingredients/'\n client = APIClient()\n\n response = client.post(url, self.ingredient_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n 
headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_unauthorized_add(self):\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 403", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def 
test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_create_drink_not_authentificate(self):\n url = reverse('drink-list')\n data = {'name': 'Testing Drink'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_token_for_not_user(self):\n\n credentials = {'email': '[email protected]', 'password': 'Testpass12'}\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def 
test_create_token_no_user(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_token_missing_field(self):\n res = self.client.post(TOKEN_URL, {'email':'', 'password':\"\"})\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_admin_cannot_post_sale(self):\n response = self.client.post(\n '/self.base_url/sales',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_create_token_no_user(self):\n res = self.client.post(TOKEN_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_product_no_data(self):\n resp = self.app.post(\n \"/products\", json={}, content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)", "def test_create_token_invalid_credantials(self):\n create_user(email='[email protected]', password='testpass')\n payload = {'email': '[email protected]', 'password': 'wrong'}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_with_unpermitted_token(self):\n email_text = self.email_template % self.token.uuid\n assert not save_from_email_reply(email_text)", "def test_token_missing_edit(self):\n with self.client:\n id = self.get_id()\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n 
content_type='application/json',\n headers=({\"token\": \"\"}))\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data.get('message'), \"Token is missing\")", "def test_create_token_no_user(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertNotIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_create_token_missing_field(self):\n payload = {'email': '[email protected]', 'password': ''}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_account_not_allowed(self):\n\n def _side_effect_for_get_value(value, default=None):\n \"\"\"\n returns a side_effect with given return value for a given value\n \"\"\"\n if value == 'ALLOW_PUBLIC_ACCOUNT_CREATION':\n return False\n else:\n return get_value(value, default)\n\n with mock.patch('openedx.core.djangoapps.site_configuration.helpers.get_value') as mock_get_value:\n mock_get_value.side_effect = _side_effect_for_get_value\n response = self.client.post(self.url, {\"email\": self.EMAIL, \"username\": self.USERNAME})\n assert response.status_code == 403", "def test_none_admin_post(self):\n with self.client:\n token = self.customer()\n response = self.client.post('api/v1/meals', data=json.dumps(\n dict(\n meal_name=\"fries\",\n price=10000\n )\n ),\n content_type='application/json',\n headers=({\"token\": token})\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_create_token_missing_field(self):\r\n res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})\r\n\r\n self.assertNotIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_token_no_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass'\n }\n res = self.client.post(TOKEN_URI, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def 
test_post_product(self):\n with self.client:\n # Register an admin user\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n name='leticia',\n email='[email protected]',\n role='admin',\n username='ticia',\n password='password',\n confirm_password='password'\n )),\n content_type='application/json'\n )\n # Register attendant\n self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n name='leticia',\n email='[email protected]',\n role='attendant',\n username='leticia',\n password='password',\n confirm_password='password'\n )),\n content_type='application/json'\n )\n\n # login as admin\n login_response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='ticia',\n password='password'\n\n )),\n content_type='application/json'\n )\n result = json.loads(login_response.data)\n token = result\n\n # Login attendant\n login_att_response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='leticia',\n password='password'\n\n )),\n content_type='application/json'\n )\n resultatt = json.loads(login_att_response.data)\n tokenatt = resultatt\n # Test successful post\n response = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=100,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(\"Product with id 100 added successfully\", response_data[\"message\"])\n self.assertEqual(response.status_code, 201)\n # Test post product with existing product id\n responsez = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=100,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_dataz = json.loads(responsez.data)\n self.assertEqual(\"The product Id you entered is being used for another product\", response_dataz[\"message\"])\n self.assertEqual(response.status_code, 201)\n\n # Test empty data\n response1 = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict()\n ),\n content_type='application/json'\n\n )\n response_data1 = json.loads(response1.data)\n self.assertEqual(\"Fields cannot be empty\", response_data1[\"message\"])\n self.assertEqual(response1.status_code, 400)\n # Test missing required fields\n response2 = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=\"\",\n name=\"chunky\",\n category=\"shoes\",\n purchase_price=1000,\n selling_price=\"\",\n quantity=\"\",\n low_limit=\"\",\n description=\"A wide based heel\"\n\n )),\n content_type='application/json'\n\n )\n\n response_data2 = json.loads(response2.data)\n self.assertEqual(\"Some required fields are missing!\", response_data2[\"message\"])\n self.assertEqual(response2.status_code, 206)\n # Test only admin can post products\n responseatt_post = self.client.post(\n '/api/v1/products',\n data=json.dumps(dict(\n id=200,\n name='chunky heels',\n category='shoes',\n purchase_price=1000,\n selling_price=1800,\n quantity=70,\n low_limit=10,\n description='A wide based heel'\n\n )),\n content_type='application/json'\n\n )\n\n response_data_att = json.loads(responseatt_post.data)\n self.assertEqual(\"Product with id 200 added successfully\", response_data_att[\"message\"])\n self.assertEqual(responseatt_post.status_code, 201)", "def 
test_custom_403(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_post?format=json\")\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n \"wrong used test Data,this is api for POST\", response.content)", "def test_creating_supply_unauthenticated(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_create_token_missing_field(self):\n res = self.client.post(TOKEN_URL, {\n **self.mock_user,\n 'password': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def test_try_add_new_book_while_unauthorized(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/reg',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_user)\n response = self.client().post('/api/v1/login', json=self.test_user)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'),\n \"Only Admins are allowed to add books\")\n self.assertEqual(response.status_code, 403)", "def test_create__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post(self.appuser_id)\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def test_shoppinglist_creation_with_error(self):\n res = self.app.post(\n '/shoppinglist', data={'name': 'Easter!'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter!', '[email protected]')\n self.assertIn(\"No special characters\", response)", "def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n 
data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_create_token_invalid_credentials(self):\n # create user\n create_user(email='[email protected]', password='abcd1234')\n payload = {\n 'email': '[email protected]',\n 'password': 'wrong'\n }\n # We do not expect a token and should get a HTTP 400\n response = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def create_token_no_user(self):\n payload = {'email': '[email protected]', 'password': 'testpass'}\n res = self.client.post(TOKEN_URL, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def test_create_token_missing_field(self):\n\n invalid_credentials = {'email': '[email protected]', 'password': ''}\n response = self.client.post(URL_TOKEN, invalid_credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_feature_disabled(self, url):\n response = self.client.get(url)\n assert response.status_code == 403\n response = self.client.post(url)\n assert response.status_code == 403", "def test_create_token_no_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n }\n # make a request without creating a user\n response = self.client.post(TOKEN_URL, payload)\n\n # We do not expect a token and should get a HTTP 400\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unauthorized_request(self):\n # test false token\n user_id = self.create_user()[0]\n question_id = int(self.create_question(user_id)[0])\n false_token = self.post_data(question_id, headers={\"Authorization\":\"Bearer wrongtoken\"})\n self.assertEqual(false_token.status_code, 401)", "def test_deny_pending_payment(self):\n pass", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_authenticated_user_create(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n 
assert_raises(Forbidden,\r\n getattr(require, 'token').create,\r\n token)", "def test_post_creation_unauthorized(self):\n url = reverse('post-list')\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Test Title'\n body = 'Test Body'\n response = self.client.post(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_post_without_token(self):\n client = Client()\n data = {\n 'rating': '1',\n 'title': 'Hated It!',\n 'summary': 'A little text to say that I hated it!',\n 'company': '1'\n }\n response = client.post('/reviews/', data)\n self.assertEqual(response.status_code, 401)", "def test_cannot_create_new_team(self):\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_create_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n data = reusableitem_1_data\n create_reusableitem_url = reverse('topTenLists:ReusableItems-list')\n response = self.client.post(create_reusableitem_url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def test_free_product(self):\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)", "def test_custom_403(self):\n c = Client()\n response = c.post(\"/apimock/mocked/mocked_get\", data={\"post\": \"data\"})\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\"wrong used test Data\", response.content)", "def test_bad_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n bad_activation_keys = (\n 'emailactivationtokengenerator',\n 'emailactivation-tokengenerator',\n '3rd-bademailactivationkey'\n )\n for key in bad_activation_keys:\n self.assertFalse(token_generator.check_token(user, key))", "def test_create_token_invalid_credentials(self):\r\n create_user(email='[email protected]', password='testpass')\r\n payload = {\r\n 'email': '[email protected]',\r\n 
'password': 'Wrongpass',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertNotIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def test_create_token_missing_field(self):\n payload = {\n 'email': 'one',\n 'password': '',\n }\n response = self.client.post(TOKEN_URL, payload)\n\n # We do not expect a token and should get a HTTP 400\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_cannot_delete_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_create_token_no_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(TOKEN_URI, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_reusableitem_not_authenticated(self):\n\n self.client.logout()\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_guests_can_not_post_message(self):\n url = reverse('posts-list')\n data = {'title': 'some title', 'body': 'somebody :P'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_creating_supply_user(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_deletehardwares_item(self):\n pass", "def test_get_non_valid_token(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n res = self.app.get('/api/token/non-valid?api_key=' + user.api_key)\r\n error = json.loads(res.data)\r\n\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'GET', error\r\n assert error['target'] == 'token', 
error\r\n assert error['exception_cls'] == 'NotFound', error" ]
[ "0.82479733", "0.7897591", "0.7884179", "0.78223985", "0.7625364", "0.7585557", "0.7400365", "0.7375994", "0.72420007", "0.7152579", "0.7103639", "0.70389014", "0.6944365", "0.6929796", "0.68858266", "0.68301815", "0.67753994", "0.6750073", "0.6746582", "0.66715974", "0.66688746", "0.66519153", "0.6631966", "0.6559985", "0.65317154", "0.6529461", "0.6523732", "0.6455961", "0.6450408", "0.6435281", "0.6423553", "0.6419226", "0.64143866", "0.64132744", "0.6396266", "0.63914406", "0.6388822", "0.6365584", "0.6363419", "0.6356275", "0.6345575", "0.6328039", "0.63248605", "0.63095117", "0.63027906", "0.63026094", "0.6299091", "0.62671214", "0.62651604", "0.6264894", "0.6263958", "0.62547606", "0.62512445", "0.6243945", "0.62431896", "0.6236574", "0.6226047", "0.62241143", "0.62192714", "0.62097466", "0.61926943", "0.6190413", "0.6188251", "0.61858046", "0.6179112", "0.6172988", "0.6171234", "0.61637795", "0.61620724", "0.61532825", "0.6146314", "0.6133117", "0.6127442", "0.61208993", "0.6116827", "0.6107399", "0.6104814", "0.6103049", "0.6093233", "0.6089135", "0.608754", "0.6081157", "0.6069086", "0.606662", "0.6058877", "0.6044717", "0.60377175", "0.6034461", "0.6023748", "0.6008785", "0.6007488", "0.60057235", "0.6002895", "0.6002808", "0.5997898", "0.59955555", "0.59950966", "0.5991302", "0.5987932", "0.5987257" ]
0.8788049
0
Tests that 'admin' cannot add a product with empty fields
def test_admin_cannot_create_product_with_empty_fields(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='',
        category='',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Please enter all fields!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)", "def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)", "def test_create_product_no_data(self):\n resp = self.app.post(\n \"/products\", json={}, content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)", "def _clean_standalone(self):\n if not self.title:\n raise ValidationError(_(\"Your product must have a title.\"))\n if not self.product_class:\n raise ValidationError(_(\"Your product must have a product class.\"))\n if self.parent_id:\n raise ValidationError(_(\"Only child products can have a parent.\"))", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n 
price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_product_name_is_required(self):\n product = {\n 'name': '',\n 'price': '100.00',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. 
\"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_missing_field(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[1]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))", "def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n 
stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_invalid_data_course_add(self, app, auth, field):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n setattr(course_data, field, None)\n app.course.create_course(course_data)\n assert (\n not app.course.all_required_fields_filled()\n ), \"Empty fields are ignored and user data changed successfully!\"", "def test_manufacturer_bulk_import_invalid(self):\n form = ManufacturerBulkImportForm(data={\"pk\": \"\"})\n\n self.assertFalse(form.is_valid())", "def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_create_valid_product(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n 
self.assertEqual(resp.status_code, 404)", "def test_contentious_prescription_no_rationale(self):\n url = reverse('admin:prescription_prescription_add')\n data = {\n 'name': 'Test',\n 'planned_season': 1,\n 'planned_year': 2013,\n 'region': 1,\n 'district': 1,\n 'location': 'Test location',\n 'perimeter': 20,\n 'area': 100,\n 'purposes': [1],\n 'remote_sensing_priority': 4,\n 'priority': 2,\n 'contentious': True,\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Prescription.objects.count(), 0)\n form = response.context['adminform'].form\n self.assertEqual(form.errors, {\n 'contentious_rationale': ['A contentious burn requires a '\n 'contentious rationale.']\n })", "def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_devicetype_bulk_import_invalid(self):\n form = DeviceTypeBulkImportForm(data={\"pk\": \"\"})\n\n self.assertFalse(form.is_valid())", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test06_add_student_with_empty_fields(self):\n student_data = self.students_page.\\\n click_edit_students_list_button(). 
\\\n click_add_new_student_button()\n student_data.save_data_changes_button.click()\n actual_warnings = \\\n student_data.warnings_text_for_adding_student_with_empty_fields()\n self.assertEqual(actual_warnings, data['expected_warnings'])", "def test_blank(self):\n form_data = self.form_data('')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_cannot_create_tab_with_empty_field(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n tab_fields = ['name']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, tab_fields)\n self.assertEqual(len(Tab.objects.all()), 0)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_empty_data(self, client):\n url = reverse('users:create')\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' in str(response.content)", "def test_create_invalid_price_three_dp(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.123\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_add_sale_with_product_name_not_string(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 1, 'price': 1500, 'quantity': 10, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. 
The product name should be a string.')", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)", "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = 
prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------", 
"def test_raises_if_missed_fields(self):\n name = 'second'\n self.form.called_handle_modifiers = False\n remove = {'remove_field': name}\n self.form.handle_modifiers({}, [], **remove)\n self.assertNotIn(name, self.form.fields)\n self.assertIn(name, self.form.hold_field)\n message = \"Some unassigned fields, perhaps some added during handle_modifiers. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets(add_field=name)\n self.form.called_handle_modifiers = False", "def test_cannot_make_sale_with_wrong_datatypes(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_345\", \n \"quantity\":'Kummi'\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name & quantity should be a character & number respectively!')\n self.assertEqual(resp.status_code, 400)", "def test_defaults(self):\n p = Product.objects.create(\n name=\"Product\", slug=\"product\", sku=\"4711\", price=42.0)\n\n self.assertEqual(p.name, \"Product\")\n self.assertEqual(p.slug, \"product\")\n self.assertEqual(p.sku, \"4711\")\n self.assertEqual(p.price, 42.0)\n self.assertEqual(p.effective_price, 42.0)\n self.assertEqual(p.short_description, \"\")\n self.assertEqual(p.description, \"\")\n self.assertEqual(len(p.images.all()), 0)\n\n self.assertEqual(p.meta_title, \"<name>\")\n self.assertEqual(p.meta_description, \"\")\n self.assertEqual(p.meta_keywords, \"\")\n\n self.assertEqual(len(p.related_products.all()), 0)\n self.assertEqual(len(p.accessories.all()), 0)\n\n self.assertEqual(p.for_sale, False)\n self.assertEqual(p.for_sale_price, 0.0)\n self.assertEqual(p.active, False)\n\n self.assertEqual(p.deliverable, True)\n self.assertEqual(p.manual_delivery_time, False)\n self.assertEqual(p.delivery_time, None)\n self.assertEqual(p.order_time, None)\n self.assertEqual(p.ordered_at, None)\n self.assertEqual(p.manage_stock_amount, False)\n self.assertEqual(p.stock_amount, 0)\n\n self.assertEqual(p.weight, 0)\n self.assertEqual(p.height, 0)\n self.assertEqual(p.length, 0)\n self.assertEqual(p.width, 0)\n\n self.assertEqual(p.tax, None)\n self.assertEqual(p.sub_type, STANDARD_PRODUCT)\n\n self.assertEqual(p.default_variant, None)\n self.assertEqual(p.variants_display_type, LIST)\n\n self.assertEqual(p.parent, None)\n self.assertEqual(p.active_name, False)\n self.assertEqual(p.active_sku, False)\n self.assertEqual(p.active_short_description, False)\n self.assertEqual(p.active_description, False)\n self.assertEqual(p.active_price, False)\n self.assertEqual(p.active_images, False)\n self.assertEqual(p.active_related_products, False)\n self.assertEqual(p.active_accessories, False)\n self.assertEqual(p.active_meta_description, False)\n self.assertEqual(p.active_meta_keywords, False)", "def test_cannot_save_empty_list_items(self) -> None:\n list_ = List.objects.create()\n item = Item(list=list_, text=\"\")\n\n with self.assertRaises(ValidationError):\n item.save()\n item.full_clean()", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n 
content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_ProductsDataViewSet_with_post_Invalid_data(self):\n payload = {\n \"name\": \"1234\"\n }\n\n # Request the data by API call.\n response = self.client.post('/api/productsdata/',\n data=json.dumps(payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json()['name'][0],\n 'Name `{0}` must contain atleast one letter'.format(\n payload.get('name')))", "def test_manual_entry_error(self):\r\n self._login_admin()\r\n # no url entered\r\n res = self.app.post(\r\n '/admin/new_error',\r\n params={\r\n 'url': '',\r\n 'description': '',\r\n 'extended': '',\r\n 'tags': ''\r\n })\r\n self.assertIn('not valid', res.body)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_no_user(self):\n form = self._get_form()\n self.assertTrue(self._validate_form(form), form.errors)\n self.assertRaises(IntegrityError, form.save)", "def test_cannot_update_with_empty_field(self):\n\n self.client.login(username='notlogged', password='notlogged')\n group_fields = ['name', 'description']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)\n\n # Group is not updated.\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, 'test')\n self.assertEqual(updated_group.description, 'test')\n self.assertIsNone(updated_group.last_edit_date)", "def test_recipe_no_related_stays_no_related(self):\n form = RecipeForm(data=self.form_data_recipe_no_related)\n recipe = form.save()\n\n # Check that the resulting recipe has no related items\n self.assertEqual(len(recipe.ingredient_groups.all()), 0)\n self.assertEqual(len(recipe.instructions.all()), 0)\n self.assertEqual(len(recipe.notes.all()), 0)", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n 
critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_empty_data(self, client, users):\n user = users[0]\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' 
in str(response.content)", "def test_create_invalid_product_code_lengthly(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"asdfghjk\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_login_with_nonempty_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_no_options_in_section(self):\n\n panels = {\n 'priority': PriorityAdminPanel(self.env),\n 'severity': SeverityAdminPanel(self.env),\n 'resolution': ResolutionAdminPanel(self.env),\n 'ticket_type': TicketTypeAdminPanel(self.env),\n 'component': ComponentAdminPanel(self.env),\n }\n\n # create the section, but no options or values in configuration\n self.env.config.set('ticket-field-config','','')\n\n # run our plugin\n admin_command = TicketFieldConfigCommand(self.env)\n admin_command.set_fields_from_config()\n\n # verify that present section but missing options does not alter db\n for name, panel in panels.items():\n if name == 'component':\n self.assertItemsEqual(\n panel.get_component_list(),\n self.default[name]\n )\n else:\n self.assertItemsEqual(\n panel.get_enum_list(),\n self.default[name]\n )", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_missing_description(superuser):\n form = RegisterForm(superuser, name='Client',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('This field is required.') in form.description.errors", "def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))", "def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")", "def test_blank_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n rv = self.category('')\n self.assertIn(b'Field must be between 1 and 50 characters long.', rv.data)", "def test_get_empty_product_list(self):\n response = self.client().get('/api/v1/products')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"There are no books\")\n self.assertEqual(response.status_code, 404)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')", "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_add_category_missing_fields(self):\n category = json.dumps({\n 'desc': \"Jamaican\",\n })\n response = self.client.post('/category', data=category,\n 
headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Check the keys and try again', response.data.decode())", "def test_raises_on_constructor_fields_error(self):\n self.form.constructor_fields = None\n message = \"Expected a list of field name strings for constructor_fields. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_cannot_update_tab_with_empty_field(self):\n\n self.client.login(username='tab', password='tab')\n tab_fields = ['name']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, tab_fields)\n\n tab = Tab.objects.get(pk=self.tab.pk)\n self.assertEqual(tab.name, 'test')\n self.assertIsNone(tab.last_edit_date)", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_default_product_explodability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.explode(), \"...boom!\")", "def test_create_form_with_no_fields(self):\n with pytest.raises(ValidationError):\n SurveyForm.create('badform', '<p>no input fields here</p>')\n\n with pytest.raises(ValidationError):\n SurveyForm.create('badform', '<input id=\"input_without_name\" />')", "def test_error_with_empty_trait_field(self):\n response = self.client.post(self.get_url(), {'object': ''})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', 'object',\n 'This field is required.')", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_create_invalid_product_long_name(self):\n product_name = \"1234567890123456789098765432112345678900987654322123\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_form_missing(self):\n self.step_data[\"basics\"] = {\"advanced\": True}\n resp = self.post_step(\"basics\")\n self.assertWizardFailure(resp, \"name\")", "def test_post_blank_tag(self):\n response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertTrue('Oops!' in str(messages[0]))\n form = response.context['form']\n self.assertEqual(form['tag'].errors, [u'This field is required.'])\n self.assertNotIn(self.tag, self.trait.all_tags.all())", "def test_post_blank_tag(self):\n response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertTrue('Oops!' 
in str(messages[0]))\n form = response.context['form']\n self.assertEqual(form['tag'].errors, [u'This field is required.'])\n self.assertNotIn(self.tag, self.trait.all_tags.all())", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_no_name(self):\n data = self._get_form_data(name='')\n form = self._get_form(data=data)\n self.assertFalse(self._validate_form(form))\n self.assertTrue('name' in form.errors)", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_make_order_with_some_data_missing(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={'item_name': 'Watermelon'}, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Missing required param')", "def test_add_empty(self):\n self.open_url('/group/add')\n el = self.wd.find_element(By.ID, \"name\")\n el.send_keys(\"\")\n self.submit_form(\"group_form\")\n # Since we're using HTML5 'required' we'll just assert that form did not submit.\n self.assertEquals('Add Group', self.wd.title)", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_invalid_post_data_empty_fields(self):\n data = {\n # first_author_form\n 'name': '',\n \n # authors_fs --> 'form-0-name', ..., 'form-3-name' + ManagementForm\n 'form-0-name': '',\n 'form-1-name': '',\n 'form-2-name': '',\n 'form-3-name': '',\n 'form-TOTAL_FORMS': ['4'],\n 'form-INITIAL_FORMS': ['0'],\n 'form-MIN_NUM_FORMS': ['0'],\n 'form-MAX_NUM_FORMS': ['1000'],\n \n # language_form\n 'code': '',\n \n # book_form\n 'title': '',\n 'pub_date': '',\n 'pages': '',\n 'isbn': '',\n 'cover_url': '',\n }\n self.assertTrue(Book.objects.count() == 0)\n self.assertTrue(Author.objects.count() == 0)\n self.assertTrue(Language.objects.count() == 0)\n \n response = self.client.post(self.url, data)\n self.assertEquals(response.status_code, 200)\n \n 
language_form = response.context.get('language_form')\n book_form = response.context.get('book_form')\n self.assertTrue(language_form.errors)\n self.assertTrue(book_form.errors)\n\n self.assertTrue(Book.objects.count() == 0)\n self.assertTrue(Author.objects.count() == 0)\n self.assertTrue(Language.objects.count() == 0)", "def test_items_create_empty_user(patch_mongo):\n item = {\n \"content\": \"lorem ipsum\",\n \"priority\": \"high\",\n \"status\": \"backlog\",\n \"users\": [],\n }\n\n response = client.post(\"/item\", json=item)\n assert response.status_code == status.HTTP_400_BAD_REQUEST", "def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)", "def test_make_order_with_a_missing_field(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\torder_data = {\n\t\t\t\t\t\"owner\": \"Pemwa\",\n\t\t\t\t\t\"meal_name\": \"pizza\"\n\t\t\t\t\t }\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\torder_data) , content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(result[\"message\"], \"Missing argument\")\n\t\tself.assertEqual(response.status_code, 400)" ]
[ "0.7727357", "0.71426547", "0.71228373", "0.6888725", "0.6875848", "0.68659735", "0.68113697", "0.6800745", "0.6779112", "0.67721987", "0.6709652", "0.66885155", "0.6665036", "0.6651361", "0.6607132", "0.66057837", "0.66039413", "0.65725106", "0.6552758", "0.6544897", "0.6522389", "0.65190476", "0.6440011", "0.6418874", "0.64180654", "0.6415804", "0.63955593", "0.63657457", "0.63615507", "0.63556045", "0.6341899", "0.6329007", "0.63178754", "0.6310761", "0.62901926", "0.62846184", "0.6280863", "0.62556744", "0.6252401", "0.6247117", "0.6237773", "0.6226532", "0.6225643", "0.6208608", "0.6202311", "0.62003225", "0.6192476", "0.6174107", "0.6169916", "0.61671734", "0.61558837", "0.61511153", "0.61457425", "0.6129683", "0.61271864", "0.6116395", "0.6111715", "0.6106593", "0.6102717", "0.6101227", "0.61004436", "0.60941195", "0.6086567", "0.6086262", "0.60702085", "0.6057878", "0.6052217", "0.6034393", "0.60323566", "0.6031951", "0.6026745", "0.60259837", "0.602422", "0.602371", "0.60223144", "0.6019192", "0.6017212", "0.6014306", "0.60139644", "0.60137665", "0.6013265", "0.60131717", "0.60089916", "0.6007089", "0.6004117", "0.5975877", "0.59743136", "0.59720653", "0.59720653", "0.5971651", "0.59670186", "0.59666413", "0.596416", "0.5961625", "0.5960427", "0.59596056", "0.59538394", "0.59532946", "0.5951724", "0.59404945" ]
document_score: 0.8373025
document_rank: 0
query: Tests that product_name field cannot contain a number
document:
def test_Product_name_cannot_contain_a_number(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_3',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Please enter strings in name and category!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_category_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='4dens',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def prodName(self, pName):\r\n if str(pName).isnumeric() == False:\r\n self.__prodName = pName\r\n else:\r\n raise Exception(\"Product Names cannot be numbers\")", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def lf_is_numeric(x):\n words = x.product_name.split()\n if words[x.word_idx].isnumeric():\n return MODELNAME\n return -1", "def test_add_sale_with_product_name_not_string(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 1, 'price': 1500, 'quantity': 10, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product name should be a string.')", "def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. 
The product price should be an integer.')", "def test_legal_names(self):\n prods = generate_products()\n for obj in prods:\n self.assertRegexpMatches(\n '(\\w{2,10} \\w{0,12}|\\?{0,3}){1}', obj.name)", "def product_isalpha(product):\n if product.isalpha(): #This verifies if the product that has inserted, has a valid name\n return True\n else:\n return False", "def test_create_invalid_price_higher_than_999(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 1001\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_create_invalid_product_code_lengthly(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"asdfghjk\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def test_create_invalid_product_long_name(self):\n product_name = \"1234567890123456789098765432112345678900987654322123\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_number(self):\n descriptor = clone(SPECIES_OBSERVATION_SCHEMA)\n record = {\n 'Observation Date': \"18/08/2016\",\n 'Latitude': -32,\n 'Longitude': 115,\n 'Species Name': 1234\n }\n schema = SpeciesObservationSchema(descriptor)\n with self.assertRaises(Exception):\n schema.cast_species_name(record)", "def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def lf_is_before_num(x):\n words = x.product_name.split()\n if x.word_idx < len(words)-1 and words[x.word_idx+1].isnumeric():\n return MODELNAME\n return -1", "def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def test_starts_with_dollar_sign(self):\n with self.assertRaises(ValidationError):\n field_name_validator('$id')", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = 
self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def test_ends_with_dollar_sign(self):\n try:\n field_name_validator('id$')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False", "def test_ends_with_dollar_sign(self):\n with self.assertRaises(ValidationError):\n db_name_validator('id$')", "def validate_numeric(column_name, value, column_data_type=\"numeric\"):\n valid = value.isnumeric()\n if not valid:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def test_non_integer_suffix(self):\n with self.assertRaises(Exception) as exception:\n make_rpm_version('0.1.2preX')\n\n self.assertEqual(\n u'Non-integer value \"X\" for \"pre\". 
Supplied version 0.1.2preX',\n unicode(exception.exception)\n )", "def test_stock_and_price_must_be_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock='stock',\n price='money'\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def isnum(self, x):\n\n return x in '1234567890.-'", "def test_details_nonnum_id(self):\n self.check_response(\n '/attributes/xyz',\n ('Please enter an integer value for Attribute ID',))", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def validate_number(column_name, value, column_data_type=\"number\"):\n valid = value.isnumeric()\n if valid is False:\n try:\n float(value)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def __set_has_numeric(text=str):\n reg_ex = constants.NUMERIC_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_NUMERIC_KEY, text)", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def test_number(self):\n form_data = self.form_data('CDr=cpz&Z&a!cuP-nAQe')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def __checkProduct(self, prd, num):\n if prd not in vmdata.prdStore or not isinstance(num, int) or num < 1:\n return False \n return True", "def test_ProductsDataViewSet_with_post_Invalid_data(self):\n payload = {\n \"name\": \"1234\"\n }\n\n # Request the data by API call.\n response = self.client.post('/api/productsdata/',\n data=json.dumps(payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json()['name'][0],\n 'Name `{0}` must contain atleast one letter'.format(\n payload.get('name')))", "def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"", "def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)", "def 
is_name_mostly_numeric(self) -> bool:\n app_no_punc = self.app_name_no_punc()\n\n try:\n int(app_no_punc)\n return True\n except ValueError:\n pass\n\n alphabetic_chars = 0\n for char in app_no_punc:\n if not char.isnumeric():\n alphabetic_chars += 1\n\n return alphabetic_chars / len(app_no_punc) < .75", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def validate_product_quantity(item, qty):\n return True", "def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True", "def test_starts_with_dollar_sign(self):\n with self.assertRaises(ValidationError):\n db_name_validator('$id')", "def test_index_hostid_notnum(self):\n self.check_response(\n '/attributes?h=xyz',\n ('Please enter an integer value for Host ID'))", "def clean_numeric_column(name : float) -> float:\n if name > -1 and name < 1:\n name = 0\n return name", "def test_case_strings_numbers2(self):\n data = {\"numbers\": \"1,4,e,w,5,t\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.data, {\"error\":\"must be a number\"})", "def test_osimportname_name_too_many_chars(self):\n\n # get foreign key object id\n os_id = Os.objects.get(os_name='os_1').os_id\n # get object\n form = OsimportnameForm(data = {\n 'osimportname_name': 'ooooooooooooooooooooooooooooooo',\n 'osimportname_importer': 'osimportname_importer_1',\n 'os': os_id\n })\n # compare\n self.assertFalse(form.is_valid())", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_price_details_number(self):\n with self.client:\n response = self.add_meal(\"beef\", \"jasmine\")\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a number\")\n self.assertEqual(response.status_code, 400)", "def test_cannot_make_sale_with_wrong_datatypes(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_345\", \n \"quantity\":'Kummi'\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name & quantity should be a character & number respectively!')\n self.assertEqual(resp.status_code, 400)", "def test_negativeQuantity(self):\n result = self.parser.parse(\"-1d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_case_strings_numbers(self):\n data = {\"numbers\": \"1,4,6,e,r,6,t,1\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.status_code, 
status.HTTP_400_BAD_REQUEST)", "def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def test_create_invalid_product_code_lower_case(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"a\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")", "def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True", "def test_containsOnly(self) -> None:\n assert containsOnly('.83', '0123456789.')\n assert not containsOnly('43221', '123')", "def clean(self, value):\n non_decimal = re.compile(r'\\D+')\n value = non_decimal.sub('', value.strip()) \n \n if value and not luhn(value):\n raise forms.ValidationError(\"Please enter a valid credit card number.\")\n return super(BankcardNumberField, self).clean(value)", "def validate_serial_number(self, value):\n qs = GenSet.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def validate_serial_number(self, value):\n qs = Tools.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def test_valid_phone_invalid():\n assert not valid_phone(\"\")\n assert not valid_phone(\"000-000-00000\")\n assert not valid_phone(\"000-0000-0000\")\n assert not valid_phone(\"0000-000-0000\")\n assert not valid_phone(\"00000000000\")\n assert not valid_phone(\"foobar\")", "def valid_name_product():\n reset()\n enter_product = False\n while enter_product == False:\n product = raw_input(\"Add the product: \")\n product_lower = minuscule(product)\n enter_product = product_isalpha(product)\n if enter_product == False:\n print \"insert a valid name\"\n return product_lower", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def test_drop_numbers():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"123,123.123\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"number\"].dropna().empty", "def test_reserved_name(self):\n with self.assertRaises(ValidationError):\n field_name_validator('_id')", "def test_post_sale_record_with_quantity_string(self):\n\t\tself.register_user()\n\t\tresult = self.login_user()\n\t\taccess_token = json.loads(result.data.decode())['token']\n\n\t\tres 
= self.client.post(self.sl_url,\n\t\t\tdata=self.string_sales,\n\t\t\theaders=dict(Authorization=\"Bearer \" + access_token))\n\t\tresult = json.loads(res.data.decode())\n\t\tself.assertEqual(res.status_code, 400)\n\t\tself.assertEqual(result[\"message\"][\"quantity\"], \"Only integers allowed\")", "def test_valid_phone_valid():\n assert valid_phone(\"000-000-0000\")\n assert valid_phone(\"0000000000\")", "def validate_serial_number(self, value):\n qs = GenSet2.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )", "def validate_serial_number(self, value):\n qs = StarboardEngine3.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def is_number(self, cell): \n for token in self._cell_tokenizer.tokenize(cell.get_text()):\n if self._get_token_type(token) == 'NAME':\n return False \n return True", "def validate_serial_number(self, value):\n qs = StarboardEngine2.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def validate_serial_number(self, value):\n qs = MainEngine.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def test_alnum(self, address):\n t=address.replace(\" \", \"\").isalnum()\n assert t, \"it only accept digits and letters\"", "def test_osimportname_importer_too_many_chars(self):\n\n # get foreign key object id\n os_id = Os.objects.get(os_name='os_1').os_id\n # get object\n form = OsimportnameForm(data = {\n 'osimportname_name': 'osimportname_1',\n 'osimportname_importer': 'ooooooooooooooooooooooooooooooo',\n 'os': os_id,\n })\n # compare\n self.assertFalse(form.is_valid())", "def test_update_product_with_characters_for_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_denims',\n category='denims',\n stock='many',\n price='pesa'\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_sell_ticket_name_alnum(self, 
*_):\n #logout to invalidate any logged in session\n self.open(base_url + '/logout')\n #login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n #open the base url\n self.open(base_url)\n #Enter an invalid ticket name\n self.type('#name_sell', \" invalid \")\n self.type(\"#price_sell\", \"100\")\n self.type(\"#quantity_sell\", \"2\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n #Assert that the valid error message is shown.\n self.assert_text(\"Invalid spaces found in word\", \"#message\")", "def validate_serial_number(self, value):\n qs = StarboardEngine.objects.filter(serial_number__iexact=value) # including instance\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.exists():\n raise serializers.ValidationError(\"This serial number has already been used\")\n return value", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"", "def test_create_invalid_price_three_dp(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.123\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_legal_names(self):\n names = [prod.name for prod in generate_products()]\n sep = [(name.split()[0], name.split()[1]) for name in names]\n for name in sep:\n self.assertIn(name[0], ADJS)\n self.assertIn(name[1], NOUNS)", "def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)", "def validate_telephone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(telephone=value).exists():\n raise serializers.ValidationError('telephone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def test_add_sale_with_invalid_quantity(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n \n response = self.app_test_client.post('{}/saleorder'.format(\n 
self.base_url), json={'name': \"Hand Bag\", 'price': 1500, 'quantity': \"5\", 'totalamt': \"\"}, \n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The quantity should be an integer.')", "def validate_int(self, name, number):\n if type(number) is not int:\n raise TypeError(\"{} must be an integer\".format(name))", "def validate_int(self, name, number):\n if type(number) is not int:\n raise TypeError(\"{} must be an integer\".format(name))", "def test_search_number_invalid():\n response = search.number({\"body\": \"{}\"}, {})\n\n assert response[\"statusCode\"] == 422", "def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)", "def clean_price(self):\n price = self.cleaned_data.get('price')\n if price == \"0\":\n raise forms.ValidationError(\n u'Please insert a price for your product')\n return price", "def validate_sale(self, data):\n if len(data.keys()) == 0 or len(data.keys()) > 3:\n return \"Invalid keys\"\n if 'product_name' and 'product_quantity' not in data.keys():\n return \"Add product name and quantity\"\n if data['product_name'] == \"\":\n return \"Product_name cannot be blank\"\n if data['product_quantity'] == \"\":\n return \"Product_quantity cannot be blank\"\n if data['price'] == \"\":\n return \"Enter required price\"\n if not re.match(r\"^[0-9_]*$\", data['product_quantity']):\n return \"quantity should contain integers only\"\n if not re.match(r\"^[a-zA-Z0-9 _]*$\", data['product_name']):\n return \"productname should contain alphanumerics only\"\n if not re.match(r\"^[0-9_]*$\", data['price']):\n return \"price should contain integers only\"\n else:\n return \"Sale_valid\"", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def test_load_do_not_convert_non_quantity_strings(self):\n sage = ForceField(\"openff-2.0.0.offxml\")\n\n for parameter_handler_name in sage.registered_parameter_handlers:\n parameter_handler = sage.get_parameter_handler(parameter_handler_name)\n\n for parameter in parameter_handler.parameters:\n assert isinstance(parameter.smirks, str)\n assert not isinstance(parameter.smirks, unit.Quantity)\n\n # Ensure that, for example, F isn't converted to Farad\n if (\n parameter_handler_name == \"LibraryCharges\"\n and parameter.name is not None\n ):\n assert isinstance(parameter.name, str)\n assert not isinstance(parameter.name, unit.Quantity)", "def number_only(number_available):\n number_available = number_available.removeprefix('In stock (')\n number_available = number_available.removesuffix(' available)')\n return number_available" ]
[ "0.6742002", "0.6642998", "0.6555487", "0.6555487", "0.6454647", "0.62910545", "0.62258327", "0.6198766", "0.6166252", "0.61075205", "0.5994341", "0.5968352", "0.5964386", "0.595047", "0.5949145", "0.5943742", "0.59381294", "0.591026", "0.5901904", "0.5901547", "0.58894527", "0.5888071", "0.5883961", "0.58690155", "0.5856628", "0.5855989", "0.58472776", "0.5842301", "0.5831338", "0.5830257", "0.5827018", "0.58169067", "0.5816499", "0.58105135", "0.57980406", "0.5789695", "0.57890105", "0.57793605", "0.5769706", "0.57665116", "0.57640916", "0.5749591", "0.5733744", "0.57286924", "0.5727441", "0.57211816", "0.5710418", "0.57065576", "0.5700452", "0.5681173", "0.5679878", "0.56781447", "0.56731516", "0.56717795", "0.5667456", "0.5659941", "0.5659772", "0.56525326", "0.56512296", "0.5646235", "0.56461525", "0.5643989", "0.5638517", "0.5624516", "0.56158596", "0.56150967", "0.56147915", "0.5613317", "0.5603248", "0.5597897", "0.55976474", "0.5595229", "0.5576273", "0.55758727", "0.556864", "0.5562266", "0.55602527", "0.5542844", "0.5532724", "0.553263", "0.55324227", "0.55292857", "0.5520752", "0.551507", "0.55117774", "0.55116004", "0.55088305", "0.5507199", "0.5506493", "0.5505189", "0.5501246", "0.54942065", "0.54942065", "0.54929024", "0.5491924", "0.5491018", "0.5490623", "0.5486542", "0.54861057", "0.5481366" ]
document_score: 0.76176125
document_rank: 0
query: Tests that category field cannot contain a number
document:
def test_category_cannot_contain_a_number(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='4dens',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Please enter strings in name and category!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_isNumericCategory(self):\r\n obs = self.overview_map.isNumericCategory('Treatment')\r\n self.assertEqual(obs, False)\r\n\r\n obs = self.overview_map.isNumericCategory('DOB')\r\n self.assertEqual(obs, True)", "def test_isNumericCategory(self):\n obs = self.overview_map.isNumericCategory('Treatment')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.isNumericCategory('DOB')\n self.assertEqual(obs, True)", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def isNumericCategory(self, category):\r\n category_values = self.getCategoryValues(self.SampleIds, category)\r\n\r\n is_numeric = True\r\n for category_value in category_values:\r\n try:\r\n float(category_value)\r\n except ValueError:\r\n is_numeric = False\r\n return is_numeric", "def test_drop_numbers():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"123,123.123\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"number\"].dropna().empty", "def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True", "def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True", "def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)", "def test_add_category_integer_name(self):\n category = json.dumps({\n 'name': 8888,\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Numbers cant be a Name', response.data.decode())", "def test_getCategoryValue_bad_category(self):\n # Nonexistent category.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.354', 'foo')\n # Integer category.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.354', 42)\n # Category of type None.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.354', None)\n\n # Category on map with no metadata, but that has sample IDs.\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.354', 'Treatment')\n # Integer category on map with no metadata.\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.354', 34)\n # Category of type None on map with no metadata.\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.354', None)", "def clean(self, value):\n non_decimal = re.compile(r'\\D+')\n value = non_decimal.sub('', value.strip()) \n \n if value and not luhn(value):\n raise forms.ValidationError(\"Please enter 
a valid credit card number.\")\n return super(BankcardNumberField, self).clean(value)", "def test_category_invalid(self):\n # wiki and questions\n ques = QuestionFactory(title=u'q1 audio')\n ques.tags.add(u'desktop')\n ans = AnswerFactory(question=ques)\n AnswerVoteFactory(answer=ans, helpful=True)\n\n d1 = DocumentFactory(\n title=u'd1 audio',\n locale=u'en-US',\n category=10,\n is_archived=False,\n tags=[u'desktop'])\n ApprovedRevisionFactory(document=d1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(2, json.loads(response.content)['total'])", "def is_numeric_and_not_ignored(column):\n if column not in categorical_columns and column not in ignore_columns:\n return True\n return False", "def test_getCategoryValue_bad_category(self):\r\n # Nonexistent category.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.354', 'foo')\r\n # Integer category.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.354', 42)\r\n # Category of type None.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.354', None)\r\n\r\n # Category on map with no metadata, but that has sample IDs.\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.354', 'Treatment')\r\n # Integer category on map with no metadata.\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.354', 34)\r\n # Category of type None on map with no metadata.\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.354', None)", "def _validate_data_category(data_category: str) -> str:\n valid_categories = DataCategory.__members__.keys()\n if data_category not in valid_categories:\n raise common_exceptions.DataCategoryNotSupported(\n f\"The data category {data_category} is not supported.\"\n )\n return data_category", "def test_blank_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n rv = self.category('')\n self.assertIn(b'Field must be between 1 and 50 characters long.', rv.data)", "def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False", "def validate_number(column_name, value, column_data_type=\"number\"):\n valid = value.isnumeric()\n if valid is False:\n try:\n float(value)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def test_check_category_input_1(self):\n assert validation.check_category_input(1, []) == False", "def __set_has_numeric(text=str):\n reg_ex = constants.NUMERIC_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_NUMERIC_KEY, text)", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def scrub_category_val(category_val):\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val", "def validate_numeric(column_name, value, column_data_type=\"numeric\"):\n valid = value.isnumeric()\n if not valid:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def must_contain_digit(cell):\n # Check if it's nan first\n if 
check_empty(cell):\n return True\n return not bool(re.search(\"\\d\", str(cell)))", "def is_number(text):\n return text.lower() in AVRO_NUMBERS", "def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"", "def validate_category(self, data):\n try:\n if data['category_name'] == \"\":\n return \"Category_name cannot be blank\"\n if 'category_name' not in data.keys():\n return \"Enter category_name\"\n if not re.match(r\"^[a-zA-Z0-9 _]*$\", data['category_name']):\n return \"category name should contain alphanumerics only\"\n if len(data.keys()) > 1:\n return \"Invalid fields added\"\n else:\n return \"category_valid\"\n except KeyError:\n return \"Add required keys\"", "def _fix_surprising_number(val, s):\n if (\n isinstance(val, (int, float)) and \"!!\" not in s\n and _contains_non_numeric_chars(s)\n ):\n return s\n return val", "def validate(value):\n if str.isdigit(value) or value == \"\":\n return True\n else:\n return False", "def test_is_number(self):\n \n self.assertEqual(self.var.is_number(None), False)\n self.assertEqual(self.var.is_number(\"5\"), True)\n self.assertEqual(self.var.is_number(\"a\"), False)", "def is_number(c):\n return '0' <= c <= '9'", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def test_number(self):\n form_data = self.form_data('CDr=cpz&Z&a!cuP-nAQe')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def __verify_numeric(self, action, value):\n if action != \"1\": # if the action is anything other than inserting:\n return True\n try:\n return value.isnumeric()\n except ValueError:\n return False", "def test_case_08_not_a_real_number(self):\n self.__assert_equals_test_case([(\"A\", 1, 1)], 'InvalidInput')", "def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42", "def test_duration_attribute_is_not_an_integer(self):\n d = DurationMixin(duration=10)\n with self.assertRaises(TypeError) as cm:\n d.duration = 'not an integer'\n\n self.assertEqual(\n cm.exception.message,\n 'DurationMixin.duration should be an non-negative float, not str'\n )", "def test_categorical_column_validates_categories(self):\n\n categories = 1\n\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", 1]\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", \"Blue\"]\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n 
content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_category_field(self):\n field = self.record.find('field[@name=\\'category_id\\']')\n self.assertEqual(field.attrib['eval'],\n '[(4, ref(\\'nh_clinical.role_nhc_admin\\'))]',\n 'Incorrect eval on category id')", "def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number", "def test_cast_bad_category(self):\n categories = list(range(10))\n dim = Categorical(\"yolo\", categories, shape=2)\n sample = np.array([\"asdfa\", \"1\"], dtype=object)\n with pytest.raises(ValueError) as exc:\n dim.cast(sample)\n assert \"Invalid category: asdfa\" in str(exc.value)", "def test_bad_probabilities(self):\n categories = {\"asdfa\": 0.05, 2: 0.2, 3: 0.3, 4: 0.4}\n with pytest.raises(ValueError):\n Categorical(\"yolo\", categories, shape=2)", "def isnum(self, x):\n\n return x in '1234567890.-'", "def is_numberish(G):\n return True", "def checknum(val):\n\n if len(val) == 0:\n return False\n\n for i in range(len(val)):\n if not val[i].isdigit():\n return False\n\n return True", "def is_number(self) -> bool:\n return False", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def not_a_num(val):\n if math.isnan(val):\n return False\n else:\n return True", "def test_cpf_is_digit(self):\n form = self.make_validated_form(cpf='ABCD5678901')\n self.assertFormErrorMessage(form, 'cpf', 'digits')", "def test_cannot_create_with_invalid_category(self):\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = 10),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['category'][0].code, 'does_not_exist')", "def validate_int(self, name, number):\n if type(number) is not int:\n raise TypeError(\"{} must be an integer\".format(name))", "def validate_int(self, name, number):\n if type(number) is not int:\n raise TypeError(\"{} must be an integer\".format(name))", "def test_cpf_is_digit(self):\r\n form = self.make_validated_form(cpf='asdfesa1234')\r\n self.assertFormErrorCode(form, 'cpf', 'digits')", "def _is_non_negative_int(item):\n if not isinstance(item, int):\n return False\n return item >= 0", "def test_contains_wrong_shape(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo\", categories, shape=2)\n\n assert 3 not in dim\n assert (\"asdfa\", 2) in dim", "def is_valid_value(self, value):\n return value in self.categories", "def is_numeric(self) -> bool:\n return False", "def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False", "def _validate_input_integer(display_name, value):\n\n 
if isinstance(value, int) is False:\n raise ValueError(display_name + \" must be integer.\")", "def clean_value(self, value):\n try:\n return int(value)\n except ValueError:\n raise ValidationError('\"%s\" is not an integer' % value)", "def oxygen_validation(oxygen: int) -> bool:\n if not str(oxygen).isnumeric() or isinstance(oxygen, str):\n return False\n\n return int(oxygen) < 101 and int(oxygen) >= 0", "def test_categoryQuery(self) -> None:\n result = self.entries.filter(category__iexact='suncare')\n self.assertGreater(len(result), 0)\n\n result = self.entries.filter(category__iexact='xxxxxx')\n self.assertEqual(len(result), 0)", "def check_value_is_number_type(value):\n if not isinstance(value, Number):\n raise TypeError(\"Value must be a Number type.\")", "def validateNumber(key, value):\n if value is None or isinstance(value, (int, float)) and not isinstance(value, bool):\n return None\n else:\n return {'error': 'invalid value: %s (%s), valid values number/null' % (value, pythonTypeToJSONType(value))}", "def is_number_char(c: str) -> bool:\n return c.isdigit() or c == \".\"", "def validate_numeric_annots(self):\n valid = True\n for annot_header in self.file.columns[1:]:\n annot_name = annot_header[0]\n annot_type = annot_header[1]\n column_dtype = self.file.dtypes[annot_header]\n if annot_type == \"numeric\" and column_dtype == \"object\":\n valid = False\n msg = f\"Numeric annotation, {annot_name}, contains non-numeric data (or unidentified NA values)\"\n self.store_validation_issue(\n \"error\", msg, \"content:invalid-type:not-numeric\"\n )\n return valid", "def is_number_correct(total):\n if int(total) < 0:\n return None\n return True", "def is_non_numeric_dd_invalid_message_shown(self):\n is_shown = False\n self.grid_row_data.clear()\n self.grid_row_data.update({\"Description\": \"\"})\n excluded_destinations_grid_row_data = self.get_grid_row_details(self.excluded_destinations_grid_div_id, self.grid_row_data)\n if \"non-numeric dd\" in excluded_destinations_grid_row_data[\"Description\"].lower():\n is_shown = True\n return is_shown", "def check_numeric(data, col):\n from pandas.api.types import is_numeric_dtype\n try:\n if is_numeric_dtype(data[col]):\n logging.info(f' {col} is numeric.')\n return data\n else:\n numdata = (data\n .drop([col], axis=1)\n .join(data[col].apply(pandas.to_numeric, errors='coerce'))\n )\n numcol = numdata[col].isnull().values().sum()\n logging.warning(f' %s rows in %s are non-numeric' % (numcol, col,))\n logging.warning(f' {col} is tested by coercing into numeric values.')\n return numdata\n except:\n logging.error(f' the format of %s is not testable.' 
% (col,))\n print(data.head(n=2))\n sys.exit(1)", "def isCategorical(data):\n\tre = next((d for d in data if not (type(d) == int or type(d) == str)), None)\n\treturn (re is None)", "def test_getCategoryValue_bad_sample_id(self):\n # Nonexistent sample ID.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.000', 'Treatment')\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.000', 'Treatment')\n # Integer sample ID.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue, 42,\n 'DOB')\n # Sample ID of type None.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue, None,\n 'Treatment')\n\n # Sample ID on empty map.\n self.assertRaises(KeyError, self.empty_map.getCategoryValue, 's1',\n 'foo')\n # Integer sample ID on empty map.\n self.assertRaises(KeyError, self.empty_map.getCategoryValue, 1,\n 'bar')\n # Sample ID of None on empty map.\n self.assertRaises(KeyError, self.empty_map.getCategoryValue, None,\n 'baz')", "def test_duration_argument_is_not_an_integer(self):\n with self.assertRaises(TypeError) as cm:\n DurationMixin(duration='not an integer')\n\n self.assertEqual(\n cm.exception.message,\n 'DurationMixin.duration should be an non-negative float, not str'\n )", "def can_insert(data):\n if not issparse(data):\n return False\n if data.dtype.char in UNSUPPORTED_NUMERIC_TYPE_CODES:\n return False\n return np.issubdtype(data.dtype, np.number)", "def is_number(self, cell): \n for token in self._cell_tokenizer.tokenize(cell.get_text()):\n if self._get_token_type(token) == 'NAME':\n return False \n return True", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def test_contains_false(self):\n self.assertFalse('Not_a_Category' in self.tester)", "def test_contains_false(self):\n self.assertFalse('Not_a_Category' in self.tester)", "def test_case_strings_numbers2(self):\n data = {\"numbers\": \"1,4,e,w,5,t\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.data, {\"error\":\"must be a number\"})", "def must_be_numeric(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return pd.isna(pd.to_numeric(str(cell), errors=\"coerce\"))", "def test_getCategoryValue_bad_sample_id(self):\r\n # Nonexistent sample ID.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.000', 'Treatment')\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.000', 'Treatment')\r\n # Integer sample ID.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue, 42,\r\n 'DOB')\r\n # Sample ID of type None.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue, None,\r\n 'Treatment')\r\n\r\n # Sample ID on empty map.\r\n self.assertRaises(KeyError, self.empty_map.getCategoryValue, 's1',\r\n 'foo')\r\n # Integer sample ID on empty map.\r\n self.assertRaises(KeyError, self.empty_map.getCategoryValue, 1,\r\n 'bar')\r\n # Sample ID of None on empty map.\r\n self.assertRaises(KeyError, self.empty_map.getCategoryValue, None,\r\n 'baz')", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def 
test_add_missing_field(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[1]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))", "def _check_with_cp_no_format(self, field, value):\n if not self._is_valid_cp_format(value):\n self._error(field, \"Invalid cellphone number format.\")", "def test_ensure_likes_dislikes_are_positive(self):\n testFailedCheck = False\n category_params = {'name': 'n', 'views': -1, 'likes': -1, 'dislikes': -2, 'likesDislikesDefault': 0, 'slug': str(random.randint(1, 1000))}\n cat = CategoryForm(category_params)\n self.assertFalse(cat.is_valid())", "def test_check_category_input_2(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(3, choices) == False", "def test_wrong_type_of_fill_all_non_numeric(currency_df):\n with pytest.raises(TypeError):\n _ = currency_df.currency_column_to_numeric(\n \"d_col\",\n fill_all_non_numeric=\"zzzzz\",\n )", "def __allowed_values_inccorrect_number(self):\n strTestName = 'Values of a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 1.4\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def lf_is_numeric(x):\n words = x.product_name.split()\n if words[x.word_idx].isnumeric():\n return MODELNAME\n return -1", "def test_details_nonnum_id(self):\n self.check_response(\n '/attributes/xyz',\n ('Please enter an integer value for Attribute ID',))", "def clean_numeric_column(name : float) -> float:\n if name > -1 and name < 1:\n name = 0\n return name", "def check_type(number):\r\n if number.find(\"i\") != -1:\r\n return 1\r\n return 0", "def is_number(self,s):\n try:\n float(s.replace(\" \", \"\"))\n return True\n except ValueError:\n return False" ]
[ "0.69292754", "0.6892475", "0.68267983", "0.68267983", "0.6329393", "0.60511804", "0.6026339", "0.60021937", "0.5988064", "0.5981536", "0.5980512", "0.5917058", "0.59100056", "0.59020984", "0.5897836", "0.5872285", "0.58462423", "0.58406997", "0.58369875", "0.58208215", "0.5811314", "0.5794547", "0.57898664", "0.578632", "0.57757646", "0.5751578", "0.5736873", "0.5722728", "0.57048607", "0.5696044", "0.567359", "0.5662661", "0.56349146", "0.5632751", "0.56250113", "0.56202745", "0.5617749", "0.56037873", "0.5602869", "0.5594432", "0.5584651", "0.55356145", "0.55348307", "0.5529222", "0.5528674", "0.5518275", "0.5509757", "0.5505236", "0.55043936", "0.5487497", "0.54769564", "0.547558", "0.54628336", "0.5459778", "0.5456561", "0.5456381", "0.544882", "0.544882", "0.5441759", "0.5427808", "0.5427692", "0.54273266", "0.54224116", "0.5419826", "0.5417967", "0.54157186", "0.54130644", "0.54105616", "0.53833044", "0.5362602", "0.5356053", "0.5351697", "0.53451926", "0.5340618", "0.53396004", "0.5335087", "0.5333291", "0.5333248", "0.5330215", "0.5330128", "0.5327501", "0.5323651", "0.53220946", "0.53220946", "0.53211695", "0.5314426", "0.5308681", "0.5306026", "0.5304058", "0.5290491", "0.52889085", "0.52855515", "0.52784544", "0.5275432", "0.5272486", "0.5268692", "0.52644414", "0.52517205", "0.524786", "0.52444625" ]
0.70390165
0
Tests that stock and price fields must be numbers
def test_stock_and_price_must_be_numbers(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock='stock',
        price='money'
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"", "def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42", "def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def test_make_order_with_price_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': -50, 'quantity': 3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Price and quantity must be ints >= 1')", "def test_price_details_number(self):\n with self.client:\n response = self.add_meal(\"beef\", \"jasmine\")\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a number\")\n self.assertEqual(response.status_code, 400)", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def pricevalidator(self, price):\n if type(price) != int:\n API.abort(400, error_messages[15]['str_price'])\n\n return True", "def test_10_insert_stock_prices(self):\n p_eur = Price.insert_new_price(\"EUR\", 1.2)\n p_aapl = Price.insert_new_price(\"AAPL\", 163.99)\n p_ibm = Price.insert_new_price(\"IBM\", 145.78)\n p_msft = Price.insert_new_price(\"MSFT\", 75.87)\n\n self.assertTrue(isinstance(p_eur, Price),\n msg=\"Price is NOT returning a valid inserted EUR instance\")\n print(\"Price insert EUR asset is returning the following price: {}\".format(\n p_eur.price,\n ))\n\n self.assertTrue(isinstance(p_aapl, Price),\n msg=\"Price is NOT returning a valid inserted AAPL instance\")\n print(\"Price insert AAPL asset is returning the following price: {}\".format(\n p_aapl.price,\n ))\n\n self.assertTrue(isinstance(p_ibm, Price),\n msg=\"Price is NOT returning a valid inserted IBM instance\")\n print(\"Price insert IBM asset is returning the following price: {}\".format(\n p_ibm.price,\n ))\n\n self.assertTrue(isinstance(p_msft, Price),\n msg=\"Price is NOT returning a valid inserted MSFT instance\")\n print(\"Price insert MSFT asset is returning the following price: {}\".format(\n p_msft.price,\n ))", 
"def clean_stock(self):\n stock = self.cleaned_data.get('stock')\n if stock == 0:\n raise forms.ValidationError(u'Please insert product quantity')\n return stock", "def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def test_numeric(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_numeric')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_numeric ' \\\n '( value NUMERIC(100,50) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_numeric VALUES (%s)'\n for i in range(100):\n int = random.getrandbits(150)\n frac = random.getrandbits(150)\n item = decimal.Decimal('%d.%s' % (int, frac))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_numeric'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, decimal.Decimal)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_numeric')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_numeric')\n cursor.execute(query)\n conn.commit()", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def check_for_float_and_int(check):", "def test_add_to_stock_negative(add):\n assert STOCK[0]['quantity'] == 20\n for i in [\"2.32\", \"sd\", -2, 0, 201]:\n value = validate_int(i)\n add[0].add_to_stock(value)\n # there is no change in our stock on invalid input\n assert STOCK[0]['quantity'] == 20\n STOCK[0]['quantity'] = 20", "def validate_insert(self, s, internal=True):\n super(FieldNumeric, self).validate_insert(s, internal) # mandatory check\n if s:\n try:\n float(s)\n except:\n raise FilemanError(\"\"\"[%s] is not a valid number\"\"\" % s)", "def check_symbol_price(self, data):\n if self.input_price < float(data.get(\"price\")):\n logging.info(\"Symbol price is higher than the input provided by the user.\")\n logging.info(\"Input Price :- \")\n logging.info(str(self.input_price))\n logging.info(\"Symbol Price :- \")\n logging.info(str(data.get(\"price\")))\n logging.info(\"+++++++++++++++++++++++++++++\")", "def test_check_price_ok() -> None:\n data = check_price(min_price=1, data={'p': 2.0})\n assert data == {'p': 2.0}", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def monetary_amount_valid(record, field_name='price', min=1, max=10):\n monetary_amount = record[field_name]\n assert isinstance(monetary_amount, float)\n string_price = str(monetary_amount)\n decimal = string_price.split(\".\")[1]\n assert min <= monetary_amount <= max and len(decimal) <= 2", "def _validate_price(price):\n try:\n price = float(price)\n except ValueError:\n raise 
ValueError('Please provide valid price')\n if price < 1:\n raise ValueError('Price should be positive number')\n return price", "def test_loads_base_price_valid(self):\n base_price: BasePrice = BasePrice.Schema().loads(json.dumps(base_price_valid))\n assert base_price.base_price == base_price_valid[\"base-price\"]\n assert base_price.options == base_price_valid[\"options\"]\n assert base_price.product_type == base_price_valid[\"product-type\"]", "def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)", "def test_make_order_with_quantity_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': 50, 'quantity': -3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Price and quantity must be ints >= 1')", "def test_add_sale_with_price_below_one(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 'Torch', 'price': -10, 'quantity': 5, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. 
The product price should be a positive number above 0.')", "def test_create_invalid_price_higher_than_999(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 1001\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_positive_price_details(self):\n with self.client:\n response = self.add_meal(\"beef\", -15000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a positive number\")\n self.assertEqual(response.status_code, 400)", "def test_wrong_type_of_fill_all_non_numeric(currency_df):\n with pytest.raises(TypeError):\n _ = currency_df.currency_column_to_numeric(\n \"d_col\",\n fill_all_non_numeric=\"zzzzz\",\n )", "def test_to_python_method_validation_errors(self):\n field = DecimalFractionField()\n with self.assertRaises(ValidationError):\n field.clean(\"abcd\")\n\n with self.assertRaises(ValidationError):\n field.clean(\"1 1 1/3\")\n\n with self.assertRaises(ValidationError):\n field.clean(\"1 1\")", "def test_amount_in_tons(self):", "def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_add_sale_with_invalid_quantity(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n \n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': 1500, 'quantity': \"5\", 'totalamt': \"\"}, \n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. 
The quantity should be an integer.')", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def test_quantity_has_to_be_an_integer(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\torder_data = {\n\t\t\t\t\t\"owner\": \"Pemwa\",\n\t\t\t\t\t\"meal_name\": \"pizza\",\n\t\t\t\t\t\"quantity\": '4'\n\t\t\t\t\t }\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\torder_data) , content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(result[\"message\"], \"Quantity has to be an integer Number\")\n\t\tself.assertEqual(response.status_code, 400)", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_number(self):\n form_data = self.form_data('CDr=cpz&Z&a!cuP-nAQe')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_case_strings_numbers2(self):\n data = {\"numbers\": \"1,4,e,w,5,t\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.data, {\"error\":\"must be a number\"})", "def test_convert_amounts(self):\n pass", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def test_int():\n assert int(Quantity(1, unit('m'))) == int(1)", "def test_validate_ticker_real(self):\n ticker = \"FB\"\n result = stock_helper.validate_ticker(ticker)\n self.assertEqual(result, True)", "def test_post_sale_record_with_quantity_string(self):\n\t\tself.register_user()\n\t\tresult = self.login_user()\n\t\taccess_token = json.loads(result.data.decode())['token']\n\n\t\tres = self.client.post(self.sl_url,\n\t\t\tdata=self.string_sales,\n\t\t\theaders=dict(Authorization=\"Bearer \" + access_token))\n\t\tresult = json.loads(res.data.decode())\n\t\tself.assertEqual(res.status_code, 400)\n\t\tself.assertEqual(result[\"message\"][\"quantity\"], \"Only integers allowed\")", "def test_sell_ticket_valid_quantity(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type('#quantity_sell', \"-1\")\n self.type(\"#price_sell\", \"15\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n # Assert that the valid error message is 
shown\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")\n\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type('#quantity_sell', \"101\")\n self.type(\"#price_sell\", \"15\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")", "def test_category_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='4dens',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_no_decimals_00(self):\n self.assertEqual(currency(188.00, False), \"$188\")", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def yohoho_validator(payload, chosen):\n\n if not chosen.isdecimal():\n print(f\"Choose a number!\")\n return False\n\n return True", "def test_sell_ticket_price_range(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"testticket\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#price_sell\", \"101\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Ticket price outside of valid range\", \"#message\")\n\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"testticket\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#price_sell\", \"9\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Ticket price outside of valid range\", \"#message\")", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def clean_price(self):\n price = self.cleaned_data.get('price')\n if price == \"0\":\n raise forms.ValidationError(\n u'Please insert a price for your product')\n return price", "def test_no_decimals_01(self):\n self.assertEqual(currency(188.01, False), \"$188.01\")", "def 
validate_numeric(column_name, value, column_data_type=\"numeric\"):\n valid = value.isnumeric()\n if not valid:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def test_numeric(self):\n self.assertEquals(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.')\n self.assertEquals(self.t['24']['seriesname'], '24')", "def test_historic_currency() -> None:\n schema = vol.Schema(cv.historic_currency)\n\n for value in (None, \"BTC\", \"EUR\"):\n with pytest.raises(vol.MultipleInvalid):\n schema(value)\n\n for value in (\"DEM\", \"NLG\"):\n assert schema(value)", "def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")", "def attribute_validation(cls, values: dict) -> dict:\n if not (total := values.get('total')):\n raise ValueError(\"Total attribute is required.\")\n \n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n \n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.market_lot_size_filter\n # if ONE :=1 and not filter.min_qty <= total <= filter.max_qty:\n # raise ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n total,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def test_validate_ticker_false(self):\n ticker = 'xxx'\n result = stock_helper.validate_ticker(ticker)\n self.assertEqual(result, False)", "def validate_product_quantity(item, qty):\n return True", "def check_for_float(check):", "def test_create_invalid_price_three_dp(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.123\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def test_16_insert_cash_prices(self):\n p_eur = Price.update_price(\"EUR\", 1.17)\n p_usd = Price.insert_new_price(\"USD\", 0.8909)\n\n self.assertTrue(isinstance(p_eur, Price),\n msg=\"Price is NOT returning a valid inserted EUR instance\")\n print(\"Price insert EUR asset is returning the following price: {}\".format(\n p_eur.price,\n ))\n\n self.assertTrue(isinstance(p_usd, Price),\n msg=\"Price is NOT returning a valid inserted USD instance\")\n print(\"Price insert USD asset is returning the following price: {}\".format(\n p_usd.price,\n ))", "def test_get_stock_price_summary1(self):\n\n actual = a1.stock_price_summary([])\n expected = (0,0)\n self.assertEqual(actual, expected)", "def test_getNumbers():\n assert formatter.getNumbers(\"some chars and 
$10.00\") == 10.0\n assert formatter.getNumbers(\n \"some chars and $10.99 some other chars\") == 10.99\n assert formatter.getNumbers(\"\") == -math.inf", "def numeric_check(param, name):\n\tif not isinstance(param, numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_numerical_requirement(self, space_each_type):\n tspace = build_required_space(space_each_type, type_requirement=\"numerical\")\n assert len(tspace) == 5\n assert tspace[0].type == \"real\"\n assert tspace[1].type == \"integer\"\n assert tspace[2].type == \"integer\"\n assert tspace[3].type == \"real\"\n assert tspace[4].type == \"integer\"\n assert (\n str(tspace)\n == \"\"\"\\\nSpace([Precision(4, Real(name=yolo, prior={norm: (0.9,), {}}, shape=(3, 2), default value=None)),\n Enumerate(Categorical(name=yolo2, prior={asdfa: 0.10, 2: 0.20, 3: 0.30, 4: 0.40}, shape=(), default value=2)),\n Integer(name=yolo3, prior={uniform: (3, 7), {}}, shape=(1,), default value=None),\n Precision(4, Real(name=yolo4, prior={reciprocal: (1.0, 10.0), {}}, shape=(3, 2), default value=None)),\n Integer(name=yolo5, prior={reciprocal: (1, 10), {}}, shape=(3, 2), default value=None)])\\\n\"\"\"\n ) # noqa", "def portfolio_checkinput(stock_ticker_list):\n if not isinstance(stock_ticker_list, list):\n raise InvalidTickerlist\n return 0", "def test_get_stock_price_summary4(self):\n\n actual = a1.stock_price_summary([0.02, 0.14, 0.10])\n expected = (0.26,0)\n self.assertEqual(actual, expected)", "def test_check_price_exception() -> None:\n with raises(StopProcessing):\n check_price(min_price=4, data={'p': 2.0})", "def test_int_field():", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def validate_price(price_str: str) -> bool:\n\n # if no digit is found, return false\n if not extract_required_data(data_str=price_str, req_type=r'\\d+'):\n return False\n\n # if per('/') is not found, return false\n if '/' not in price_str:\n print(\"Please specify item price per ('/') units\")\n return False\n\n # extract the unit from the price string\n unit = price_str[price_str.index('/') + 1:]\n\n # is unit not found in stored units, return false\n if not StandardUnits.has_value(unit) and unit not in units_mapping:\n return False\n\n return True", "def test_not_int(self):\n invalid_args = [\"random string\", \"123\", 123.5]\n for arg in invalid_args:\n assert meters_to_km(arg) is arg", "def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))", "def test_is_number(self):\n \n self.assertEqual(self.var.is_number(None), False)\n self.assertEqual(self.var.is_number(\"5\"), True)\n self.assertEqual(self.var.is_number(\"a\"), False)", "def __verify_numeric(self, action, value):\n if action != \"1\": # if the action is anything other than inserting:\n return True\n try:\n return value.isnumeric()\n except ValueError:\n return False", "def test_load_do_not_convert_non_quantity_strings(self):\n sage = ForceField(\"openff-2.0.0.offxml\")\n\n for parameter_handler_name in sage.registered_parameter_handlers:\n parameter_handler = sage.get_parameter_handler(parameter_handler_name)\n\n for parameter in parameter_handler.parameters:\n assert isinstance(parameter.smirks, str)\n assert not isinstance(parameter.smirks, unit.Quantity)\n\n # Ensure that, for example, F isn't 
converted to Farad\n if (\n parameter_handler_name == \"LibraryCharges\"\n and parameter.name is not None\n ):\n assert isinstance(parameter.name, str)\n assert not isinstance(parameter.name, unit.Quantity)", "def test_cannot_make_sale_with_wrong_datatypes(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_345\", \n \"quantity\":'Kummi'\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name & quantity should be a character & number respectively!')\n self.assertEqual(resp.status_code, 400)", "def testcharge_and_mult(self):\r\n assert self.data.charge == 0\r\n assert self.data.mult == 1", "def validate(self, attrs):\n if attrs['product_mrp'] <= 0:\n raise serializers.ValidationError(\"Price Cannot Be Zero or Negative.\")\n return attrs", "def test_only_nums_are_valid_inputs():\n bad_inputs = [[\"boop\", \"boink\"], 10, 99.99, {\"one\": 2, \"three:\": 4}]\n\n for input in bad_inputs:\n with pytest.raises(AttributeError):\n song_decoder(bad_inputs)", "def test_normal_decimal_input(self):\r\n ws_leader = \"S. O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def test__validate_title__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_title(input_value)", "def check_numeric(data, col):\n from pandas.api.types import is_numeric_dtype\n try:\n if is_numeric_dtype(data[col]):\n logging.info(f' {col} is numeric.')\n return data\n else:\n numdata = (data\n .drop([col], axis=1)\n .join(data[col].apply(pandas.to_numeric, errors='coerce'))\n )\n numcol = numdata[col].isnull().values().sum()\n logging.warning(f' %s rows in %s are non-numeric' % (numcol, col,))\n logging.warning(f' {col} is tested by coercing into numeric values.')\n return numdata\n except:\n logging.error(f' the format of %s is not testable.' 
% (col,))\n print(data.head(n=2))\n sys.exit(1)", "def test_api_for_invalid_ticker(self):\n ticker = \"xxx\"\n name = \"Julian\"\n data = {'name': name, 'ticker': ticker}\n # pylint: disable=broad-except\n req = self.client.post('/stocks/addstock/', data, follow=True, secure=True)\n # pylint: enable=broad-except\n self.assertEqual(req.status_code, 500)\n data = DailyStockQuote.objects.all()\n self.assertEqual(len(data), 0)", "def is_numeric(self) -> bool:\n return False", "def test_case_strings_numbers(self):\n data = {\"numbers\": \"1,4,6,e,r,6,t,1\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def check_for_int(check):", "def test_decimal_places_validation_errors(self):\n field = DecimalFractionField(max_digits=3, decimal_places=2)\n\n with self.assertRaises(ValidationError):\n # too many non-decimal digits\n field.clean(\"10\")\n\n with self.assertRaises(ValidationError):\n # too many decimal digits\n field.clean(\"1/100\")", "def test_get_stock_price_summary3(self):\n\n actual = a1.stock_price_summary([-0.02, -0.14, -0.10])\n expected = (0,-0.26)\n self.assertEqual(actual, expected)", "def test_get_stock_price_summary2(self):\n\n actual = a1.stock_price_summary([0,0])\n expected = (0,0)\n self.assertEqual(actual, expected)", "def test_wrong_type_of_cast_non_numeric_values(currency_df):\n with pytest.raises(TypeError):\n _ = currency_df.currency_column_to_numeric(\n \"d_col\",\n cast_non_numeric={\"foo\": \"zzzzz\"},\n )", "def validate_number(column_name, value, column_data_type=\"number\"):\n valid = value.isnumeric()\n if valid is False:\n try:\n float(value)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def test_make_order_with_some_data_as_empty_str(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': 200, 'quantity': ''\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Price and quantity must be ints >= 1')" ]
[ "0.73344076", "0.72246283", "0.72246283", "0.68624747", "0.66491514", "0.6644348", "0.66039526", "0.65944123", "0.6510274", "0.64985764", "0.6415647", "0.6365934", "0.6306292", "0.6289345", "0.6277049", "0.6271073", "0.62253267", "0.6222383", "0.6221313", "0.616871", "0.6168377", "0.616262", "0.6076003", "0.606838", "0.6030939", "0.60207576", "0.6013369", "0.59867245", "0.5977606", "0.5947591", "0.59282607", "0.5925831", "0.59139", "0.5905244", "0.58993393", "0.5895486", "0.58889407", "0.5883583", "0.58768463", "0.5875333", "0.58553195", "0.58488387", "0.58408594", "0.58352226", "0.5834123", "0.583077", "0.5822091", "0.58161247", "0.5807689", "0.58037674", "0.5800729", "0.5799818", "0.5799818", "0.57995546", "0.5796319", "0.57943153", "0.57809097", "0.57808936", "0.57621497", "0.5752572", "0.5748282", "0.5744725", "0.5738414", "0.5732144", "0.5728262", "0.57178295", "0.5715063", "0.57008094", "0.5699005", "0.5695883", "0.5682043", "0.5675182", "0.5674242", "0.56664616", "0.56632864", "0.56602174", "0.5648979", "0.56487393", "0.5637532", "0.56353396", "0.56344175", "0.56281376", "0.5625467", "0.5622425", "0.5600835", "0.55965245", "0.55910224", "0.5584385", "0.55828655", "0.55815697", "0.558075", "0.5574328", "0.55738026", "0.55628246", "0.55619466", "0.5559404", "0.55590796", "0.5556827", "0.5549009", "0.55460596" ]
0.7861267
0
Tests that a product which already exists in the Inventory cannot be added again
def test_product_exists_in_inventory(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'This product exists in the Inventory!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print (product + \" added.\")\n else:\n print (product + \" is already in the cart.\")", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n 
'/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def add_item(self, product, price):\r\n if not product in self.items_in_cart:\r\n self.items_in_cart[product] = price\r\n print(product + \" added.\")\r\n else:\r\n print(product + \" is already in the cart.\")", "def confirm_inventory(self, data, batch): # not used will be deprecated todo\n try:\n batch = batch\n data = data\n location = self.Location.find(['name', '=', 'MyInventory'])[-1]\n inventory = self.Inventory.find([('batch_number', '=', batch), ('location', '=', location.id)])[-1]\n lines = inventory.lines\n for i in data:\n product = \\\n self.Product.find(\n [('code', '=', i['code']), ('description', '=', 'Stock'), ('type', '=', 'goods')])[\n -1]\n supplier = self.Party.find(['name', '=', i['supplier']])[-1]\n for j in lines:\n if j.product == product:\n pro = j.product\n template = pro.template\n template.list_price = Decimal(i['rate'])\n template.save()\n pro.save()\n j.quantity = float(i['quantity'])\n j.supplier = supplier\n j.expiry_date = i['expiry_date']\n j.save()\n inventory.state = 'done'\n inventory.save()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)", "def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. 
This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))", "def test_update_cart_name_duplicate(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart2'})", "def is_product_exists(product_name) -> bool:\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Products WHERE product_name=?)\", (product_name,))\n return cursor.fetchone()[0] == 1", "def test_update_inventory(self):\n pass", "def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False", "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print product + \" added.\"\n else:\n print product + \" is already in the cart.\"", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_cart_creation_duplicate_name(self):\n cart_name = 'cart name'\n self.cart_item_manager.create_cart('123', cart_name, False)\n self.cart_item_manager.create_cart('124', cart_name, False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.create_cart('123', cart_name, False)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = 
json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def products_made(self, product) -> bool:\n return self.product_idx(product) is not None", "def _check_product(self):\n\n self.importable = False\n abcde = string.ascii_uppercase[:5]\n product_infos = self.retrieve_product_infos()\n\n if product_infos['product_code'] is not None:\n try:\n Products.objects.get(\n code=product_infos['product_code']\n )\n except Products.DoesNotExist:\n if (\n product_infos['product_name'] is not None\n and product_infos['product_code'] not in ProductImportation.codes\n and product_infos['product_code'] is not None\n and product_infos['product_url'] is not None\n and product_infos['image_url'] is not None\n and product_infos['quantity'] is not None\n and product_infos['ingredients'] is not None\n and product_infos['brands'] != []\n and product_infos['stores'] != []\n and product_infos['countries'] is not None\n and product_infos['compare_to'] is not None\n and product_infos['categories_hierarchy'] is not None\n and product_infos['nutriscore'] in abcde\n and all([product_infos[nutriment] >= 0 for nutriment in self.list_nutriments])\n and Categories.objects.filter(name=product_infos['compare_to']).count() > 0\n ):\n self.name = product_infos['product_name']\n self.product_infos = product_infos\n self.code = product_infos['product_code']\n ProductImportation.codes.append(self.code)\n self.importable = True\n\n return self.importable", "def _item_exists(self, location):\n \"Does nothing\"", "def exist(self, product_item):\n cursor = self.database.cursor(named_tuple=True, buffered=True)\n sql = \"SELECT * FROM favoris WHERE produit_id = '{}' \".format(product_item.id)\n cursor.execute(sql)\n rows = cursor.fetchone()\n if not rows:\n return False\n return True", "def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of 
stock!')\n self.assertEqual(resp.status_code, 404)", "def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def test_add_item(self):\n self.inv.add_item(self.item_helmet)\n str_inventory = self.inv.pretty\n str_item = self.item_helmet.pretty\n\n self.rebuild_instance()\n str_unequipped = self.inv.unequipped[0].pretty\n\n assert str_inventory == self.inv.pretty\n assert str_item == str_unequipped", "def create_item(self, obj):\n logger.info('ItemProduct adding item initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n unit, = self.ProductUom.search([('name', '=', obj['units'])])\n template = self.ProductTemplate()\n try:\n if self.Product.search([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n except Exception:\n pass\n template.category = self.ProductCategory.search([('name', '=', obj['category'])])[-1]\n template.default_uom = unit\n template.purchase_uom = unit\n template.type = 'goods'\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price 
= Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n template.save()\n # transaction.cursor.commit()\n product = self.Product()\n product.template = template\n product.code = obj['id']\n product.description = 'Stock'\n product.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False", "def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_if_app_can_search_for_existing_lists_with_products(self):\n product_to_add = {'product':'nikes', 'Quantity':3, 'Amountspent':5000}\n jsonproduct_to_add = json.dumps(product_to_add)\n add_list = self.client.post('/shoppinglists/',\n data = self.shopllist, \n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n add_product=self.client.post('/shoppinglist/shoes/items/',\n data=jsonproduct_to_add,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforproducts=self.client.get('/searchProduct/?q=nike',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"Success\",str(searchforlists.data))\n self.assertIn(\"Success\",str(searchforproducts.data))\n self.assertEqual(searchforproducts.status_code,200)\n self.assertEqual(searchforlists.status_code,200)", "def test_add_and_remove_two_items(self):\n login = LoginPage(self.driver) #SAUCE-LAB-5\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products[0]\n first_item: InventoryItem\n first_item.add_to_cart()\n print('\\n')\n print(first_item.get_title())\n print(first_item.get_description())\n print(first_item.get_price())\n print('*' * 80)\n second_item = inventory_page.products[4]\n second_item: InventoryItem\n second_item.add_to_cart()\n print('\\n')\n print(second_item.get_title())\n print(second_item.get_description())\n print(second_item.get_price())\n print('*' * 80)\n first_item.remove_from_cart()\n second_item.remove_from_cart()\n print(f'Products {first_item.get_title()} and {second_item.get_title()} were successfully removed')", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = 
PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def get_exists(self):\n self.exist_products = {}\n limit = 100\n with sa.create_engine(dsn).connect() as dbcon:\n count = [x for x in dbcon.execute(Product.count())][0][0]\n for i in range(count//limit+1):\n sql = sa.select([Product.c.id, Product.c.title]).limit(limit).offset(limit*i)\n part = {hash(x[1]): x[0] for x in dbcon.execute(sql)}\n self.exist_products.update(part)", "def products_exist(cls, *skus):\n try:\n stock_level_ids = cls._get_stock_item_ids(*skus)\n except linnapi.exceptions.InvalidResponseError:\n return False\n if not set(skus).issubset(set(stock_level_ids.keys())):\n return False\n return True", "def test_product_is_installed(self):\n qi_tool = getToolByName(self.portal, 'portal_quickinstaller')\n pid = 'imio.media'\n installed = [p['id'] for p in qi_tool.listInstalledProducts()]\n self.assertTrue(pid in installed,\n 'package appears not to have been installed')", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_update_inventory_not_found(self):\n new_inventory = {'name': 'conditioner', 'quantity': 1, 'status': 'new'}\n data = json.dumps(new_inventory)\n resp = self.app.put('/inventories/0', data=data, content_type='application/json')\n self.assertEquals(resp.status_code, status.HTTP_404_NOT_FOUND)", "def add_to_inventory(self, newItem):\n\n if len(self.player_inventory) >= 8:\n print(\"\"\"You already have the maximum of 7 items in your inventory,\n looks like you will need to get rid of an item to get {}\"\"\".format(newItem.name))\n\n print(\"Would you like to get rid of an item to add the {} to your inventory?\".format(newItem.name))\n\n if 'yes' in choice:\n dropping = player_inventory.drop()\n print(dedent('Okay, {} was removed from your inventory.'.format(item_name)))\n\n elif 'no' in choice:\n print(dedent('Okay redirecting you back to shop.'))\n return False\n\n else:\n print(dedent('Seems like you did not make a valid choice, aborting ...'))\n return False\n\n else:\n\n if newItem.type == \"food\":\n self.player_inventory[newItem.name] = 
newItem.health_addition\n elif newItem.type == \"weapon\":\n self.player_inventory[newItem.name] = newItem.quality\n\n print(dedent(\"\"\"\n ##############################################\n Nice, the {} has been added to your inventory!\n \"\"\".format(newItem.name)))", "def is_product_id_exists(product_id) -> bool:\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Products WHERE id_product=?)\", (product_id,))\n return cursor.fetchone()[0] == 1", "def check_if_already_prepared(self, instance, product_attribute):\n attribute_exist = self.search([('ks_shopify_instance', '=', instance.id),\n ('ks_product_attribute', '=', product_attribute.id)], limit=1)\n if attribute_exist:\n return attribute_exist\n else:\n return False", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_all(self): #SAUCE-LAB-7\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('\\n')\n print('Not all items were added')", "def test_create_inventory_with_no_name(self):\n new_inventory = {'status': 'new'}\n resp = self.app.post('/inventories', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_product_is_installed(self):\n pid = 'collective.favorites'\n installed = [p['id'] for p in self.qi_tool.listInstalledProducts()]\n self.assertTrue(pid in installed,\n 'package appears not to have been installed')", "def test_add_cart_item_nonexistent_cart(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='111',\n cart_id='123',\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')", "def test_product_installed(self):\n self.assertTrue(\n self.installer.is_product_installed('{{cookiecutter.package_name}}')\n )", "def assert_tracking_unique(self, serial_numbers):\n Lot = self.env[\"stock.production.lot\"]\n self.ensure_one()\n if self.tracking != \"none\":\n lots = Lot.search(\n [(\"product_id\", \"=\", self.id), (\"name\", \"in\", serial_numbers)]\n )\n if lots:\n raise ValidationError(\n _(\"%s numbers %s already in use for product %s\")\n % (self.tracking.capitalize(), \" \".join(lots.mapped(\"name\")), self.name)\n )", "def resource_exists(self, resource):\n 
products = Product.select(self.env, where={'name' : resource.id})\n return bool(products)", "def test_add_with_end_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-1\", \n \"3\", \"2020-12-1\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_product_is_installed(self):\n try:\n result = self.installer.is_product_installed(PROJECT_NAME)\n except AttributeError:\n result = self.installer.isProductInstalled(PROJECT_NAME)\n self.assertTrue(result)", "def test_create_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Banana',\n 'description': '''\n Bananas are one of the most widely consumed fruits in the\n world for good reason. Eating them could help lower blood\n pressure and reduce the risks of cancer and asthma.\n '''\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def register_product(p: Product) -> ExecRet:\n market = get_market()\n pid = p.pid\n if pid in market.products.keys():\n return ExecRet.err(message='pid %d already exists' % pid)\n market.add_product(p)\n LOGGER.info('added product %s' % p.json())\n return ExecRet.ok()", "def add_product():\n name = input(\"\\nPlease enter the name of the new product: \")\n\n quantity = input(\"Please enter the quantity of the new product: \")\n while quantity.isdigit() == False:\n print(\"Please enter a valid number.\")\n quantity = input(\"Please enter the quantity of the new product: \")\n quantity = int(quantity)\n\n price = input(\"Please enter the price of the new product(in dollars): \").strip(\"$\")\n while True:\n try:\n price = float(price)\n break\n except ValueError:\n print(\"Please enter a valid price\")\n price = input(\"Please enter the price of the new product: \")\n\n price = price * 100\n\n try:\n Product.create(product_name=name,\n product_price=price,\n product_quantity=quantity)\n latest_item = Product.select().order_by(Product.product_id.desc()).get()\n print(f\"You just added {latest_item.product_name} as the {latest_item.product_id}th item in the inventory.\\n\")\n\n except IntegrityError:\n to_update = Product.get(product_name=name)\n to_update.product_name = name\n to_update.product_price = price\n to_update.product_quantity = quantity\n to_update.date_updated = datetime.datetime.now()\n to_update.save()\n print(f\"You just updated {to_update.product_name}\\n\")\n input(\"\\nPress ENTER to continue\")\n clear()", "def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())", "def test_update_inventory_with_no_name(self):\n new_inventory = {'id': 2, 'quantity': 2, 'status': 'new'}\n resp = self.app.put('/inventories/2', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n 
headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'product updated!')\n self.assertEqual(resp.status_code, 200)", "def test_add_to_cart(self):\n\n # Log the user in that is not the seller\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Confirm that product title appears in cart\n response = self.client.get(reverse('website:cart'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the cart displays product title, but not the title for product2\n self.assertIn('<h6 class=\"mr-auto p-2\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mr-auto p-2\">Test Product2</h6>'.encode(), response.content)\n\n # Confirm that the post returns a response of 302\n response = self.client.get(reverse(\"website:add_to_cart\", args=(1,)))\n self.assertEqual(response.status_code, 302)", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def _check_sn_uniqueness(self):\n if self.product_tracking == 'serial' and self.lot_producing_id:\n sml = self.env['stock.move.line'].search_count([\n ('lot_id', '=', self.lot_producing_id.id),\n ('location_id.usage', '=', 'production'),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ])\n if sml:\n raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))\n\n for move in self.move_finished_ids:\n if move.has_tracking != 'serial' or move.product_id == self.product_id:\n continue\n for move_line in move.move_line_ids:\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for byproduct %(product_name)s has already been produced',\n number=move_line.lot_id.name,\n product_name=move_line.product_id.name)\n co_prod_move_lines = self.move_finished_ids.move_line_ids.filtered(lambda ml: ml.product_id != self.product_id)\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_dest_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)\n\n for move in self.move_raw_ids:\n if move.has_tracking != 'serial':\n continue\n for move_line in 
move.move_line_ids:\n if float_is_zero(move_line.qty_done, precision_rounding=move_line.product_uom_id.rounding):\n continue\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for component %(component)s has already been consumed',\n number=move_line.lot_id.name,\n component=move_line.product_id.name)\n co_prod_move_lines = self.move_raw_ids.move_line_ids\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_dest_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)", "def test_create_ingredient_successful(self):\n\n payload = {'name': 'Salt'}\n\n res = self.client.post(INGREDIENTS_URL, payload)\n\n exists = Ingredient.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(exists)", "def add_to_cart(self, cart_id, product):\n # Go through each producer\n for i in range(len(self.buff)):\n with self.locks[i]:\n # Go through his queue\n for prod in self.buff[i]:\n # If the product is found take it\n # Add it to the cart\n # Also keep the id of the producer in case we want to return it\n if product.__eq__(prod):\n self.carts[cart_id].append((prod, i))\n self.buff[i].remove(prod)\n return True\n return False", "def test_create_ingredient_successful(self):\n payload = {'name':'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n exists = Ingredient.objects.all().filter(user=self.user, name=payload['name']).exists\n self.assertTrue(exists)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_create_ingredient_succesfull(self):\n\n payload = {'name': 'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n\n exist = Ingredient.objects.filter(\n user=self.user,\n name=payload['name'],\n ).exists()\n self.assertTrue(exist)", "def is_product_saved(self):\n\n db.execute(\"SELECT 
product_id FROM Substitute WHERE product_id = %s\",\n (self.product.id,))\n product = db.fetch()\n if product:\n return True\n else:\n return False", "def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0", "def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print (product + \" removed.\")\n else:\n print (product + \" is not in the cart.\")", "def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)", "def add_new_product_to_store_inventory(user_name, product_details, store_name):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.ADD_PRODUCT_TO_INVENTORY.value,\n store_name)\n store_handler.add_new_product_to_store_inventory(user_name, product_details, store_name)", "def remove_item(self, product):\r\n if product in self.items_in_cart:\r\n del self.items_in_cart[product]\r\n print(product + \" removed.\")\r\n else:\r\n print(product + \" is not in the cart.\")", "def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_meal_name_already_exists(self):\n\n with self.client:\n self.add_meal(\"fries\", 10000)\n response = self.add_meal(\"fries\", 10000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal name already exists\")\n self.assertEqual(response.status_code, 409)", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def test_delete_inventory(self):\n # save the current number of inventories for later comparision\n inventory_count = self.get_inventory_count()\n # delete a inventory\n resp = self.app.delete('/inventories/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(resp.data), 0)\n new_count = self.get_inventory_count()\n self.assertEqual(new_count, inventory_count - 1)", "def test_installed(self):\n self.assertTrue(self.qi.isProductInstalled(PROJECTNAME))", "def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])", "def _check_if_ingredients_exists(self, drink_type: str, drink_details: Beverages) -> None:\n list_ingredient = drink_details.get_ingredients(drink_type=drink_type)\n for ingredient in list_ingredient:\n if not self._inventory.check_if_ingredient_exists(\n ingredient=ingredient\n ):\n raise InventoryUnavailable(\n inventory_type=ingredient, drink_type=drink_type\n )", "def check_if_ingredient_exists(self, ingredient: str) -> bool:\n return ingredient in self.inventory_availability", "def item_exists(item_id):\n return item_id in all_items", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = 
date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_add_stock_item(self):\n pass", "def check_products(self, adi):\r\n results = []\r\n products = self.get_products(adi)\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Checking product '{}'... \".format(product[\"name\"]), end='')\r\n detail = self.get_product_detail(adi, product_id=product[\"productId\"], product_name=product[\"name\"])\r\n if self.rf.valid_product_detail(detail):\r\n print(\"Valid.\")\r\n result = \"Available\"\r\n else:\r\n print(\"INVALID.\")\r\n result = \"Not available\"\r\n results.append([product[\"name\"], result])\r\n return results", "def test_cart_creation_duplicate_default_will_not_create_new_cart(self):\n test_user_id = '123'\n cart_id_1 = self.cart_item_manager.create_cart(test_user_id, 'Cart1', True)\n cart_id_2 = self.cart_item_manager.create_cart(test_user_id, 'Cart3', True)\n self.assertEqual(cart_id_1, cart_id_2)\n self.assertEqual(1, len(self.cart_item_manager.get_user_carts(test_user_id)))", "def _is_duplicate (asin, current_list):\n\n dup = False\n for m in current_list:\n try:\n if unicode(asin) == m['sku']:\n dup = True\n break\n except KeyError:\n pass\n return dup", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed('{{cookiecutter.package_name}}')\n )", "def save_item(self, obj):\n logger.info('ItemProduct adding item initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n product = self.Product.search([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n template = self.ProductTemplate(id=product.template.id)\n unit, = self.ProductUom.search([('name', '=', obj['units'])])\n template.default_uom = unit\n template.purchase_uom = unit\n template.category = self.ProductCategory.search([('name', '=', obj['category'])])[-1]\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price = Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n template.save()\n # transaction.cursor.commit()\n product.description = 'Stock'\n product.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def 
test_create_ingredient_successful(self):\n payload = {\n 'name': 'turmeric'\n }\n\n self.client.post(INGREDIENT_URL, payload)\n\n exists = Ingredients.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)", "def test_add_duplicate_book(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"Book already exists\")\n self.assertEqual(response.status_code, 409)" ]
[ "0.7496905", "0.7326862", "0.71235555", "0.68696725", "0.6750119", "0.6638737", "0.65924627", "0.6544556", "0.65392476", "0.6516768", "0.6488533", "0.6471995", "0.64518577", "0.6451675", "0.64378035", "0.63537824", "0.6353224", "0.63370115", "0.6325946", "0.6315863", "0.63116044", "0.62974346", "0.6286299", "0.62745404", "0.62663484", "0.62622607", "0.62600285", "0.62484556", "0.62286633", "0.62086487", "0.6208013", "0.6185048", "0.6149558", "0.6141064", "0.6124516", "0.6103421", "0.6087459", "0.605696", "0.60552114", "0.6053559", "0.60462797", "0.60404795", "0.60273373", "0.60068464", "0.6005224", "0.59959793", "0.59681916", "0.5945795", "0.5940647", "0.5934371", "0.5931882", "0.59297585", "0.5916298", "0.59068173", "0.59035194", "0.5903031", "0.5897652", "0.5895531", "0.5891988", "0.5888081", "0.5876346", "0.5876107", "0.58744013", "0.58731157", "0.5870393", "0.58654135", "0.58568597", "0.5854226", "0.585029", "0.5845865", "0.5820221", "0.58074474", "0.58054316", "0.57960296", "0.5794992", "0.5786563", "0.5775782", "0.57700014", "0.57691187", "0.5764748", "0.57487726", "0.5747423", "0.57393295", "0.5731437", "0.5718657", "0.5699501", "0.5694146", "0.5693963", "0.56910545", "0.5689773", "0.56834215", "0.56831825", "0.5682135", "0.56776565", "0.5673089", "0.5672561", "0.56719327", "0.5668389", "0.5667489", "0.56585205" ]
document_score: 0.81727195
document_rank: 0
query: Tests that a user can view a product in the Inventory
def test_view_a_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.get(
        '/api/v1/products/1',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertIn('NY_denims', str(reply['product']))
    self.assertEqual(resp.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')", "def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_product_detail_view(client, sample_product, user_company, authenticated_user):\n products = Product.objects.all()\n for product in products:\n product_detail_view = reverse('product-detail', kwargs={'pk': product.pk})\n response = client.get(product_detail_view)\n #The view should return 200 for each product that exists\n assert response.status_code == 200\n content = response.content.decode(response.charset)\n #With content specific for each product\n assert product.name in content\n #checking for \"page not found\" if product does not exist\n product_not_exist_detail_view = reverse('product-detail', kwargs={'pk':104})\n response = client.get(product_not_exist_detail_view)\n assert response.status_code == 404 \n #Authenticated user but not the owner of the product returns 404\n if authenticated_user and not user_company:\n product_detail_view = reverse('product-detail', kwargs={'pk': 6})\n response = client.get(product_detail_view)\n assert response.status_code == 404", "def test_view_product_detail(self):\n product = sample_product(supplier_id=self.user)\n\n url = detail_url(product.id)\n res = self.client.get(url)\n\n serializer = ProductSerializer(product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def 
test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)", "def test_view_cart_contents(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 200)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n 
prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_add_product_view_for_authenticated_users(user_company, client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 200", "def test_product_by_category_logged_in_user(self):\n\n # Log In user that is not the seller, check that the products not created by the user do show up\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Make sure that only the product associated with product category 1 is displayed\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_non_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_non_seller.status_code, 200)\n\n # Make sure that only the product associated with product category 2 is displayed\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_non_seller.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_non_seller.content)", "def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = 
user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])", "def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_product_list_view(sample_product, user_company, client):\n product_list_url = reverse('product-list')\n response = client.get(product_list_url)\n assert response.status_code == 200\n assert Product.objects.count() == 9\n products = Product.objects.all()\n content = response.content.decode(response.charset)\n for product in products:\n assert product.name in content", "def test_basic_info(self):\n\n url = reverse('stock-item-detail', kwargs={'pk': 1})\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n html = str(response.content)\n\n # Part name\n self.assertIn('Stock Item: M2x4 LPHS', html)\n\n # Quantity\n self.assertIn('<h5>Available Quantity</h5>', html)\n self.assertIn('<h5>4000', html)\n\n # Batch code\n self.assertIn('Batch', html)\n self.assertIn('<td>B123</td>', html)\n\n # Actions to check\n actions = [\n \"id=\\\\\\'stock-count\\\\\\' title=\\\\\\'Count stock\\\\\\'\",\n \"id=\\\\\\'stock-add\\\\\\' title=\\\\\\'Add stock\\\\\\'\",\n \"id=\\\\\\'stock-remove\\\\\\' title=\\\\\\'Remove stock\\\\\\'\",\n \"id=\\\\\\'stock-move\\\\\\' title=\\\\\\'Transfer stock\\\\\\'\",\n \"id=\\\\\\'stock-duplicate\\\\\\'\",\n 
\"id=\\\\\\'stock-edit\\\\\\'\",\n \"id=\\\\\\'stock-delete\\\\\\'\",\n ]\n\n # Initially we should not have any of the required permissions\n for act in actions:\n self.assertNotIn(act, html)\n\n # Give the user all the permissions\n self.assignRole('stock.add')\n self.assignRole('stock.change')\n self.assignRole('stock.delete')\n\n response = self.client.get(url)\n html = str(response.content)\n\n for act in actions:\n self.assertIn(act, html)", "def test_list_products(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[1]['description'], 'Descripcion producto 1')", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def test_search_view_when_user_logged_in(self):\n self.user = User.objects.create_user(\n username='person',\n email='[email protected]',\n password='test12345@_password',\n )\n self.client.login(\n username='person',\n password='test12345@_password')\n\n response = self.client.get('/search/?q=')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"products.html\")", "def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_search_product_detail(self, setup):\n product_id = self.nutella.id\n path = reverse('website:detail', args=(product_id,))\n assert resolve(path).view_name == 'website:detail'", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_add_product_view_for_unauthenticated_users(client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 302\n assert response.url == \"/accounts/login/?next=/products/add-product/\"", "def test_get_product_detail(self):\n\n response = self.client.get(reverse('website:product_details', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n 
# Product title appears in HTML response content\n self.assertIn('<h1>Test Product</h1>'.encode(), response.content)\n self.assertNotIn('<h1>Test Product2</h1>'.encode(), response.content)", "def test_creating_supply_user(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def test_stealability(self):\n prod = Product('Test Product', price=100, weight=1)\n self.assertEqual(prod.stealability(), \"Very stealable!\")", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_get_product(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.data['name'], 'Producto 1')\n self.assertEqual(response.data['description'], 'Descripcion producto 1')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '24.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser1')\n self.assertEqual(response.data['category']['name'], 'general')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get('/api/1.0/products/2/')\n self.assertEqual(response.data['name'], 'Producto 2')\n self.assertEqual(response.data['description'], 'Descripcion producto 2')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '312.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser2')\n self.assertEqual(response.data['category']['name'], 'deportes')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_variant_view(self):\n response = self.client.get(reverse('variant-view', args=[self.variant.id]))\n self.assertContains(response, 
self.variant.chromosome)\n self.assertContains(response, self.variant.db_snp_id)\n self.assertContains(response, self.proband.gel_id)\n self.assertContains(response, self.variant.reference)\n self.assertEquals(response.status_code, 200)", "def test_retrieve_product(self):\n product_pk = 1\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['id'], product_pk)", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n 
)\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)", "def test_update_inventory(self):\n pass", "def test_product_by_category_logged_in_seller(self):\n\n # Log In user that is the seller, check that their products to do not show up\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the returned HTML does not include either product\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_seller.status_code, 200)\n\n # Ensure that the returned HTML does not include either product\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_seller.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_seller.content)", "def test_cart_correct_user_templates_rendered_with_call(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertTemplateUsed(response, \"cart.html\")\n self.assertTemplateUsed(response, \"base.html\")\n self.assertTemplateUsed(response, \"layout/head.html\")\n self.assertTemplateUsed(response, \"components/navbar.html\")\n self.assertTemplateUsed(response, \"components/cart-contents.html\")\n self.assertTemplateUsed(response, \"components/footer.html\")\n self.assertTemplateUsed(response, \"layout/scripts.html\")", "def test_product_detail(self):\n # first performing create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performing detail\n self._detail_model(\"product\", self.product_data, id, [\"name\", \"description\", \"image_link\", \"price\"])\n \n self.assertIsNotNone(id)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_detail(self):\n self.assertEqual(self.product_1.id, 1)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n 
expected = {\n 'id': self.product_1.id,\n 'name': self.product_1.name,\n 'sku': self.product_1.sku,\n 'category': self.product_1.category.id,\n 'description': self.product_1.description,\n 'price': str(self.product_1.price),\n 'created': '2018-12-20T10:15:30Z',\n 'featured': self.product_1.featured\n }\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(response.json(), expected)", "def test_get_reusableitem_api_public(self):\n\n self.reusableitem_1.is_public = True\n self.reusableitem_1.save()\n\n self.client.logout()\n\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_product_by_category_guest(self):\n\n # Guest user, searching for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that new_product is the only product that shows in the query\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)", "def test_func(self, user):\n return self.get_object().admin == user", "def test_products_ref_users_get(self):\n pass", "def test_view_product_description_page(self):\r\n response = self.client.get('/product_description/nutella/')\r\n self.assertEqual(response.status_code, 200)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.search\"\n current_page = 1\n search_info = json.dumps({\n })\n\n result = self.access_api(flag = flag, api = api, current_page = current_page, search_info = search_info)\n self.assertTrue('data_list' in result)", "def test_vault_get_vault_item(self):\n pass", "def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")", "def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d 
printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()", "def test_get_product(self):\n # get the id of a product\n test_product = self._create_products(1)[0]\n resp = self.app.get(\n \"/products/{}\".format(test_product.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_product.name)\n \n # print the repr of a product\n rep = \"%s\" % test_product", "def test_get_dealer_active_inventory(self):\n pass", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_shoppingitems_page(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # send a GET request\n res = self.app.get('/shoppingitems/Easter')\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"You can now add your items\", str(res.data))", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def assert_can_view(selenium, obj):\n actual_obj = _get_ui_service(selenium, obj).get_obj_from_info_page(obj)\n obj_copy = copy.deepcopy(obj)\n # Code for working with custom attributes appears to be buggy\n base.Test.general_equal_assert(\n obj_copy.repr_ui(), actual_obj, \"audit\", \"custom_attributes\",\n \"program\", \"external_slug\", \"external_id\")", "def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")", "def test_visualize_equipment(self):\n pass", "def test_new_equipment_page(self):\n create_user()\n login(self.app, 'me1', 'password')\n\n response = self.app.get('/new_equipment', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n response_text = response.get_data(as_text=True)\n self.assertIn('New Equipment', response_text)\n self.assertIn('Name', response_text)\n self.assertIn('Quantity', response_text)\n self.assertIn('Submit', response_text)\n\n self.assertNotIn('Calendar ', response_text)\n self.assertNotIn('Logout', response_text)\n self.assertNotIn('Login', response_text)\n self.assertNotIn('Sign up', response_text)", "def test_gene_detail_view(self):\n \n test_response = self.client.get('/gene/Pikfyve')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('gene' in test_response.context) \n self.assertTemplateUsed(test_response, 'gene-detail.html')\n self.assertEqual(test_response.context['gene'].pk, u'Pikfyve')\n \n #tests a nonfunctional url\n test_response = 
self.client.get('/gene/Pikfour')\n self.assertEqual(test_response.status_code, 404)", "def test_list_products_filtered_by_seller_name(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?seller=testuser1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')", "def test_list_products_filtered_by_selling_status(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?selling=3')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')", "def test_list_products(self):\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that the context contains 2 products\n self.assertEqual(len(response.context['products']),2)\n\n # Check that the product title appears in the rendered HTML content\n self.assertIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)", "def test_professor_can_login_to_web_portal(professor):", "def test_admin_view_access(request_ctx):\n user = User.get(email=\"[email protected]\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"[email protected]\" in rv.data", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_product_landing(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create a product\n p = product(save=True)\n\n # Create some topics\n topics = []\n for i in range(11):\n topics.append(topic(save=True))\n\n # Create a document and assign the product and 10 topics.\n doc = revision(is_approved=True, save=True).document\n doc.products.add(p)\n for i in range(10):\n doc.topics.add(topics[i])\n\n self.refresh()\n\n # GET the topic page and verify the content\n url = reverse('products.product', args=[p.slug])\n r = self.client.get(url, follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(10, len(doc('#help-topics li')))", "def test_list_products(self):\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n\n res = self.client.get(PRODUCTS_URL)\n\n products = Product.objects.all().order_by('-name')\n serializer = ProductSerializer(products, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(len(res.data), 3)", "def test_view_reteta_detail(self):\n reteta = sample_reteta(user=self.user)\n reteta.tags.add(sample_tag(user=self.user))\n reteta.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(reteta.id)\n res = self.client.get(url)\n serializer = 
RetetaDetailSerializer(reteta)\n self.assertEqual(res.data, serializer.data)", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Read only', loginAsUser=True)\n \n #Navigate to Repositories Page\n self.get_RepositoriesPage(\"Firmware\")\n \n self.logout()\n \n #Verify Options", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_recipe_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'password3456',\n )\n sample_recipe(self.user)\n sample_recipe(user2)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.filter(user=self.user)\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def test_user_can_read(app, resource):\n with app.user():\n # Read resource\n app.client.get('/' + resource, assert_status=200)\n\n # Create fake item and read item\n _id = app.data.driver.db[resource].insert({})\n app.client.get('/%s/%s' % (resource, _id),\n assert_status=200)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_video_detail_view_permission(\n mock_user_moira_lists, logged_in_apiclient, user_view_list_data\n):\n client = logged_in_apiclient[0]\n 
mock_user_moira_lists.return_value = {user_view_list_data.moira_list.name}\n url = reverse(\n \"video-detail\", kwargs={\"video_key\": user_view_list_data.video.hexkey}\n )\n result = client.get(url)\n assert result.status_code == status.HTTP_200_OK\n assert (\n json.loads(result.context_data[\"js_settings_json\"])[\"is_video_admin\"] is False\n )", "def test_product_available_by_stock(self):\n product = ProductFactory(stock_amount=10)\n self.assertEqual(product.left_in_stock, 10)\n self.assertTrue(product.is_available())", "def test_order_view_permissions(client, user):\n random_user = UserFactory.create(is_staff=False, is_superuser=False)\n order = OrderFactory.create(user=user)\n client.force_login(random_user)\n resp = client.get(reverse(\"order-api\", kwargs={\"pk\": order.id}))\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN\n order.user = random_user\n order.save()\n resp = client.get(reverse(\"order-api\", kwargs={\"pk\": order.id}))\n assert resp.status_code == statuses.HTTP_200_OK", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_admin_signup_visibility(app, resource):\n with app.admin(nethz='somethingsomething'):\n headers = {'If-Match': 'Wrong'}\n\n # Create fake signup with different nethz\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can see signups\n response = app.client.get('/' + resource,\n headers=headers,\n assert_status=200)\n assert len(response['_items']) == 1\n\n # Items\n url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(url, headers=headers, assert_status=200)\n\n # Patch (if we can see item, we get 412 since etag is wrong)\n app.client.patch(url, headers=headers, data={}, assert_status=412)\n\n # Delete (etag missing again)\n app.client.delete(url, headers=headers, assert_status=412)", "def test_stealable(self):\r\n prod = Product(name='Test Product',\r\n weight=100, price=1,\r\n flammability=0.5)\r\n self.assertEqual(prod.stealability(), \"Not so stealable...\")", "def test_ingredients_limited_to_user(self):\n user2 = create_user(email='[email protected]')\n Ingredient.objects.create(user=user2, name='Salt')\n ingredient = Ingredient.objects.create(user=self.user, name='Pepper')\n\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], ingredient.name)\n self.assertEqual(res.data[0]['id'], ingredient.id)", "def test_details(self, mock_product, mock_nutrival):\n mock_product.return_value = MagicMock(\n side_effect=Products.objects.filter()\n )\n mock_product.return_value.first.return_value = Products(rating=\"a\")\n mock_nutrival.return_value = MagicMock(\n side_effect=Products.objects.filter()\n )\n mock_nutrival.return_value.first.return_value = NutritionalValues()\n response = self.client.get(\"/details/1/\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/details.html\")\n self.assertIsInstance(response.context[\"product\"], Products)\n self.assertIsInstance(response.context[\"nutrival\"], NutritionalValues)\n self.assertIn(\"nutriscore-a\", response.context[\"nutriscore\"])" ]
[ "0.6914857", "0.68370014", "0.68132645", "0.67801803", "0.6777728", "0.6743917", "0.67292273", "0.6651706", "0.663699", "0.6590794", "0.6579514", "0.654358", "0.6536533", "0.64720875", "0.64325994", "0.6371396", "0.63542134", "0.63323015", "0.6293569", "0.6277347", "0.62515086", "0.62446225", "0.62368655", "0.6210974", "0.6210195", "0.62101847", "0.61971897", "0.618447", "0.61530405", "0.6144851", "0.6143726", "0.61327547", "0.6123174", "0.6096316", "0.60956883", "0.60944", "0.6071992", "0.6066017", "0.60506463", "0.6050403", "0.60441935", "0.60383075", "0.6035498", "0.6011358", "0.59978265", "0.59943694", "0.5992338", "0.5983419", "0.59754777", "0.59717226", "0.5970873", "0.59702307", "0.5963742", "0.5957048", "0.59404105", "0.59229076", "0.59166217", "0.5910995", "0.5908367", "0.5892192", "0.58747804", "0.58416635", "0.5837805", "0.58354694", "0.58300143", "0.58212227", "0.5817823", "0.58061564", "0.5790849", "0.577993", "0.57765573", "0.5771446", "0.5758779", "0.5757446", "0.5753973", "0.574166", "0.57411754", "0.5732888", "0.5724478", "0.5720595", "0.5714538", "0.57080096", "0.5696316", "0.5690902", "0.5686239", "0.56856704", "0.5678448", "0.5678448", "0.56768507", "0.56748056", "0.5646773", "0.5644567", "0.5635471", "0.5635226", "0.56270486", "0.5624728", "0.5624484", "0.5624433", "0.56194997", "0.5617807" ]
0.74070454
0
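The row above ends with parallel lists of candidate test functions and score strings. A minimal sketch of loading and inspecting such rows, assuming they are exported as JSON Lines and keyed as "negatives" and "negative_scores" (the key names and file layout are assumptions, not shown in this dump):

import json

def load_records(path):
    # One parsed row per non-empty line of a JSON Lines export (assumed layout).
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)

def hardest_negatives(record, top_k=5):
    # Scores in this dump look like strings (e.g. "0.6914857"), so cast to float,
    # then pair each candidate passage with its score and keep the top-scoring ones.
    scored = zip(record["negatives"], (float(s) for s in record["negative_scores"]))
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:top_k]

For the row above, the candidates scoring closest to the document's own 0.74070454 are the ones most useful as hard negatives.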
Tests that a user cannot view a product in the Inventory with a blacklisted token
def test_cannot_view_a_product_with_blacklisted_token(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/logout',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'You are successfully logged out!')
    self.assertEqual(resp.status_code, 200)

    resp = self.client.get(
        '/api/v1/products/1',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n 
category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n 
headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_view_disabled(self, method, url):\n response = 
getattr(self.client, method)(url)\n assert response.status_code == 403", "def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def assert_user_cannot_read(self, user, video):\n livesession = LiveSessionFactory(\n email=user.email,\n is_registered=True,\n user=user,\n video=video,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_can_not_cancel_current_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = 
self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)", "def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_video_detail_no_permission(\n mock_user_moira_lists, logged_in_apiclient, user_admin_list_data\n):\n client, _ = logged_in_apiclient\n mock_user_moira_lists.return_value = {\"some_other_list\"}\n url = reverse(\n \"video-detail\", kwargs={\"video_key\": user_admin_list_data.video.hexkey}\n )\n result = client.get(url)\n assert result.status_code == status.HTTP_403_FORBIDDEN", "def test_feature_disabled(self, url):\n response = self.client.get(url)\n assert response.status_code == 403\n response = self.client.post(url)\n assert response.status_code == 403", "def test_forbidden(self):\n self._error_test(fitbit_exceptions.HTTPForbidden)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def 
test_can_not_book_running_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_wrong_token_permission_denied(self, client, token):\n with disable_logs(logging.WARNING):\n assert_hook_status(client, status=403, token=f\"{token}wrong\")", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_custom_403(self):\n c = Client()\n response = c.post(\"/apimock/mocked/mocked_get\", data={\"post\": \"data\"})\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\"wrong used test Data\", response.content)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_invalid_credentials_forbidden(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPForbidden)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_no_permission(client, mocker):\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=False\n )\n resp = client.post(reverse(\"order-fulfillment\"), data={})\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_unauthorized_request(self):\n # test false token\n user_id = self.create_user()[0]\n question_id = int(self.create_question(user_id)[0])\n false_token = self.post_data(question_id, headers={\"Authorization\":\"Bearer wrongtoken\"})\n self.assertEqual(false_token.status_code, 401)", "def 
test_can_not_reserve_booked_block(self):\n booking_other = create_test_booking(self.someone, self.first_day, 11)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(booking_other.date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n\n self.assertEqual(type(context[\"info\"]), NotAllowedAlert)", "def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)", "def test_can_not_book_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = 
self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_falsepositive_no_token_passed(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/falsepositive?fp=splunk_82998ef6bb3db9dff3dsfdsfsdc\")\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def test_visibility_of_not_available_4(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertContains(response, partner.get_absolute_url())", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')", "def test_custom_403(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_post?format=json\")\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n \"wrong used test Data,this is api for POST\", response.content)", "def test_get_reusableitem_api_not_public(self):\n\n 
# user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_create_reusableitem_not_authenticated(self):\n\n self.client.logout()\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_artifactpriority_detail_api_unauthorized(self):\n\n # get object\n artifactpriority_api_1 = Artifactpriority.objects.get(\n artifactpriority_name='artifactpriority_api_1'\n )\n # get response\n response = self.client.get(\n '/api/artifactpriority/'\n + str(artifactpriority_api_1.artifactpriority_id)\n + '/'\n )\n # compare\n self.assertEqual(response.status_code, 401)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_cannot_get_other_attendant_sales(self):\n response = self.client.get(\n '/self.base_url/sales/1',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You can only view your sales\")\n self.assertEqual(response.status_code,401)", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_attendant_cannot_view_all_sales(self):\n response = self.client.get(\n '/self.base_url/sales',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You dont have rights to list all sales, contact the system admin\")\n self.assertEqual(response.status_code,401)", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def 
test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_deny_pending_payment(self):\n pass", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)", "def test_can_not_cancel_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_get_restaurant_unauthorized(self):\n resp = self.test_client.get(self.API_BASE, headers={})\n self.assertEqual(resp.status_code, 401)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)", "def test_artifactpriority_list_api_unauthorized(self):\n\n # get 
response\n response = self.client.get('/api/artifactpriority/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])" ]
[ "0.77314836", "0.7319002", "0.68258417", "0.6709656", "0.66352224", "0.6600747", "0.65926576", "0.65914124", "0.6580106", "0.6573959", "0.6558386", "0.65569514", "0.6534714", "0.6512719", "0.6504638", "0.64704", "0.6448948", "0.6437487", "0.6420581", "0.6419264", "0.63966596", "0.63964593", "0.63914037", "0.6346709", "0.63102347", "0.6308961", "0.629592", "0.6293923", "0.6290818", "0.6275965", "0.6267467", "0.6262702", "0.6259832", "0.6257948", "0.6241813", "0.6240369", "0.62261003", "0.6223102", "0.6218836", "0.6215857", "0.61920935", "0.6169689", "0.613838", "0.61286384", "0.6121536", "0.6121215", "0.611691", "0.61051387", "0.61021924", "0.6101183", "0.61009586", "0.6089359", "0.60755926", "0.6048527", "0.6046254", "0.6045647", "0.6040338", "0.6040036", "0.6039291", "0.60370815", "0.60365427", "0.6033337", "0.60309684", "0.6025251", "0.6021512", "0.60113037", "0.60095507", "0.6007567", "0.6005233", "0.60002273", "0.5999814", "0.5995918", "0.59837794", "0.59814155", "0.59781575", "0.59696954", "0.59680533", "0.59668976", "0.5965554", "0.5965554", "0.5965554", "0.5965554", "0.5965342", "0.5957108", "0.5955401", "0.595158", "0.59510046", "0.5950791", "0.59478515", "0.5944824", "0.5944824", "0.5944824", "0.5931609", "0.59260285", "0.59250784", "0.5920849", "0.5918673", "0.5918249", "0.59170693", "0.59170693" ]
0.79721093
0
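Several of the negative snippets above follow the same Django REST Framework pattern: issue a request without credentials and assert a 401/403, then authenticate and assert a 200. A minimal, self-contained sketch of that pattern is shown below for reference; it is not part of the dataset, and the `/api/widgets/` URL and `UnauthenticatedAccessTest` name are hypothetical.

```python
# Sketch of the unauthenticated-vs-authenticated assertion pattern used by the
# snippets above. Assumes a configured Django + DRF project; the URL is made up.
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APITestCase


class UnauthenticatedAccessTest(APITestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username='tester', password='secret'
        )

    def test_anonymous_request_is_rejected(self):
        # No credentials attached: a protected endpoint should refuse access.
        response = self.client.get('/api/widgets/')
        self.assertIn(
            response.status_code,
            (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN),
        )

    def test_authenticated_request_is_allowed(self):
        # force_authenticate attaches credentials without a token round-trip.
        self.client.force_authenticate(user=self.user)
        response = self.client.get('/api/widgets/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
```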
Tests that a user can view all products in the Inventory
def test_view_all_products(self): resp = self.admin_register() reply = self.admin_login() token = reply['token'] product = dict( prod_name='NY_denims', category='denims', stock=20, price=150 ) resp = self.client.post( '/api/v1/products', content_type='application/json', data=json.dumps(product), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Product successfully added to Inventory!') self.assertEqual(resp.status_code, 201) resp = self.client.get( '/api/v1/products', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertIn('NY_denims', str(reply['products'])) self.assertEqual(resp.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_list_products(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[1]['description'], 'Descripcion producto 1')", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_product_list_view(sample_product, user_company, client):\n product_list_url = reverse('product-list')\n response = client.get(product_list_url)\n assert response.status_code == 200\n assert Product.objects.count() == 9\n products = Product.objects.all()\n content = 
response.content.decode(response.charset)\n for product in products:\n assert product.name in content", "def test_list_products(self):\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n\n res = self.client.get(PRODUCTS_URL)\n\n products = Product.objects.all().order_by('-name')\n serializer = ProductSerializer(products, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(len(res.data), 3)", "def test_product_detail_view(client, sample_product, user_company, authenticated_user):\n products = Product.objects.all()\n for product in products:\n product_detail_view = reverse('product-detail', kwargs={'pk': product.pk})\n response = client.get(product_detail_view)\n #The view should return 200 for each product that exists\n assert response.status_code == 200\n content = response.content.decode(response.charset)\n #With content specific for each product\n assert product.name in content\n #checking for \"page not found\" if product does not exist\n product_not_exist_detail_view = reverse('product-detail', kwargs={'pk':104})\n response = client.get(product_not_exist_detail_view)\n assert response.status_code == 404 \n #Authenticated user but not the owner of the product returns 404\n if authenticated_user and not user_company:\n product_detail_view = reverse('product-detail', kwargs={'pk': 6})\n response = client.get(product_detail_view)\n assert response.status_code == 404", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_all_objects(self):\n url = '/product/xml/'\n response = self.client.get(url, **self.headers)\n # Request should not be validated by a 401\n self.failUnlessEqual(response.status_code, 401)\n response = self.client.get(url, **self.advancedheaders)\n # Request should be validated by a 200\n self.failUnlessEqual(response.status_code, 200)\n xml_response = parseString(response.content)\n\n product_tags =[elt for elt in xml_response.getElementsByTagName('object') if elt.getAttribute('model') == 'product.product']\n # check that all product are displayed\n self.failUnlessEqual(len(product_tags), Product.objects.count())", "def test_add_all(self): #SAUCE-LAB-7\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n 
item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('\\n')\n print('Not all items were added')", "def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_view_cart_contents(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 200)", "def test_product_by_category_logged_in_user(self):\n\n # Log In user that is not the seller, check that the products not created by the user do show up\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Make sure that only the product associated with product category 1 is displayed\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_non_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_non_seller.status_code, 200)\n\n # Make sure that only the product associated with product category 2 is displayed\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_non_seller.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_non_seller.content)", "def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.search\"\n current_page = 1\n search_info = json.dumps({\n })\n\n result = self.access_api(flag = flag, api = api, current_page = current_page, search_info = search_info)\n self.assertTrue('data_list' in result)", "def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')", "def test_vault_get_all_vault_items(self):\n pass", "def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')", "def 
index(self, user):\n\n cart_products = CartProduct.index(user)\n CartProductsView.index(cart_products)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_list_products_filtered_by_selling_status(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?selling=3')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')", "def test_get_inventory_list(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)", "def test_list_products_filtered_by_seller_name(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?seller=testuser1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def test_view_displays_all(self):\n set_up_one_user(self, 1, 0)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.context['user']), 'test')\n self.assertEqual(len(response.context['data']), 1)", "def test_list_products(self):\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that the context contains 2 products\n self.assertEqual(len(response.context['products']),2)\n\n # Check that the product title appears in the rendered HTML content\n self.assertIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)", "def 
test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_product(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.data['name'], 'Producto 1')\n self.assertEqual(response.data['description'], 'Descripcion producto 1')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '24.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser1')\n self.assertEqual(response.data['category']['name'], 'general')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get('/api/1.0/products/2/')\n self.assertEqual(response.data['name'], 'Producto 2')\n self.assertEqual(response.data['description'], 'Descripcion producto 2')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '312.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser2')\n self.assertEqual(response.data['category']['name'], 'deportes')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_products_ref_users_get(self):\n pass", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_get_all_products(self):\n response=self.get_all_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(response.status_code, 200,result['Available Products'] )", "def test_view_product_detail(self):\n product = sample_product(supplier_id=self.user)\n\n url = detail_url(product.id)\n res = self.client.get(url)\n\n serializer = ProductSerializer(product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def products(request):\n\n if not 
request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)", "def test_query_product_list_by_owner(self):\n products = self._create_products(10)\n test_owner = products[0].owner\n owner_products = [product for product in products if product.owner == test_owner]\n resp = self.app.get(\n \"/products\", query_string=\"owner={}\".format(quote_plus(test_owner))\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), len(owner_products))\n # check the data just to be sure\n for product in data:\n self.assertEqual(product[\"owner\"], test_owner)", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_get_product_list(self):\n self._create_products(5)\n resp = self.app.get(\"/products\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_product_by_category_logged_in_seller(self):\n\n # Log In user that is the seller, check that their products to do not show up\n 
self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the returned HTML does not include either product\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_seller.status_code, 200)\n\n # Ensure that the returned HTML does not include either product\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_seller.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_seller.content)", "def test_list_products_filtered_by_keyword(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?name=1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')", "def test_basic_info(self):\n\n url = reverse('stock-item-detail', kwargs={'pk': 1})\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n html = str(response.content)\n\n # Part name\n self.assertIn('Stock Item: M2x4 LPHS', html)\n\n # Quantity\n self.assertIn('<h5>Available Quantity</h5>', html)\n self.assertIn('<h5>4000', html)\n\n # Batch code\n self.assertIn('Batch', html)\n self.assertIn('<td>B123</td>', html)\n\n # Actions to check\n actions = [\n \"id=\\\\\\'stock-count\\\\\\' title=\\\\\\'Count stock\\\\\\'\",\n \"id=\\\\\\'stock-add\\\\\\' title=\\\\\\'Add stock\\\\\\'\",\n \"id=\\\\\\'stock-remove\\\\\\' title=\\\\\\'Remove stock\\\\\\'\",\n \"id=\\\\\\'stock-move\\\\\\' title=\\\\\\'Transfer stock\\\\\\'\",\n \"id=\\\\\\'stock-duplicate\\\\\\'\",\n \"id=\\\\\\'stock-edit\\\\\\'\",\n \"id=\\\\\\'stock-delete\\\\\\'\",\n ]\n\n # Initially we should not have any of the required permissions\n for act in actions:\n self.assertNotIn(act, html)\n\n # Give the user all the permissions\n self.assignRole('stock.add')\n self.assignRole('stock.change')\n self.assignRole('stock.delete')\n\n response = self.client.get(url)\n html = str(response.content)\n\n for act in actions:\n self.assertIn(act, html)", "def test_add_product_view_for_authenticated_users(user_company, client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 200", "def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def test_search_view_when_user_logged_in(self):\n self.user = User.objects.create_user(\n username='person',\n email='[email protected]',\n password='test12345@_password',\n )\n self.client.login(\n username='person',\n password='test12345@_password')\n\n response = self.client.get('/search/?q=')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 
\"products.html\")", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def test_products(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create some products\n for i in range(3):\n product(save=True)\n\n # GET the home page and verify the content\n r = self.client.get(reverse('products'), follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(3, len(doc('#products-and-services li')))", "def test_cart_correct_user_templates_rendered_with_call(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertTemplateUsed(response, \"cart.html\")\n self.assertTemplateUsed(response, \"base.html\")\n self.assertTemplateUsed(response, \"layout/head.html\")\n self.assertTemplateUsed(response, \"components/navbar.html\")\n self.assertTemplateUsed(response, \"components/cart-contents.html\")\n self.assertTemplateUsed(response, \"components/footer.html\")\n self.assertTemplateUsed(response, \"layout/scripts.html\")", "def test_get_dealer_active_inventory(self):\n pass", "def test_dir_search_doesnt_get_any_product(client, jwt, session, keycloak_mock): # pylint:disable=unused-argument\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org_anonymous),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n assert dictionary['accessType'] == 'ANONYMOUS'\n assert schema_utils.validate(rv.json, 'org_response')[0]\n\n rv_products = client.get(f\"/api/v1/orgs/{dictionary.get('id')}/products\", headers=headers,\n content_type='application/json')\n\n list_products = json.loads(rv_products.data)\n assert len([x for x in list_products if x.get('subscriptionStatus') != 'NOT_SUBSCRIBED']) == 0", "def test_wiki_products(self):\n\n prod_vals = (\n (ProductFactory(slug='b2g'), 0),\n (ProductFactory(slug='mobile'), 1),\n (ProductFactory(slug='desktop'), 2),\n )\n\n for prod, total in prod_vals:\n for i in range(total):\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.products.add(prod)\n RevisionFactory(document=doc, 
is_approved=True)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 1, 'format': 'json'}\n\n for prod, total in prod_vals:\n qs.update({'product': prod.slug})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def test_recipes_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n email = '[email protected]',\n password = '123465',\n )\n sample_recipe(user = user2)\n sample_recipe(user = self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.filter(user=self.user)\n serializer = RecipeSerializer(recipes, many=True) # although we only have one recipe for this user, we still pass many=true since even if there is one object returned, the list api should always return a data type of list\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def test_recipe_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'password3456',\n )\n sample_recipe(self.user)\n sample_recipe(user2)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.filter(user=self.user)\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_purchase_products(self, driver):\n logging.info(\"Start test case: checkout product successfully\")\n products = self.test_data[\"Purchase Products\"][\"Products\"]\n address = self.test_data[\"Purchase Products\"][\"Address\"]\n payment_info = self.test_data[\"Purchase Products\"][\"Payment Info\"]\n logging.info(f\"Test Data: {self.test_data['Purchase Products']}\")\n\n select_product(driver, products[0][\"Page\"], products[0][\"Product Name\"])\n add_product_to_cart(driver, products[0][\"Size\"], products[0][\"Color\"], products[0][\"Quantity\"])\n checkout_from_order_summary(driver)\n set_address(driver, address[\"Billing Address\"], address[\"Country\"], address[\"City\"], address[\"Zip\"])\n checkout_order_to_pay(driver, payment_info[\"Payment Type\"])\n pay_order(driver, payment_info[\"Card ID\"], payment_info[\"Expired Date\"], payment_info[\"CVC\"])\n verify_message(driver, \"Order was successful\")", "def test_recipe_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'password2'\n )\n sample_recipe(user=user2)\n sample_recipe(user=self.user)\n\n res = self.client.get(RECIPES_URL)\n recipes = Recipe.objects.filter(user=self.user)\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def test_list_products_filtered_by_category(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?category=1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')", "def test_shoppingitems_page(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # send a GET request\n 
res = self.app.get('/shoppingitems/Easter')\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"You can now add your items\", str(res.data))", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)", "def open_products(self, cursor, user, product_ids, context):\n ir_model_data = self.pool.get('ir.model.data')\n\n tree_res = ir_model_data.get_object_reference(\n cursor, user, 'product', 'product_product_tree_view'\n )\n tree_id = tree_res and tree_res[1] or False\n\n return {\n 'name': _('Products that have been exported to Magento'),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'product.product',\n 'views': [(tree_id, 'tree')],\n 'context': context,\n 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', product_ids)]\n }", "def test_get_user_carts(self):\n self.cart_item_manager.create_cart('123', 'Cart1', True)\n self.cart_item_manager.create_cart('123', 'Cart2', False)\n self.cart_item_manager.create_cart('123', 'Cart3', False)\n self.cart_item_manager.create_cart('124', 'Cart2', True)\n self.assertEqual(3, len(self.cart_item_manager.get_user_carts('123')))", "def test_product_by_category_guest(self):\n\n # Guest user, searching for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that new_product is the only product that shows in the query\n 
self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)", "def test_add_product_view_for_unauthenticated_users(client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 302\n assert response.url == \"/accounts/login/?next=/products/add-product/\"", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_recipe_limited_to_user(self):\n user2 = get_user_model().objects.create_user('[email protected]','passpass')\n sample_recipe(user2)\n sample_recipe(self.user)\n res = self.client.get(RECIPE_URL)\n recipe = Recipe.objects.filter(user=self.user)\n serializer = RecipeSerializer(recipe,many=True)\n\n self.assertEqual(res.status_code,status.HTTP_200_OK)\n self.assertEqual(len(res.data),1)\n self.assertEqual(res.data,serializer.data)", "def test_update_inventory(self):\n pass", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_get_reusableitem_api_public(self):\n\n self.reusableitem_1.is_public = True\n self.reusableitem_1.save()\n\n self.client.logout()\n\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_invoice_item_list(self):\n self.url = reverse(\"invoiceitem-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_retrieve_list_resgate_to_user_authenticated(self):\n sample_resgate(user=self.user, value=500)\n sample_resgate(user=self.user, value=200)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.all().order_by('quantity')\n serializer = ResgateSerializer(resgates, 
many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, serializer.data)", "def test_retrieving_limited_to_request_user(self):\n\n user2 = get_user_model().objects.create_user(\n email='[email protected]',\n password='correctpass'\n )\n recipe1 = create_sample_recipe(user=self.user)\n create_sample_recipe(user=user2)\n\n recipe = Recipe.objects.filter(user=self.user)\n\n serializer = RecipeSerializer(recipe, many=True)\n\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data[0]['title'], recipe1.title)", "def test_if_app_can_search_for_existing_lists_with_products(self):\n product_to_add = {'product':'nikes', 'Quantity':3, 'Amountspent':5000}\n jsonproduct_to_add = json.dumps(product_to_add)\n add_list = self.client.post('/shoppinglists/',\n data = self.shopllist, \n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n add_product=self.client.post('/shoppinglist/shoes/items/',\n data=jsonproduct_to_add,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforproducts=self.client.get('/searchProduct/?q=nike',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"Success\",str(searchforlists.data))\n self.assertIn(\"Success\",str(searchforproducts.data))\n self.assertEqual(searchforproducts.status_code,200)\n self.assertEqual(searchforlists.status_code,200)", "def test_filter_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'nutella',\r\n 'category': '1',\r\n 'nutriscore': 'd'\r\n })\r\n self.assertTrue(response.context['product_list'])", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def test_detail(self):\n self.assertEqual(self.product_1.id, 1)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {\n 'id': self.product_1.id,\n 'name': self.product_1.name,\n 'sku': self.product_1.sku,\n 'category': self.product_1.category.id,\n 'description': self.product_1.description,\n 'price': str(self.product_1.price),\n 'created': '2018-12-20T10:15:30Z',\n 'featured': self.product_1.featured\n }\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(response.json(), expected)", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_reteta_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n sample_reteta(user=user2)\n sample_reteta(user=self.user)\n\n 
res = self.client.get(RETETA_URL)\n rete = Reteta.objects.filter(user=self.user)\n serializer = RetetaSerializer(rete, many=True) # return list\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "async def items(self, ctx, search=''):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n inventory = ch.print_inventory(ctx.user_object, search.lower())\n await self.paginate(ctx, inventory)", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_shoppingcart_list(self):\n self.url = reverse(\"shoppingcart-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_wiki_products_inherit(self):\n doc = DocumentFactory(locale=u'en-US', category=10)\n p = ProductFactory(title=u'Firefox', slug=u'desktop')\n doc.products.add(p)\n RevisionFactory(document=doc, is_approved=True)\n\n translated = DocumentFactory(locale=u'fr', parent=doc, category=10)\n RevisionFactory(document=translated, is_approved=True)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 1, 'format': 'json', 'product': p.slug}\n response = self.client.get(reverse('search.advanced', locale='fr'), qs)\n eq_(1, json.loads(response.content)['total'])", "def test_recipe_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'passwordqwe'\n )\n sample_recipe(user=user2)\n sample_recipe(user=self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipe = Recipe.objects.filter(user = self.user)\n serializer = Recipeserializer(recipe,many = True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data),1)\n self.assertEqual(res.data, serializer.data)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.add\"\n current_page = 1\n search_info = json.dumps({\n 'name': \"可爱的小蓝牙呀\"\n })\n print('start------------------------>add')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_vault_get_vault_item(self):\n pass", "def test_question_products(self):\n p1 = ProductFactory(slug='b2g')\n p2 = ProductFactory(slug='mobile')\n p3 = ProductFactory(slug='desktop')\n\n QuestionFactory(product=p2)\n QuestionFactory(product=p2)\n QuestionFactory(product=p3)\n\n self.refresh()\n\n product_vals = (\n (p1.slug, 0),\n (p2.slug, 2),\n (p3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for products, number in product_vals:\n qs.update({'product': products})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n 
data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_get_product_detail(self):\n\n response = self.client.get(reverse('website:product_details', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Product title appears in HTML response content\n self.assertIn('<h1>Test Product</h1>'.encode(), response.content)\n self.assertNotIn('<h1>Test Product2</h1>'.encode(), response.content)", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)" ]
[ "0.72330284", "0.71533865", "0.70860183", "0.6867175", "0.6790764", "0.67349434", "0.6666839", "0.6636514", "0.66316766", "0.6597527", "0.65619427", "0.6444273", "0.6425292", "0.642249", "0.6402104", "0.64012486", "0.6383176", "0.6383148", "0.6321953", "0.6320072", "0.6299333", "0.6296548", "0.62822485", "0.62611794", "0.6251649", "0.624012", "0.6230861", "0.6216536", "0.6212767", "0.6196214", "0.61885864", "0.6186684", "0.61798525", "0.61382794", "0.61354315", "0.6116204", "0.61108273", "0.61082435", "0.6100489", "0.6099747", "0.60993123", "0.60828036", "0.6077931", "0.6067644", "0.60648656", "0.60539025", "0.60423017", "0.603645", "0.60146976", "0.6004269", "0.6001859", "0.5968482", "0.59635127", "0.5935956", "0.5924982", "0.59145975", "0.5910009", "0.5881205", "0.58798605", "0.5876092", "0.5875259", "0.58244884", "0.58202136", "0.581589", "0.57949734", "0.57933503", "0.57918745", "0.5784435", "0.5780928", "0.57806695", "0.57804763", "0.5776278", "0.57664263", "0.5753148", "0.5751271", "0.57315254", "0.57307446", "0.57254237", "0.5722489", "0.5714807", "0.5702262", "0.5692957", "0.5683524", "0.5677701", "0.5670792", "0.56707144", "0.56670946", "0.5663102", "0.56624544", "0.56588936", "0.5657391", "0.56550163", "0.5653216", "0.5651673", "0.564835", "0.5646219", "0.5640733", "0.5640458", "0.5638637", "0.5618552" ]
0.7486654
0
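The row above, and the blacklisted-token row that follows, exercise a token-protected `GET /api/v1/products` listing endpoint. The service under test is not included in the dataset, so the following is only a minimal sketch, assuming a Flask app with `flask_jwt_extended`; the in-memory `PRODUCTS` list and the blocklist wiring are illustrative, while the response messages are mirrored from the tests.

```python
# Sketch (assumptions: Flask + flask_jwt_extended 4.x) of a listing endpoint with
# a logout blocklist, matching the behaviors asserted in the rows above.
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager, jwt_required

app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = 'change-me'  # placeholder secret
jwt = JWTManager(app)

PRODUCTS = []      # in-memory stand-in for the inventory store
BLOCKLIST = set()  # jti values of tokens revoked at logout


@jwt.token_in_blocklist_loader
def token_is_revoked(jwt_header, jwt_payload):
    # Logging out adds the token's jti here, so later requests with it fail.
    return jwt_payload['jti'] in BLOCKLIST


@jwt.revoked_token_loader
def revoked_token_response(jwt_header, jwt_payload):
    # Message mirrored from the tests; the real API's wording may differ.
    return jsonify(message='Invalid Authentication, Please Login!'), 401


@app.route('/api/v1/products', methods=['GET'])
@jwt_required()
def list_products():
    if not PRODUCTS:
        return jsonify(message='There are no products yet!'), 404
    return jsonify(products=PRODUCTS), 200
```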
Tests that a user cannot view all products in the Inventory with blacklisted token
def test_cannot_view_all_products_with_blacklisted_token(self): resp = self.admin_register() reply = self.admin_login() token = reply['token'] product = dict( prod_name='NY_denims', category='denims', stock=20, price=150 ) resp = self.client.post( '/api/v1/products', content_type='application/json', data=json.dumps(product), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Product successfully added to Inventory!') self.assertEqual(resp.status_code, 201) resp = self.client.delete( '/api/v1/logout', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'You are successfully logged out!') self.assertEqual(resp.status_code, 200) resp = self.client.get( '/api/v1/products', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!') self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n 
'/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_list_not_authenticated(self):\n response = 
self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)", "def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def 
test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_attendant_cannot_view_all_sales(self):\n response = self.client.get(\n '/self.base_url/sales',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You dont have rights to list all sales, contact the system admin\")\n self.assertEqual(response.status_code,401)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403", "def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_video_detail_no_permission(\n mock_user_moira_lists, logged_in_apiclient, user_admin_list_data\n):\n client, _ = logged_in_apiclient\n mock_user_moira_lists.return_value = {\"some_other_list\"}\n url = reverse(\n \"video-detail\", 
kwargs={\"video_key\": user_admin_list_data.video.hexkey}\n )\n result = client.get(url)\n assert result.status_code == status.HTTP_403_FORBIDDEN", "def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')", "def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_forbidden(self):\n self._error_test(fitbit_exceptions.HTTPForbidden)", "def test_artifactpriority_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/artifactpriority/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = 
user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)", "def test_can_not_book_running_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_can_not_cancel_current_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_invalid_credentials_forbidden(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPForbidden)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def 
test_feature_disabled(self, url):\n response = self.client.get(url)\n assert response.status_code == 403\n response = self.client.post(url)\n assert response.status_code == 403", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_visibility_of_not_available_4(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertContains(response, partner.get_absolute_url())", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def assert_user_cannot_read(self, user, video):\n livesession = LiveSessionFactory(\n email=user.email,\n is_registered=True,\n user=user,\n video=video,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_cannot_get_other_attendant_sales(self):\n response = self.client.get(\n '/self.base_url/sales/1',\n headers=dict(Authorization=\"Bearer \" + 
self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You can only view your sales\")\n self.assertEqual(response.status_code,401)", "def test_not_authenticated_non_public_course_with_all_blocks(self):\n self.client.logout()\n self.query_params.pop('username')\n self.query_params['all_blocks'] = True\n self.verify_response(403)", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def test_no_permission(client, mocker):\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=False\n )\n resp = client.post(reverse(\"order-fulfillment\"), data={})\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False", "def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)", "def test_custom_403(self):\n c = Client()\n response = c.post(\"/apimock/mocked/mocked_get\", data={\"post\": \"data\"})\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\"wrong used test Data\", response.content)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def test_no_token_get_all(self):\n response = self.app.get('/api/v3/users')\n self.assertEqual(response.status_code, 401)", "def test_can_not_book_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - 
timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_create_reusableitem_not_authenticated(self):\n\n self.client.logout()\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def test_can_not_reserve_booked_block(self):\n booking_other = create_test_booking(self.someone, self.first_day, 11)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(booking_other.date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n\n self.assertEqual(type(context[\"info\"]), NotAllowedAlert)", "def test_systemtype_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/systemtype/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_anonymous_user(self):\r\n self.request.user = AnonymousUser()\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_get_restaurant_unauthorized(self):\n resp = self.test_client.get(self.API_BASE, headers={})\n self.assertEqual(resp.status_code, 401)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)", "def test_need_login_to_see_meterlist(self):\n response = self.client.get(reverse('api_v1:meter-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_artifactpriority_detail_api_unauthorized(self):\n\n # get object\n artifactpriority_api_1 = Artifactpriority.objects.get(\n artifactpriority_name='artifactpriority_api_1'\n )\n # get response\n response = self.client.get(\n '/api/artifactpriority/'\n + str(artifactpriority_api_1.artifactpriority_id)\n + '/'\n )\n # compare\n self.assertEqual(response.status_code, 401)", "def 
test_falsepositive_no_token_passed(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/falsepositive?fp=splunk_82998ef6bb3db9dff3dsfdsfsdc\")\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def test_without_whitelisted_ip(self, public_omis_api_client):\n order = OrderFactory()\n\n url = reverse(\n 'api-v3:public-omis:payment:collection',\n kwargs={'public_token': order.public_token},\n )\n public_omis_api_client.set_http_x_forwarded_for('1.1.1.1')\n response = public_omis_api_client.get(url)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_show_private_lists_invalid(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user2.id\n \n res = c.get(\"/users/tester1/private-lists\")\n\n self.assertEqual(res.status_code, 302)", "def test_get(self):\n self.assertEqual(403, self.response.status_code)", "def test_wrong_token_permission_denied(self, client, token):\n with disable_logs(logging.WARNING):\n assert_hook_status(client, status=403, token=f\"{token}wrong\")", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)" ]
[ "0.77733105", "0.7087671", "0.67957276", "0.6761181", "0.675135", "0.67509085", "0.66980356", "0.6541592", "0.65259844", "0.6448291", "0.64465666", "0.6436873", "0.64326245", "0.64205766", "0.6409761", "0.6387554", "0.6384848", "0.6369056", "0.63571316", "0.63104194", "0.62819606", "0.62777966", "0.6248736", "0.6216315", "0.6215546", "0.6214481", "0.6206037", "0.6202165", "0.6197165", "0.6163344", "0.61630917", "0.6163027", "0.6157586", "0.6156872", "0.6151871", "0.6143205", "0.6122024", "0.61206806", "0.61165017", "0.6109191", "0.60934", "0.6087529", "0.6080406", "0.6071754", "0.60694194", "0.6069345", "0.6064984", "0.60566014", "0.60543954", "0.60539705", "0.60517323", "0.6046775", "0.6033486", "0.60333353", "0.60141045", "0.60071236", "0.59979254", "0.5997664", "0.5982589", "0.59798187", "0.59746987", "0.5963707", "0.59629095", "0.59626424", "0.59601915", "0.5956718", "0.59524727", "0.59516186", "0.5947211", "0.59458226", "0.59350955", "0.59313846", "0.59269875", "0.5916839", "0.59113663", "0.59096986", "0.59035707", "0.5900296", "0.5897455", "0.5893802", "0.58929855", "0.58913", "0.5890032", "0.5888088", "0.5888088", "0.588593", "0.58785045", "0.58738405", "0.5866536", "0.5859295", "0.5856923", "0.583791", "0.5836227", "0.5831916", "0.583062", "0.5826336", "0.5822387", "0.5818349", "0.5818349", "0.5818349" ]
0.8038467
0
Tests that a user cannot view a product that does not exist in the Inventory
def test_view_product_that_doesnot_exist_in_inventory(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.get(
        '/api/v1/products/2',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'This product does not exist!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n 
self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)", "def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)", "def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_no_enable_shoppingcart(self):\r\n 
self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to 
Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n 
product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def assertCanNotFind(self, document):\n ctool = self.portal.portal_catalog\n self.assertEqual(len(ctool.searchResults(UID=document.UID())), 0)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_visibility_of_not_available_4(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertContains(response, partner.get_absolute_url())", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_reusableitem_not_authenticated(self):\n\n self.client.logout()\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def assert_user_cannot_read(self, user, video):\n livesession = LiveSessionFactory(\n email=user.email,\n is_registered=True,\n user=user,\n video=video,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer 
{jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_get_pricehistory_non_existing_product(self):\n res = self.get(url=\"/products/10/pricehistory\", role=\"admin\")\n self.assertException(res, exc.EntryNotFound)", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n 
self.assertEqual(response.status_code, 403)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_user_can_not_enroll_to_unpublished_project(self):\n self.client.force_authenticate(self.global_user_1)\n\n # Make sure nothing is found - not enrolled.\n resp = self.client.get(self.api_project_state_not_enrolled_unpublished_url)\n self.assertEqual(resp.status_code, 404)\n\n #NOTE: Allowed!\n # # POST and make sure the user is enrolled\n # resp = self.client.post(self.api_project_state_not_enrolled_unpublished_url)\n # self.assertEqual(resp.status_code, 403)\n # resp = self.client.get(self.api_project_state_not_enrolled_unpublished_url)\n # self.assertEqual(resp.status_code, 404)", "def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def test_detailview_read_for_wrong_user(self):\n\n for user in self.users:\n detailview = reverse('account_detail', args=(user.uuid,))\n\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(self.users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(detailview)\n\n self.assertEqual(response.status_code, 403)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n 
content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)", "def not_test_without_user(self):\n # TODO", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_product_is_uninstalled(self):\n try:\n result = self.installer.is_product_installed(PROJECT_NAME)\n except AttributeError:\n result = self.installer.isProductInstalled(PROJECT_NAME)\n self.assertFalse(result)", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_anonymous_user(self):\r\n self.request.user = AnonymousUser()\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_resuableitem_vote_not_referenced(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 3 now submits a vote\n self.client.force_authenticate(user=self.user_3)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), 
data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_create_reusableitem_not_toptenlist_owner(self):\n\n self.client.logout()\n\n self.client.force_authenticate(user=self.user_2)\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the user cannot see the Top Ten Item because they did not create it\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)", "def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403", "def test_query_inventory_not_found(self):\n resp = self.app.get('/inventories/query', query_string='name=shampoo&status=used')\n self.assertEquals(resp.status_code, 404)", "def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)", "def test_validate_when_user_not_found(self, view, mget_user):\n mget_user.side_effect = NoResultFound()\n\n with raises(HTTPUnauthorized):\n view.validate()", "def test_artifactpriority_detail_api_unauthorized(self):\n\n # get object\n artifactpriority_api_1 = Artifactpriority.objects.get(\n artifactpriority_name='artifactpriority_api_1'\n )\n # get response\n response = self.client.get(\n '/api/artifactpriority/'\n + str(artifactpriority_api_1.artifactpriority_id)\n + '/'\n )\n # compare\n self.assertEqual(response.status_code, 401)", "def cant(user, action):\n\n return not can(user, action)", "def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)", "def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)", "def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert 
\"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND", "def test_delete_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.delete(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def test_seat_not_available(self):\n\n user1 = User.objects.create(username=\"user1\", password=\"\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"\", email=\"[email protected]\")\n\n course = Course.objects.first()\n course.student.add(user1)\n course.student.add(user2)\n\n self.assertFalse(course.is_seat_available())", "def test_cart_item_batch_write_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.start_batch_cart_item_write(self.catalog, '112', cart_id, 'foo', {}, 12345, 10000)", "def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')" ]
[ "0.72640157", "0.6747577", "0.6742351", "0.6686189", "0.66764927", "0.6665994", "0.6556591", "0.6439238", "0.6401339", "0.6370489", "0.6360187", "0.6351575", "0.6346178", "0.63409466", "0.63361067", "0.6330177", "0.6312651", "0.62697184", "0.6259748", "0.62596893", "0.62184983", "0.62113905", "0.6201547", "0.6200921", "0.6182439", "0.6182439", "0.6182439", "0.6182439", "0.61808616", "0.61482865", "0.6142915", "0.61216444", "0.61201245", "0.6118396", "0.61168003", "0.6090166", "0.6082628", "0.607417", "0.6073622", "0.60684365", "0.6052404", "0.60177594", "0.60090476", "0.59991527", "0.5996969", "0.5989256", "0.5972502", "0.5959089", "0.5949635", "0.5947237", "0.5934573", "0.5931181", "0.5925632", "0.5917249", "0.59018874", "0.58992845", "0.589296", "0.5890748", "0.5876573", "0.58738047", "0.58631206", "0.5860139", "0.5858319", "0.5858168", "0.585679", "0.58490264", "0.58449197", "0.5841956", "0.58397967", "0.5835746", "0.5833908", "0.58329815", "0.58321714", "0.5819265", "0.58099896", "0.58081365", "0.5801256", "0.5801155", "0.5799884", "0.5796168", "0.57933676", "0.57836634", "0.57833976", "0.5781891", "0.5778655", "0.57779175", "0.57773167", "0.5777312", "0.57680166", "0.57673055", "0.57648504", "0.5762647", "0.5761903", "0.5755523", "0.57524735", "0.5746705", "0.5743764", "0.57382864", "0.573803", "0.5736484" ]
0.73303914
0
Tests that a user cannot view products from an empty Inventory
def test_view_products_from_empty_inventory(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']

    resp = self.client.get(
        '/api/v1/products',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'There are no products yet!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please 
Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_login_with_nonempty_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def 
test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)", "def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)", "def test_get_empty_product_list(self):\n response = self.client().get('/api/v1/products')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"There are no books\")\n self.assertEqual(response.status_code, 404)", "def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)", "def action_confirm(self):\n if any(not l.is_available for l in self.mapped('order_line')):\n raise UserError(_('Some of your products in order does not have enough quantity available'))\n res = super(SaleOrder, self).action_confirm()\n return res", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def print_inventory_items(items):\r\n if not (len(items) == 0):\r\n wrap_print(\"You have \" + list_of_objects(items) + \".\\n\")\r\n else:\r\n wrap_print(\"You don't have anything.\\n\")", "def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n 
\"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)", "def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)", "def test_lta_good(self):\n self.assertIsNone(api.inventory.check(self.lta_order_good))", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def test_query_inventory_not_found(self):\n resp = self.app.get('/inventories/query', query_string='name=shampoo&status=used')\n self.assertEquals(resp.status_code, 404)", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def test_gather_location_no_product(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_02)\n # Check the number of apple quants returned is correct\n self.assertFalse(len(gathered_items))", "def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply 
= self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)", "def test_get_dealer_active_inventory(self):\n pass", "def test_shopping_cart_not_empty(self):\n expected_contents = self.fill_session_cart()\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.context['contents'], expected_contents)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_create_reusableitem_not_authenticated(self):\n\n self.client.logout()\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n 
pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def test_get_default_cart_with_no_default_cart(self):\n user_id = '123'\n self.assertEqual(0, len(self.cart_item_manager.get_user_carts(user_id)))\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_default_cart(user_id)", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)", "def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_can_not_access_checkout_with_empty_cart(self):\n responses = [self.client.post(self.CHECKOUT_URL, follow=True),\n self.client.get(self.CHECKOUT_URL, follow=True)]\n\n for response in responses:\n self.assertRedirects(response, reverse('orders:shopping_cart'))\n message = list(response.context.get('messages'))[0]\n self.assertEqual(message.tags, 'error')\n self.assertTrue(\"Your cart is empty.\" in message.message)", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n 
category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_report_out_of_stock(self, *args):\n self.client.force_login(self.user)\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ReportOutOfStock.objects.count(), 1)\n report = ReportOutOfStock.objects.first()\n self.assertEqual(report.user, self.user)\n self.assertEqual(report.menu_item, self.menu)", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))", "def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)", "def test_create_inventory_with_no_name(self):\n new_inventory = {'status': 'new'}\n resp = self.app.post('/inventories', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')", "def test_anonymous_user(self):\r\n self.request.user = AnonymousUser()\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n 
\"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_query_with_no_matches_returns_nothing(test_store):\n items = list(test_store.get_by(name=\"Sugar\"))\n\n assert len(items) == 0", "def test_get_pricehistory_non_existing_product(self):\n res = self.get(url=\"/products/10/pricehistory\", role=\"admin\")\n self.assertException(res, exc.EntryNotFound)", "def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())", "def test_creating_supply_unauthenticated(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())", "def test_create_reusableitem_not_toptenlist_owner(self):\n\n 
self.client.logout()\n\n self.client.force_authenticate(user=self.user_2)\n\n toptenitems = self.toptenlist_1.topTenItem.all()\n toptenitem_1_id = toptenitems[0].id\n\n response = create_reusable_item_1(self, toptenitem_1_id, **reusableitem_1_data)\n\n # the user cannot see the Top Ten Item because they did not create it\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_create_product_no_data(self):\n resp = self.app.post(\n \"/products\", json={}, content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_lpdaac_good(self):\n self.assertIsNone(api.inventory.check(self.lpdaac_order_good))", "def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)", "def test_search_by_bad_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['asdfadsfa'])\n self.assertEqual(recipe_id, None)", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_not_logged_in(self):\n\n # test show album\n 
self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_shopping_cart_is_empty(self):\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your shopping cart is empty.\")\n self.assertQuerysetEqual(response.context['contents'], [])", "def test_dir_search_doesnt_get_any_product(client, jwt, session, keycloak_mock): # pylint:disable=unused-argument\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org_anonymous),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n assert dictionary['accessType'] == 'ANONYMOUS'\n assert schema_utils.validate(rv.json, 'org_response')[0]\n\n rv_products = client.get(f\"/api/v1/orgs/{dictionary.get('id')}/products\", headers=headers,\n content_type='application/json')\n\n list_products = json.loads(rv_products.data)\n assert len([x for x in list_products if x.get('subscriptionStatus') != 'NOT_SUBSCRIBED']) == 0", "def test_retrieve_no_item(self):\n\n\t\titem = models.item(1)\n\t\tself.assertIsNone(item)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_visibility_of_not_available_4(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertContains(response, partner.get_absolute_url())", "def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def test_act_not_is_searching(self):\n # setup\n self.strategy._is_searching = False\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def 
test_cart_item_batch_write_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.start_batch_cart_item_write(self.catalog, '112', cart_id, 'foo', {}, 12345, 10000)", "def test_search_by_no_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients([])\n self.assertEqual(recipe_id, None)", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)", "def test_update_inventory(self):\n pass", "def test_no_overprovision(self):\n command_line = (\n self._MENU + [self._POOLNAME] + self._DEVICES + [\"--no-overprovision\"]\n )\n TEST_RUNNER(command_line)", "def test_get_inventory_with_empty_result(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=test-username&Password=test-password&Table=Inventory&Format=tab&Location=1\"\n file = open(\"./tests/fixtures/inventory_empty.tsv\", \"r\")\n m.register_uri(\"GET\", url, status_code=200, text=file.read())\n file.close\n\n cellartracker = CellarTracker(username=\"test-username\", password=\"test-password\")\n data = cellartracker.get_inventory()\n self.assertEqual([], data)", "def test_login_required_to_retrieve_ingredients(self):\n\n # Retrieve the ingredients belonging to user.\n response = self.client.get(URL_INGREDIENTS)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)" ]
[ "0.74154675", "0.72096145", "0.7158583", "0.693217", "0.6781853", "0.6733253", "0.66969836", "0.65152705", "0.64797616", "0.64615333", "0.6445138", "0.6428424", "0.63962966", "0.6393635", "0.63872916", "0.63662773", "0.633816", "0.6332057", "0.63152456", "0.6308194", "0.6305822", "0.628019", "0.6269049", "0.6258291", "0.6256637", "0.6233273", "0.62296253", "0.620884", "0.6199793", "0.6197927", "0.6193165", "0.6176767", "0.61602336", "0.61564666", "0.6147166", "0.6136275", "0.6135504", "0.61291677", "0.61061215", "0.6090133", "0.60775506", "0.6068297", "0.60391337", "0.60304284", "0.60303676", "0.6027516", "0.60200363", "0.6006541", "0.600454", "0.59984267", "0.5991086", "0.5977905", "0.5976567", "0.5974627", "0.5958144", "0.5943235", "0.5934646", "0.5915532", "0.59139115", "0.589439", "0.58920157", "0.58911186", "0.5874614", "0.5872277", "0.5864596", "0.58605427", "0.58565706", "0.5852515", "0.5842684", "0.5837103", "0.5826281", "0.58176976", "0.5811971", "0.58093137", "0.580378", "0.580134", "0.5786627", "0.57804257", "0.5772565", "0.5771649", "0.57716155", "0.57657415", "0.5761863", "0.57579255", "0.5753217", "0.57443154", "0.57379234", "0.5737048", "0.572432", "0.5711211", "0.5704157", "0.57025474", "0.5701758", "0.5698575", "0.56896734", "0.56885237", "0.5688216", "0.5680035", "0.56733507", "0.5672365" ]
0.78474796
0
Tests that a user cannot view a product with an invalid id
def test_view_product_with_invalid_id(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.get(
        '/api/v1/products/2kk',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Try an interger for product id')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_request_membership_form_with_an_invalid_user_id(self):\n pass", "def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.dataset.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 
404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def 
test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_api_url_no_id(self):\n url = 'http://api.shopstyle.com/action/apiVisitRetailer?pid=uid3600-33034440-48'\n assert extract_product_id_from_api_url(url) is None", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_details_nonnum_id(self):\n self.check_response(\n '/attributes/xyz',\n ('Please enter an integer value for Attribute ID',))", "def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)", "def test_not_logged_in(self):\n self.request.user = None\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def 
test_validate_when_user_not_found(self, view, mget_user):\n mget_user.side_effect = NoResultFound()\n\n with raises(HTTPUnauthorized):\n view.validate()", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_wrong_edx_id(self):\r\n data = {\r\n \"EdX-ID\": \"Invalid-Id\",\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('edX ID Invalid-Id not found', response.content)\r\n self.assertEqual(response.status_code, 400)", "def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_review_detail_fail(self):\n client = Client()\n response = client.get('/api/review/1/')\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/review/7/')\n self.assertEqual(response.status_code, 404)", "def test_get_restaurant_review_list_fail(self):\n client = Client()\n res_id = Restaurant.objects.get(name='TEST_REST').id\n response = client.get('/api/restaurant/'+str(res_id)+'/')\n self.assertEqual(response.status_code, 401)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_detailview_read_for_wrong_user(self):\n\n for user in self.users:\n detailview = reverse('account_detail', args=(user.uuid,))\n\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(self.users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(detailview)\n\n self.assertEqual(response.status_code, 403)", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return 
self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_add_product_view_for_unauthenticated_users(client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 302\n assert response.url == \"/accounts/login/?next=/products/add-product/\"", "def test_details_id_neg(self):\n self.check_response(\n '/attributes/-1',\n ('Please enter a number that is 1 or greater for Attribute ID',))", "def test_missing_params(self):\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 400)", "def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)", "def test_get_by_id_wrong_type(self):\n assert ExampleUserModel.get_by_id(\"xyz\") is None", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_getting_one_question_with_invalid_questionId(self):\n response = self.get_one_question_with_invalid_questionId()\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_detail_user_does_not_exists(self, client, users):\n user = users[0]\n url = reverse('users:detail', args=(user.pk + 100,))\n response = client.get(url)\n assert response.status_code == 404", "def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_request_users_user_invalid_resource(self):\n response = requests.get(self.url + '/users/John/invalid')\n\n self.assertEqual(response.status_code, 404)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def test_model_details_failure(self):\n\n # GIVEN invalid model ID\n model_id = 300\n\n # WHEN model details are retrieved\n response = self.api.dataid(self.app_label, self.model_name2, model_id)\n\n # THEN it should fail\n self.assertTrue(response.error)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_missing_tx_id(self):\n responses.add(responses.POST, self.endpoint, status=200)\n 
with self.assertRaises(QuarantinableError):\n with self.assertLogs() as cm:\n processor.process(encrypt(test_data['missing_tx_id']))\n self.assertIn('Decrypted json missing tx_id . Quarantining message', cm.output[0])", "def test_get_by_id_false(self):\n\n user = CustomUser.get_by_id(44444)\n\n self.assertIsNone(user)", "def test_request_membership_form_with_an_invalid_project_id(self):\n pass", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_login_view_fail(self):\n url = reverse('xds_api:login')\n\n response = self.client.post(url, self.userDict_login_fail)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_api_url_no_query(self):\n url = 'http://api.shopstyle.com/action/apiVisitRetailer'\n assert extract_product_id_from_api_url(url) is None", "def test_request_users_user_invalid(self):\n response = requests.get(self.url + '/users/invalid')\n\n self.assertEqual(response.status_code, 404)", "def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_product_detail_view(client, sample_product, user_company, authenticated_user):\n products = Product.objects.all()\n for product in products:\n product_detail_view = reverse('product-detail', kwargs={'pk': product.pk})\n response = client.get(product_detail_view)\n #The view should return 200 for each product that exists\n assert response.status_code == 200\n content = response.content.decode(response.charset)\n #With content specific for each product\n assert product.name in content\n #checking for \"page not found\" if product does not exist\n product_not_exist_detail_view = reverse('product-detail', kwargs={'pk':104})\n response = 
client.get(product_not_exist_detail_view)\n assert response.status_code == 404 \n #Authenticated user but not the owner of the product returns 404\n if authenticated_user and not user_company:\n product_detail_view = reverse('product-detail', kwargs={'pk': 6})\n response = client.get(product_detail_view)\n assert response.status_code == 404", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND", "def test_HTTP404_invalid_book_logged_in(self):\n test_uid = uuid.uuid4()\n login = self.client.login(\n username='testuser2',\n password='2HJ1vRV0Z&3iD')\n response = self.client.get(\n reverse('librarian-renew-book',\n kwargs={'pk': test_uid}))\n self.assertEqual(response.status_code, 404)", "def test_modify_access_noparams(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 400)", "def test_artifactpriority_detail_api_unauthorized(self):\n\n # get object\n artifactpriority_api_1 = Artifactpriority.objects.get(\n artifactpriority_name='artifactpriority_api_1'\n )\n # get response\n response = self.client.get(\n '/api/artifactpriority/'\n + str(artifactpriority_api_1.artifactpriority_id)\n + '/'\n )\n # compare\n self.assertEqual(response.status_code, 401)", "def assert_user_cannot_read(self, user, video):\n livesession = LiveSessionFactory(\n email=user.email,\n is_registered=True,\n user=user,\n video=video,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_pricehistory_non_existing_product(self):\n res = self.get(url=\"/products/10/pricehistory\", role=\"admin\")\n self.assertException(res, exc.EntryNotFound)", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_delete_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.delete(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def check_id(self, id):", "def test_delete_data_wrong_id(self):\n # get current ids\n list_current = [item['id'] for item in self.current_data]\n self.assertNotIn(10, list_current)\n\n response = self.client.delete(self.url + '10/')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())", "def 
test_modify_access_bad_action(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'unique_student_identifier': self.other_staff.email,\r\n 'rolename': 'staff',\r\n 'action': 'robot-not-an-action',\r\n })\r\n self.assertEqual(response.status_code, 400)", "def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_info_id_resolution_wrong_academy(self):\n url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': 1})\n response = self.client.get(url, **{'HTTP_Academy': 1})\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def test_verify_non_existing_user(self):\n data = {'rank_id': 1}\n res = self.post(url='/verify/5', data=data, role='admin')\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)", "def test_no_error_if_filter_post_by_wrong_user_id(api_client, user_id):\n r = api_client.get(path=f\"/users/{user_id}/posts\")\n assert r.status_code == 200", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n 
'/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)" ]
[ "0.7096591", "0.6907471", "0.6797254", "0.6654126", "0.665064", "0.6630695", "0.6613442", "0.6613442", "0.6613442", "0.66119534", "0.65806687", "0.6550584", "0.6499338", "0.64979565", "0.64979565", "0.64979565", "0.6494183", "0.64578736", "0.64060956", "0.63708884", "0.63435787", "0.63145936", "0.6301092", "0.6291938", "0.62912893", "0.6271717", "0.6244162", "0.622503", "0.6213876", "0.61919767", "0.61756897", "0.617545", "0.61559606", "0.61506844", "0.61506844", "0.61506844", "0.61506844", "0.61130977", "0.6095766", "0.60918885", "0.60807943", "0.6080605", "0.6076399", "0.60433537", "0.6040986", "0.6040447", "0.60370207", "0.6011963", "0.6011096", "0.60050684", "0.5999142", "0.5990415", "0.5990415", "0.5990415", "0.5990415", "0.5987946", "0.597694", "0.5962715", "0.59552634", "0.5944443", "0.5941243", "0.5938699", "0.59288096", "0.5928277", "0.59273714", "0.5923765", "0.5910267", "0.5906772", "0.5901053", "0.5898178", "0.5896816", "0.5886524", "0.5886238", "0.58825046", "0.58796513", "0.5878453", "0.5876979", "0.58741736", "0.5873357", "0.58724874", "0.58720964", "0.586503", "0.58635783", "0.5861368", "0.5851275", "0.58510625", "0.5841456", "0.58383393", "0.58340156", "0.5833301", "0.5832251", "0.5827333", "0.5824801", "0.5824133", "0.5811546", "0.5810603", "0.58098924", "0.5809837", "0.5808833", "0.58072245" ]
0.75562716
0
Test that product can be updated successfully
def test_update_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    product_update = dict(
        prod_name='NY_jeans',
        category='denims',
        stock=50,
        price=180
    )
    resp = self.client.put(
        '/api/v1/products/1',
        content_type='application/json',
        data=json.dumps(product_update),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'product updated!')
    self.assertEqual(resp.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_full_update_product(self):\n view = ProductUpdateView.as_view({'patch': 'update'})\n uri = reverse('products:update-product', kwargs={'pk': self.product_id})\n data = {\n \"id\": self.product_id,\n \"name\": \"Headphone updated\",\n \"description\": \"New version\",\n \"price\": \"800\",\n \"price_currency\": \"USD\",\n \"is_available\": True\n }\n request = self.factory.patch(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')\n data['price'] = float(data['price'])\n response.data['price'] = float(response.data['price'])\n self.assertEqual(response.data, data)", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test_update_product_success(self):\n product = sample_product(supplier_id=self.user, name='old-name', price='100.00')\n url = detail_url(product.id)\n new_product = {\n 'name': 'new_name',\n 'price': '1000.0',\n 'image': ''\n }\n res = self.client.put(url, new_product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['name'], new_product['name'])", "def test_full_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n self.assertEqual(self.product_1.sku, '44444444')\n self.assertEqual(self.product_1.category, self.category_1)\n self.assertEqual(self.product_1.description, 'Some product description')\n self.assertEqual(self.product_1.price, 129.99)\n self.assertEqual(self.product_1.featured, False)\n\n payload = {\n 'name': 'Updated name',\n 'category': self.category_2.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99,\n 'featured': True\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.put(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 
'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.category, self.category_2)\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)\n self.assertEqual(product.featured, True)", "def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def test_partial_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n\n payload = {\n 'name': 'Updated name',\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.patch(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_update_product(self):\n data = {\n 'pk': 1,\n 'name': 'New yogurt',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_update_product(self):\n # create a product to update\n test_product = ProductFactory()\n test_product_name = test_product.name\n test_product_description = test_product.description\n test_product_price = test_product.price\n resp = self.app.post(\n \"/products\", json=test_product.serialize(), content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # update the product\n new_product = resp.get_json()\n new_product[\"category\"] = 
\"Education\"\n resp = self.app.put(\n \"/products/{}\".format(new_product[\"id\"]),\n json=new_product,\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n updated_product = resp.get_json()\n self.assertEqual(updated_product[\"category\"], \"Education\")", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_case(self):\n pass", "def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')", "def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)", "def test_update_product_to_selling(self):\n self._require_login(self.user2)\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 2 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 2 modified')\n 
self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '20.0')\n self.assertEqual(response.data['category']['name'], 'general')", "def test_update_inventory(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update(app):\n\n assert False", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_shoppingcart_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe update\n data = self.shoppingcart_data\n data[\"quantity\"] = 20\n data[\"discount_value\"] = 9.99\n data[\"is_closed\"] = True\n self._update_model(\"shoppingcart\", id, data, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_update_product_with_characters_for_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_denims',\n category='denims',\n stock='many',\n price='pesa'\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_update_product_with_numbers_for_strings(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = 
self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name=4562,\n category=5248,\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category should be characters!')\n self.assertEqual(resp.status_code, 400)", "def test_update_one(self):\n pass", "def test_update_record(self):\n pass", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_beneficiaries_update_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-update', kwargs={'pk': 1})\n response = self.client.post(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_updating_the_supply_price(self):\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.unit_cost, Decimal('12.11'))\n self.assertEqual(Log.objects.all().count(), 0)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['items'][0]['unit_cost'] = Decimal('10.05')\n modified_po['items'][0]['id'] = 1\n modified_po['status'] = 'PROCESSED'\n del modified_po['items'][1]\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n 
self.assertEqual(resp.status_code, 200, msg=resp)\n resp_obj = resp.data\n self.assertEqual(resp_obj['revision'], 1)\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(resp_obj['pdf']['url'])\n \n self.assertEqual(resp_obj['id'], 1)\n self.assertEqual(resp_obj['supplier']['id'], 1)\n self.assertEqual(resp_obj['vat'], 7)\n self.assertEqual(resp_obj['discount'], 0)\n self.assertEqual(resp_obj['revision'], 1)\n self.assertEqual(Decimal(resp_obj['grand_total']), Decimal('107.54'))\n self.assertEqual(len(resp_obj['items']), 1)\n item1 = resp_obj['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['quantity'], Decimal('10.0000000000'))\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('10.05'))\n self.assertEqual(Decimal(item1['total']), Decimal('100.50'))\n \n #Confirm cost change for item and supply in the database\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.grand_total, Decimal('107.54'))\n item1 = po.items.order_by('id').all()[0]\n self.assertEqual(item1.id, 1)\n self.assertEqual(item1.quantity, 10)\n self.assertEqual(item1.unit_cost, Decimal('10.05'))\n supply = item1.supply\n supply.supplier = po.supplier\n self.assertEqual(supply.cost, Decimal('10.05'))\n \n self.assertEqual(Log.objects.all().count(), 1)\n log = Log.objects.all()[0]\n self.assertEqual(log.cost, Decimal('10.05'))\n self.assertEqual(log.supply, supply)\n self.assertEqual(log.supplier, po.supplier)\n self.assertEqual(log.message, \"Price change from 12.11USD to 10.05USD for Pattern: Maxx, Col: Blue [Supplier: Zipper World]\")\n\n # Confirm that there is still only one product for this supply and supplier\n # in the database\n products = Product.objects.filter(supply=supply, supplier=po.supplier)\n self.assertEqual(len(products), 1)", "def test_update_item_using_post(self):\n pass", "def test_update_system(self):\n pass", "def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_vault_update_vault_item(self):\n pass", "def test_update9(self):\n pass", "def test_update_product_unique_name(self):\n data = {\n 'pk': 1,\n 'name': 'Banana',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotEqual(models.Product.objects.filter(name=data['name']), data['pk'])", "def test_update_shoppingcart_view(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe the update\n self.url = reverse(\"update-shoppingcart\")\n data = { **self.shoppingcart_data }\n data[\"is_closed\"] = True\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, 
**self.auth_headers)\n if response.status_code == status.HTTP_200_OK:\n r_json = response.json()\n self.assertTrue(r_json[\"cart\"][\"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_update(self, mock_put):\n self.policies.update(id=333114, policy_update=self.policy_show_response)\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alert_policies/333114.json',\n headers=self.policies.headers,\n data=json.dumps(self.policy_show_response)\n )", "def test_updating_item_status(self):\n #test original quantity\n self.assertEqual(self.supply1.quantity, 10)\n self.assertEqual(self.supply2.quantity, 10)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['status'] = 'Received'\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'Receieved'\n \n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po = resp.data\n \n self.assertEqual(Supply.objects.get(pk=1).quantity, 20)", "def test_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_user_update_request(self):\n pass", "def test_client_update(self):\n pass", "def test_update_sample(self):\n response = self.client.post(reverse('update-proband', args=[self.gel_ir.id]),\n {'outcome': 'testoutcome',\n 'comment': 'testcomment',\n 'case_status': 'N',\n 'pilot_case': True,\n 'mdt_status': 'R',\n 'case_sent': False,\n 'no_primary_findings': False},\n follow=True)\n self.assertContains(response, 'Proband Updated')\n self.assertEquals(response.status_code, 200)\n proband = Proband.objects.get(id=self.proband.id)\n gelir = GELInterpretationReport.objects.get(id=self.gel_ir.id)\n self.assertEqual(proband.comment, 'testcomment')\n self.assertEqual(gelir.pilot_case, True)", "def test_update_cart(self, driver):\n\n logging.info(\"Start test case: Edit product in orderSummary\")\n data = self.test_data[\"Edit product in orderSummary\"]\n products = data[\"Products\"]\n logging.info(\"Test data: {}\".format(products))\n\n for i in range(len(products)):\n select_product(driver, products[i][\"Page\"], products[i][\"Product Name\"])\n add_product_to_cart(driver, products[i][\"Size\"], products[i][\"Color\"], products[i][\"Quantity\"])\n\n added_name = get_product_name(driver, index=data[\"Added Index\"] - 1)\n update_quantity_in_cart(driver, name=added_name, added_amount=data[\"Added Amount\"])\n expected_qty = get_product_detail_in_cart(driver, added_name)[\"Qty\"]\n\n removed_name = get_product_name(driver, index=data[\"Removed Index\"] - 1)\n remove_product_from_cart(driver, name=removed_name)\n expected_amt = get_product_amount_in_cart(driver)\n\n checkout_from_order_summary(driver)\n actual_amt = get_product_amount_in_order(driver)\n actual_qty = get_product_detail_in_order(driver, added_name)[\"Qty\"]\n logging.info(\"Verify product amount and product quantity on checkout page\")\n assert actual_amt == expected_amt, f\"your cart product amount is {actual_amt}, it should be {expected_amt}\"\n assert actual_qty == expected_qty, f\"The quantity of 
added product {added_name} is {actual_qty}, it should be {expected_qty}\"\n assert not verify_product_in_order(driver, removed_name)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.update\"\n current_page = 1\n search_info = json.dumps({\n 'id': 6,\n 'name': '一点都不可爱的蓝牙',\n 'alias': \"捣乱哟\"\n })\n print('start------------------------>update')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def test_update_cloud(self):\n pass", "def test_full_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tag.add(sample_tag(user=self.user))\n payload = {\n 'title':'chicken noodles',\n 'time_minutes':50,\n 'price':12.67,\n }\n url = detail_url(recipe.id)\n self.client.put(url,payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title,payload['title'])\n self.assertEqual(recipe.time_minutes,payload['time_minutes'])\n self.assertEqual(float(recipe.price),payload['price'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags),0)\n self.assertEqual(recipe.user,self.user)", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def test_update_success(self, mock_put):\n self.policies.update(\n id=self.policy_single_response['policy']['id'],\n name=self.policy_single_response['policy']['name'],\n incident_preference=self.policy_single_response['policy']['incident_preference']\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(\n self.policy_single_response['policy']['id']\n ),\n headers=self.policies.headers,\n data=json.dumps({\n \"policy\": {\n \"name\": self.policy_single_response['policy']['name'],\n \"incident_preference\": self.policy_single_response['policy']['incident_preference']\n }\n })\n )", "def test_update_client(self):\n pass", "def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def test_full_update_reecipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user = self.user))\n payload = {\n 'title': 'mutton curry',\n 'time_minuts': 45,\n 'price':450\n\n }\n url = detail_url(recipe.id)\n self.client.put(url , payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minuts, payload['time_minuts'])\n self.assertEqual(recipe.price, payload['price'])\n tags =recipe.tags.all()\n self.assertEqual(len(tags), 0 )", "def test_update(self):\n\n # Test that instances without application information cannot be started\n incomplete_instance = Instance(self.client, 'foo')\n with self.assertRaises(ValueError):\n incomplete_instance.update()\n\n value = self.instance.update()\n update_instance = self.client.update_instance\n update_instance.assert_called_once_with('nginx', 'nginx', 'latest',\n parameters={\n 'SETTING': 'value'\n },\n options={\n 'storageBucket': 'custom'\n })\n self.assertEqual(value, update_instance.return_value)", "def test_client_verification_document_update(self):\n pass", "def test_update_deployment(self):\n pass", "def test_update_batch(self):\n self.batch_data['batch_id'] = self.batch_info.id\n resp = self.query_with_token(\n 
self.access_token_master,\n update_batch_info.format(**self.batch_data))\n\n self.assertIn('data', resp)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['supplier']['name'],\n self.supplier.name)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['batchNo'],\n self.batch_info.batch_no)", "def test_update_order_failure(self):\n # create a order to update\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # update the order\n new_order = resp.get_json()\n new_order['product_id'] = 2\n resp = self.app.put('/orders/{}'.format(5),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_full_update(self):\n recipe = create_sample_recipe(user=self.user)\n recipe.ingredients.add(create_sample_ingredient(\n user=self.user,\n name='Fries'\n ))\n payload = {\n \"title\": \"New Cuisine\",\n \"price\": 5.00,\n \"time_minutes\": 90\n }\n recipe_url = create_detail_url(recipe.id)\n self.client.put(recipe_url, payload)\n recipe.refresh_from_db()\n ingredients = recipe.ingredients.all()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(len(ingredients), 0)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_update_software_asset(self):\n pass", "def test_update(client):\n rv = update(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_update_submission_service(self):\n pass", "def test_add_new_product(self):\n response=self.add_new_product()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(response.status_code, 201, result['New Product'])", "def test_updating_po_items(self):\n print '\\n'\n logger.debug('Updating items')\n print '\\n'\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n modified_po_data['status'] = 'PROCESSED'\n modified_po_data['items'][0]['purchasing_units'] = 'set'\n\n resp = self.client.put('/api/v1/purchase-order/1/', format='json', data=modified_po_data)\n \n po = resp.data\n item1 = po['items'][0]\n #self.assertIn('purchasing_units', item1)\n #self.assertEqual(item1['purchasing_units'], 'set')", "def test_update_user(self):\n pass", "def test_update_subscription(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_post_update_sucess(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n payload = {\n 'title': 'Updated title',\n 'content': 'Updated content'\n }\n self.client.credentials(\n HTTP_AUTHORIZATION = 'Token ' + self.user1.auth_token.key\n )\n response = self.client.patch(url, payload)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n updated_post = Post.objects.filter(\n id=self.post1.id,\n author=self.user1,\n title=payload.get('title'),\n content=payload.get('content')\n )\n 
self.assertTrue(updated_post.exists())", "def test_update_book(self):\n book_information = self.books_from_json[0]\n book_id = '60773a16cb838494e13d3652'\n self.books.update = MagicMock(return_value=None) # success on update\n update_book = self.books.update_details(book_id, self.books_from_json[0])\n self.assertEqual(\"Mock Book updated!\", update_book['flash_message'])", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_app_can_update_a_list(self):\n self.ne=json.dumps({\"newName\":\"pants\"})\n list_update=self.client.put('/shoppinglists/trou',\n data=self.ne,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"list doesnt exist\",str(list_update.data)) \n self.assertEqual(list_update.status_code,200)", "def test_products_ref_users_put(self):\n pass", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_updating_to_receive_items(self):\n modified_po = copy.deepcopy(base_purchase_order)\n del modified_po['items'][1]\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'RECEIVED'\n modified_po['status'] = 'RECEIVED'\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.put('/api/v1/purchase-order/1/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['id'], 1)\n self.assertEqual(po_data['status'], 'RECEIVED')\n \n item1 = po_data['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['status'], 
'RECEIVED')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.id, 1)\n self.assertEqual(po.status, 'RECEIVED')\n for item in po.items.all():\n self.assertEqual(item.status, \"RECEIVED\")\n \n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 20)\n log = Log.objects.all().order_by('-id')[0]\n self.assertEqual(log.action, \"ADD\")\n self.assertEqual(log.quantity, 10)\n self.assertEqual(log.supplier.id, 1)\n self.assertEqual(log.message, \"Received 10m of Pattern: Maxx, Col: Blue from Zipper World\")", "def test_full_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tags(user=self.user))\n payload = {\n 'title': 'Jollof Spaghetti',\n 'time_minutes': 30,\n 'price': 5.00,\n 'currency': 'USD',\n }\n url = detail_url(recipe_id=recipe.id)\n self.client.put(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(recipe.price, payload['price'])\n self.assertEqual(recipe.currency, payload['currency'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)", "def test_create_product_success(self):\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertEqual(res.data['supplier_id'], self.user.id)\n self.assertEqual(res.data['name'], PRODUCT_PAYLOAD['name'])\n self.assertEqual(res.data['price'], PRODUCT_PAYLOAD['price'])", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_client_partial_update(self):\n pass", "def test_invoice_item_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [ \"name\", \"description\", \"image_link\", \"price\" ])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n id_itm = self._create_model(\"invoiceitem\", data, [ \"quantity\", \"quote_price\" ])\n if id_itm:\n # then performe update\n data = self.invoice_item_data\n data[\"price_paid\"] = 88.77\n self._update_model(\"invoiceitem\", id, data, [\"quote_price\"])\n self.assertIsNotNone(id_itm)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_full_update_reteta(self):\n recipe = sample_reteta(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n payload = {\n 'title': 'Pepperoni',\n 'time_minutes': 3,\n 'price': 3.00\n }\n url = detail_url(recipe.id)\n self.client.put(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(recipe.price, payload['price'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)", "def test_update(test_store, andy, pandy, candy):\n n_updated = 
test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_update_activity(self):\n pass", "def test_updating_the_po(self):\n print '\\n'\n logger.debug('Updating PO')\n print '\\n'\n \n #Verifying po in database\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n self.assertEqual(self.po.grand_total, Decimal('129.58'))\n self.assertEqual(timezone('Asia/Bangkok').normalize(self.po.order_date).date(), datetime.datetime.now().date())\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.quantity, 10)\n self.assertEqual(item.total, Decimal('121.1'))\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n del modified_po_data['items'][1]\n modified_po_data['id'] = 1\n modified_po_data['items'][0]['id'] = 1\n modified_po_data['items'][0]['comments'] = 'test change'\n modified_po_data['items'][0]['quantity'] = 3\n modified_po_data['items'][0]['description'] = 'test description change'\n\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po_data)\n \n #Verify the response\n self.assertEqual(resp.status_code, 200, msg=resp)\n po = resp.data\n self.assertEqual(po['id'], 1)\n self.assertEqual(po['supplier']['id'], 1)\n self.assertEqual(po['vat'], 7)\n self.assertEqual(Decimal(po['grand_total']), Decimal('38.87'))\n self.assertEqual(po['discount'], 0)\n self.assertEqual(po['revision'], 1)\n self.assertEqual(len(po['items']), 1)\n #self.assertEqual(po['status'], 'PAID')\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(po['pdf']['url'])\n \n item2 = po['items'][0]\n \n self.assertEqual(item2['id'], 1)\n self.assertEqual(item2['quantity'], Decimal('3.0000000000'))\n self.assertEqual(item2['comments'], 'test change')\n self.assertEqual(item2['description'], 'test description change')\n self.assertEqual(Decimal(item2['unit_cost']), Decimal('12.11'))\n self.assertEqual(Decimal(item2['total']), Decimal('36.33'))\n \n #Verify database record\n po = PurchaseOrder.objects.get(pk=1)\n \n self.assertEqual(po.supplier.id, 1)\n #self.assertEqual(timezone('Asia/Bangkok').normalize(po.order_date), datetime.datetime.now().date())\n self.assertEqual(po.vat, 7)\n self.assertEqual(po.grand_total, Decimal('38.87'))\n self.assertEqual(po.items.count(), 1)\n \n item2 = po.items.all().order_by('id')[0]\n self.assertEqual(item2.id, 1)\n self.assertEqual(item2.description, 'test description change')\n self.assertEqual(item2.comments, 'test change')\n self.assertEqual(item2.quantity, 3)\n self.assertEqual(item2.unit_cost, Decimal('12.11'))\n self.assertEqual(item2.total, Decimal('36.33'))", "def test_full_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n payload = {\n 'title': 'Spaghetti',\n 'time_minutes': 25,\n 'price': 5.00,\n }\n url = detail_url(recipe.id)\n self.client.put(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(recipe.price, payload['price'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)", "def test_partly_update_book(self):\n data = {'isbn':'96712116-2'}\n response = self.client.patch(self.book.get_absolute_url(), data, format='json', content_type='application/json')\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n response = self.client.get(self.book.get_absolute_url())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, '96712116-2')", "def test_request_do_update(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert not DUT.request_do_update(1)", "def test_full_update_vehicle(self):\n vehicle = sample_vehicle(self.user)\n\n payload = {\n 'type': 'VSL',\n 'license_plate': 'BB-123-BB'\n }\n url = detail_url(vehicle.id)\n\n self.client.put(url, payload)\n\n vehicle.refresh_from_db()\n\n self.assertEqual(vehicle.type, payload['type'])\n self.assertEqual(vehicle.license_plate, payload['license_plate'])", "def test_update_creates_a_new_version(self):\n company = CompanyFactory(name='Foo ltd.')\n\n assert Version.objects.get_for_object(company).count() == 0\n\n response = self.api_client.patch(\n reverse('api-v4:company:item', kwargs={'pk': company.pk}),\n data={'name': 'Acme'},\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json()['name'] == 'Acme'\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['name'] == 'Acme'", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_update_attribute_data(self):\n pass", "def test_put_stock(self):\n body = StockProduct()\n response = self.client.open(\n '/omogollo2/ServerAPI/1.0.0/stock/{productId}'.format(product_id=56),\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_update_device_by_id(self):\n pass" ]
[ "0.8304104", "0.8161559", "0.8123623", "0.8105165", "0.809037", "0.79182774", "0.790087", "0.78989977", "0.7809965", "0.7688704", "0.7684496", "0.7684496", "0.7684496", "0.76524705", "0.7631343", "0.76261145", "0.7580674", "0.7554157", "0.75375104", "0.7501177", "0.7474084", "0.7378205", "0.73697907", "0.73492247", "0.7300464", "0.7293577", "0.7248671", "0.7248583", "0.7248542", "0.7239005", "0.72322553", "0.722738", "0.71417856", "0.7135945", "0.7116023", "0.7064847", "0.70352054", "0.69966424", "0.6981934", "0.6980251", "0.6979059", "0.69521254", "0.694214", "0.69307315", "0.692264", "0.69156724", "0.68875057", "0.6878231", "0.6877728", "0.6877102", "0.6877", "0.685934", "0.68362725", "0.68362725", "0.6831381", "0.6827203", "0.6814575", "0.6792701", "0.67857456", "0.6780096", "0.67720366", "0.6759461", "0.675188", "0.674159", "0.6741288", "0.67339545", "0.6726797", "0.6714701", "0.67139435", "0.67062175", "0.6700715", "0.66976434", "0.66875124", "0.66868967", "0.66800785", "0.66792613", "0.66537374", "0.66526085", "0.66344917", "0.66344076", "0.6624481", "0.6609558", "0.66063696", "0.6602279", "0.6592642", "0.6575863", "0.6567223", "0.6565553", "0.6556148", "0.6554997", "0.6552889", "0.655114", "0.6549729", "0.6544555", "0.6526382", "0.6524252", "0.6524252", "0.652302", "0.651247", "0.64935136" ]
0.81104404
3
Test that a product cannot be updated successfully with a blacklisted token
def test_cannot_update_product_with_blacklisted_token(self): resp = self.admin_register() reply = self.admin_login() token = reply['token'] product = dict( prod_name='NY_denims', category='denims', stock=20, price=150 ) resp = self.client.post( '/api/v1/products', content_type='application/json', data=json.dumps(product), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Product successfully added to Inventory!') self.assertEqual(resp.status_code, 201) resp = self.client.delete( '/api/v1/logout', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'You are successfully logged out!') self.assertEqual(resp.status_code, 200) product_update = dict( prod_name='NY_jeans', category='denims', stock=50, price=180 ) resp = self.client.put( '/api/v1/products/1', content_type='application/json', data=json.dumps(product_update), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!') self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = 
json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": 
\"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_a_renew_non_active_license(self):\n self.assertTrue(self.status.is_ready(), \"The license is active, non active state awaited\")\n with self.assertRaisesRegexp(IOError, 'PUT .* HTTP error 4[0-9][0-9]$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, self.end+2*self.ADAY)", "def test_wrong_token(self):\n token = str((jwt.encode(\n {\"email\": \"[email protected]\"},\n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token+\"wrong\",\n {\"password\": \"bagenda1234\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['error'],\n \"verification link is invalid.\")", "def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n 
data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_token_missing_edit(self):\n with self.client:\n id = self.get_id()\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": \"\"}))\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data.get('message'), \"Token is missing\")", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_deny_pending_payment(self):\n pass", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_no_unlisted(self):\n Version.objects.get(pk=self.version_1_2_2).update(\n channel=amo.RELEASE_CHANNEL_UNLISTED)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def test_update_ban(self):\n pass", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n 
self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_update_not_matching_token(\n self, registered_user: user_models.User,\n valid_header_dict_with_user_id: Dict[str, Any]):\n update_json_payload = get_valid_update_request(registered_user)\n response = get_response_from_json(update_json_payload,\n valid_header_dict_with_user_id)\n\n assert not check_response_valid_update(response)\n assert not check_fields_updated_correctly(registered_user,\n update_json_payload)\n assert response.status_code == 401", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n 
self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_update_device_token(self):\n pass", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_invalid_verify_patch_request(self, cred):\n resp = requests.patch(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 403", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False", "def test_update_exchange_not_exists(self):\n values = {\"exchange_name\": \"111\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(20, values)\n self.assertIn(ret[0], \"error\")", "def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)", "def test_wrong_token_permission_denied(self, client, token):\n with disable_logs(logging.WARNING):\n assert_hook_status(client, status=403, token=f\"{token}wrong\")", "def test_invalid_token_put(self):\n with self.client:\n id = self.get_id()\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": \"12345\"}))\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data.get('message'), \"Invalid token.Please login\")", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n 
'/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_rejected_devices_are_rejected(self):\n self.assertEqual(\n self._request(self._make_dummy_notification([DEVICE_REJECTED])),\n {\"rejected\": [DEVICE_REJECTED[\"pushkey\"]]},\n )", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_authenticated_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').update,\r\n token)", "def test_feature_disabled(self, url):\n response = self.client.get(url)\n assert response.status_code == 403\n response = self.client.post(url)\n assert response.status_code == 403", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_verification_failed(self):\n pass", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + 
self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_activate_form_bad(self):\r\n res = self.testapp.post(\r\n '/api/v1/suspend',\r\n content_type='application/json',\r\n status=406)\r\n success = json.loads(res.body)['error']\r\n self.assertTrue(\r\n success is not None,\r\n \"Should not be successful with no email address: \" + str(res))\r\n\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': '[email protected]'},\r\n status=404)\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'error' in success,\r\n \"Should not be successful with invalid email address: \" + str(res))", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_bad_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n bad_activation_keys = (\n 'emailactivationtokengenerator',\n 'emailactivation-tokengenerator',\n '3rd-bademailactivationkey'\n )\n for key in bad_activation_keys:\n self.assertFalse(token_generator.check_token(user, key))", "def test_bayes_updates_bad_data(self):\r\n self.assertRaises(ValueError, bayes_updates, self.bad)", "def test_renew_user_not_subscribed(self):\n self.braintree_customer.payment_method_token = 'valid_payment_token'\n self.assertTrue(SubscriptionManager.renew(self.braintree_customer))", "def test_with_unpermitted_token(self):\n email_text = self.email_template % self.token.uuid\n assert not save_from_email_reply(email_text)", "def test_modify_reusableitem_not_verified(self):\n email_address = EmailAddress.objects.get(user_id=self.user_1.id)\n email_address.verified = False\n email_address.save()\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_correct_token_working(self, client, token, update_repo_mock):\n assert_hook_status(client, status=204, token=token)\n assert update_repo_mock.called", "def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_modify_reusableitem_not_authenticated(self):\n 
self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_attempts_to_refresh_token_when_appropriate(self, mock):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/try_refresh_token.yaml'):\n with self.assertRaises(exceptions.TokenAndRefreshExpiredError):\n badgr.get_from_server(self._sample_url)\n self.assertTrue(mock.called)", "def test_out_of_date(self):\n self.assertTrue(update_available(0.0))", "def test_webhook_bad_status_update(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % (self.status_update.pk + 1),\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Unable to find matching status update ID %d.'\n % (self.status_update.pk + 1))", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_upgrade_to_non_registered(self):\n with pytest.raises(\n ClickException,\n match=r\".* with id .* is not registered. Please use the `add` command. 
Aborting...\",\n ):\n self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n \"nonexits/dummy:0.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_redeem_blvt_without_tokenName():\n\n client = Client(key, secret)\n client.redeem_blvt.when.called_with(\"\", \"1\").should.throw(ParameterRequiredError)", "def test_registration_modified_inactive(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n modify_registration(registration, {\n 'first_name': 'Conan',\n 'last_name': 'Osiris',\n 'email': '[email protected]'\n })\n assert api_delete.call_count == 0\n assert api_post.call_count == 0", "def test_registration_modified_inactive(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n modify_registration(registration, {\n 'first_name': 'Conan',\n 'last_name': 'Osiris',\n 'email': '[email protected]'\n })\n assert api_delete.call_count == 0\n assert api_post.call_count == 0", "async def test_api_state_change_with_bad_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.post(\n \"/api/states/test_entity.that_does_not_exist\", json={}\n )\n\n assert resp.status == HTTPStatus.BAD_REQUEST", "def test_account_modification_inexistent(flask_server):\n import requests\n\n data = {\n 'name': 'no',\n 'password': 'nope',\n 'new_name': 'foo2',\n 'new_password': 'bar2',\n 'new_code': '456',\n }\n\n req = requests.post('{}/account/modify'.format(API_URL), data=data)\n assert req.content == b'No such account in database'\n assert req.status_code == 400", "def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)", "async def test_not_update_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n except Exception as err:\n assert err.__str__() == 'You can not invoke update_provisioning_profile method, because you ' + \\\n 'have connected with account access token. 
Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'", "def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_resuableitem_vote_not_referenced(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 3 now submits a vote\n self.client.force_authenticate(user=self.user_3)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_can_not_book_running_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_reusableitem_unsupported_modification(self):\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'change_request': 'Some text'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def test_accepted_devices_are_not_rejected(self):\n self.assertEqual(\n self._request(self._make_dummy_notification([DEVICE_ACCEPTED])),\n {\"rejected\": []},\n )", "def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def test_update_offline_status(self):\n pass", "def test_verification_with_invalid_token(self) -> None:\n\n uuids: typing.List[str] = []\n for i in range(2, 5):\n uuids.append(str(uuid.uuid5(\n uuid.uuid1(1),\n f'abcd123456{i}'\n )))\n\n for temp_uuid in uuids:\n response: Response = self.client.get(f'/api/authors/verify/{temp_uuid}/')\n data = u.get_json(response)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data, {\n 'detail': 'Not found.'\n })", "def test_update_issue_by_unauthenticated_user_fails(self):\n response = self.client.patch(\n 
self.url,\n json={\"description\": TEST_ISSUE_DESCRIPTION, \"name\": TEST_ISSUE_NAME},\n )\n response_json = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_json[\"SubCode\"], \"InvalidToken\")", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_wrong_admin_put(self):\n\n with self.client:\n token = self.get_token()\n id = 4\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_revoke_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_can_not_cancel_current_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_only_rejected_devices_are_rejected(self):\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_REJECTED, DEVICE_ACCEPTED])\n ),\n {\"rejected\": [DEVICE_REJECTED[\"pushkey\"]]},\n )", "def test_inactive_account(self):", "def test_app_can_update_a_list(self):\n self.ne=json.dumps({\"newName\":\"pants\"})\n list_update=self.client.put('/shoppinglists/trou',\n data=self.ne,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"list doesnt exist\",str(list_update.data)) \n self.assertEqual(list_update.status_code,200)" ]
[ "0.7377064", "0.7333218", "0.7190565", "0.7068335", "0.70310974", "0.69859517", "0.67677313", "0.6604328", "0.65338165", "0.65273803", "0.6446804", "0.6441972", "0.64315784", "0.64071953", "0.6401291", "0.6390641", "0.6355562", "0.6326239", "0.6304661", "0.62910193", "0.6279447", "0.6265347", "0.6247338", "0.6210322", "0.62019455", "0.62015384", "0.6186053", "0.61702967", "0.6142926", "0.6135204", "0.6130958", "0.6109089", "0.6103145", "0.60839504", "0.60546553", "0.60499394", "0.6036985", "0.60345334", "0.60152686", "0.6000098", "0.5995663", "0.5994601", "0.59913415", "0.5988789", "0.59828925", "0.5980311", "0.59697574", "0.5966392", "0.595195", "0.5948716", "0.5946457", "0.59463567", "0.59376264", "0.5936726", "0.5929681", "0.5911594", "0.5904584", "0.58983207", "0.5896142", "0.58921695", "0.5890987", "0.5889609", "0.58861923", "0.58799237", "0.58787036", "0.5878241", "0.58773", "0.5862966", "0.58557886", "0.5855278", "0.5852915", "0.58497447", "0.5846475", "0.5844612", "0.5844612", "0.58324116", "0.5832372", "0.58153427", "0.5814279", "0.5809521", "0.58020675", "0.58010507", "0.57998484", "0.57997435", "0.57952636", "0.57921255", "0.57878953", "0.5785982", "0.5778533", "0.5778413", "0.5777722", "0.5773401", "0.57729036", "0.5772654", "0.5770692", "0.57651883", "0.57644314", "0.57644254", "0.5761011", "0.57609296" ]
0.80966264
0
Test that you can't update a nonexistent product
def test_update_nonexistant_product(self): resp = self.admin_register() reply = self.admin_login() token = reply['token'] product_update = dict( prod_name='NY_jeans', category='denims', stock=50, price=180 ) resp = self.client.put( '/api/v1/products/1', content_type='application/json', data=json.dumps(product_update), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], "This product doesn't exists in the Inventory!") self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')", "def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n 
self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # 
variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_update_non_existent(cards_db):\n i = 123 # any number will do, db is empty\n with pytest.raises(InvalidCardId):\n cards_db.update_card(i, Card(summary=\"bar\", owner=\"not me\"))", "def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n 
self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)", "def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_request_do_update_non_existent_id(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert DUT.request_do_update(100)", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def test_patch_a_resource_that_does_not_exist():\n pass", "def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_update_inventory_not_found(self):\n new_inventory = {'name': 'conditioner', 'quantity': 1, 'status': 'new'}\n data = json.dumps(new_inventory)\n resp = self.app.put('/inventories/0', data=data, content_type='application/json')\n self.assertEquals(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_product_id_in_database(monkeypatch):\n assert api_crawler.product_id_in_database(None) is False\n\n test_query = \"WS-C2960-24T-S\"\n assert api_crawler.product_id_in_database(test_query) is False\n\n mixer.blend(\"productdb.Product\", product_id=test_query, vendor=Vendor.objects.get(id=1))\n assert api_crawler.product_id_in_database(test_query) is True\n\n def unexpected_exception():\n raise Exception\n monkeypatch.setattr(Product.objects, \"filter\", lambda: unexpected_exception)\n\n assert api_crawler.product_id_in_database(test_query) is False", "def test_reusableitem_unsupported_modification(self):\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'change_request': 'Some text'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def 
test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_partial_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n\n payload = {\n 'name': 'Updated name',\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.patch(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')", "def test_update_inventory(self):\n pass", "def test_update_case(self):\n pass", "def test_update_cart_invalid_attributes(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'InvalidAttribute': 'Cart2'})\n self.assertEqual('Cart1', self.cart_item_manager.get_cart(user_id, cart_id)['CartName'])", "def test_update_no_customer(self):\n set_up_db()\n with self.assertRaises(ValueError):\n update_customer_credit(2, 5.50)", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)", "def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_update(app):\n\n assert False", "def test_update_exchange_not_exists(self):\n values = {\"exchange_name\": \"111\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(20, values)\n self.assertIn(ret[0], \"error\")", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_upgrade_to_non_registered(self):\n with pytest.raises(\n ClickException,\n match=r\".* with id .* is not registered. Please use the `add` command. 
Aborting...\",\n ):\n self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n \"nonexits/dummy:0.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_update_order_failure(self):\n # create a order to update\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # update the order\n new_order = resp.get_json()\n new_order['product_id'] = 2\n resp = self.app.put('/orders/{}'.format(5),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_primary_key_update_failure(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_update_product(self):\n data = {\n 'pk': 1,\n 'name': 'New yogurt',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n 
self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_product_is_uninstalled(self):\n try:\n result = self.installer.is_product_installed(PROJECT_NAME)\n except AttributeError:\n result = self.installer.isProductInstalled(PROJECT_NAME)\n self.assertFalse(result)", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_primary_key_update_failure(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_update_non_existing_article(self):\n response = self.update_article(\n self.article_data_2,\n \"this-is-a-non-existing-slug\"\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_do_update_non_existent_id(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_update(100)\r\n\r\n assert _error_code == 2005\r\n assert _msg == (\"RAMSTK ERROR: Attempted to save non-existent \"\r\n \"Function ID 100.\")", "def test_not_enough_change(self):\n item, change, _ = give_item_and_change('apple', '.2')\n self.assertIsNone(item)\n self.assertEqual(change, 0.2)", "def test_primary_key_update_failure(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(partition=uuid4())", "def test_primary_key_update_failure(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(partition=uuid4())", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_update_product_unique_name(self):\n data = {\n 'pk': 1,\n 'name': 'Banana',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotEqual(models.Product.objects.filter(name=data['name']), data['pk'])", "def test_update_inventory_with_no_name(self):\n new_inventory = {'id': 2, 'quantity': 2, 'status': 'new'}\n resp = self.app.put('/inventories/2', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "async def test_update_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.update('x', {})", "def 
test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_an_extra_delete_is_not_sent(self):\n partition = uuid4()\n cluster = 1\n\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster)\n\n obj = TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).first()\n\n self.assertFalse({k: v for (k, v) in obj._values.items() if v.deleted})\n\n obj.text = 'foo'\n obj.save()\n #execute_count will check the execution count and\n #assert no more calls than necessary where made", "def test_update_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'product updated!')\n self.assertEqual(resp.status_code, 200)", "def test_update_400_doesnt_create_a_new_version(self):\n company = CompanyFactory()\n\n response = self.api_client.patch(\n reverse('api-v4:company:item', kwargs={'pk': company.pk}),\n data={'trading_names': ['a' * 600]},\n )\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert Version.objects.get_for_object(company).count() == 0", "def test_edit_non_existing_item(self):\n response = self.client.put('/api/v1/category/200',\n data=json.dumps(category[3]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))", "def test_update_no_pk(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n with self.assertRaises(Exception):\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)", "def test_full_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n self.assertEqual(self.product_1.sku, '44444444')\n self.assertEqual(self.product_1.category, self.category_1)\n self.assertEqual(self.product_1.description, 'Some product description')\n self.assertEqual(self.product_1.price, 129.99)\n self.assertEqual(self.product_1.featured, False)\n\n payload = {\n 'name': 'Updated name',\n 'category': self.category_2.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99,\n 'featured': True\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.put(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, 
content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.category, self.category_2)\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)\n self.assertEqual(product.featured, True)", "def test_full_update_product(self):\n view = ProductUpdateView.as_view({'patch': 'update'})\n uri = reverse('products:update-product', kwargs={'pk': self.product_id})\n data = {\n \"id\": self.product_id,\n \"name\": \"Headphone updated\",\n \"description\": \"New version\",\n \"price\": \"800\",\n \"price_currency\": \"USD\",\n \"is_available\": True\n }\n request = self.factory.patch(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')\n data['price'] = float(data['price'])\n response.data['price'] = float(response.data['price'])\n self.assertEqual(response.data, data)", "def test_package_can_not_upgraded_cause_required(self):\n with self.with_config_update():\n with patch(\n \"aea.cli.upgrade.ItemRemoveHelper.check_remove\",\n return_value=(\n set([PackageId(\"connection\", PublicId(\"test\", \"test\", \"0.0.1\"))]),\n set(),\n dict(),\n ),\n ), pytest.raises(\n ClickException,\n match=r\"Can not upgrade .* because it is required by '.*'\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_06_replacement_product_wizard(self):\n # Create a purchase order with two lines.\n order = self.create_po()\n sellable_product = self.product_obj.browse(self.sellable_product)\n draft_product = self.product_obj.browse(self.draft_product)\n self.create_pol(order, sellable_product)\n self.create_pol(order, draft_product)\n self.assertNotIn('obsolete',\n order.order_line.mapped('product_id.state2'))\n\n # Update sellable product to obsolete\n # NOTE: This check check the write() method of the product.product\n # record.\n self.assertIn(sellable_product, order.order_line.mapped('product_id'))\n self.assertEqual(sellable_product.state2, 'sellable')\n sellable_product.state2 = 'obsolete'\n self.assertEqual(sellable_product.state2, 'obsolete')\n\n # Check that the purchase order line now have a obsolete line.\n obsolete_order_line = order.order_line.filtered(\n lambda line: line.product_id.state2 == 'obsolete')\n self.assertTrue(obsolete_order_line)\n self.assertEqual(obsolete_order_line.product_id, sellable_product)\n\n # Simulate click on the \"Check Discontinued Products\" button to run the\n # replacemenet product wizard.\n wiz = self.wiz_obj.with_context({\n 'active_id': order.id,\n 'active_ids': [order.id],\n 'active_model': 'purchase.order',\n }).create({})\n\n # Chech that the wizard auto create correctly the replacement lines.\n # The replacement line must be linked/generate to the obsolete purchase\n # order line.\n self.assertTrue(wiz.lines)\n self.assertEqual(len(wiz.lines), 1)\n self.assertEqual(obsolete_order_line, 
wiz.lines.mapped('line_id'))\n\n # TODO add a case to try to add a new replacement line manually. this\n # must be fail.\n\n # Try to add an obsolete replacement product in the replacement line.\n # This will raise an exception becuase only not obsolete products can\n # be used as a valid replacement.\n wiz_line = wiz.lines[0]\n with self.assertRaises(exceptions.ValidationError):\n wiz_line.replacement_product_id = self.obsolete_replacement\n\n # Add a sellable replacement product in the replacement line.\n wiz_line.replacement_product_id = self.sellable_replacement\n self.assertEqual(wiz_line.replacement_product_id.id,\n self.sellable_replacement)\n\n # Make the replacement in the purchase order by clicking the button\n # \"Replace\" in the replacement wizard and check that the changes were\n # applied to the purchase order line.\n wiz.replacement()\n self.assertEqual(obsolete_order_line.product_id,\n wiz_line.replacement_product_id)\n self.assertEqual(obsolete_order_line.discontinued_product_id,\n wiz_line.discontinued_product_id)", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_cannot_update_an_existing_service_when_missing_service_price(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"Error. 
Missing Service price.\", str(response3.data))", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)", "def test_update_community_name_to_an_existing_one_fails(self):\n self.login_as(\"bob\")\n\n bad_payload = self.update_payload.copy()\n bad_payload[\"name\"] = \"group2\"\n\n with self.assertNumQueries(5):\n response = self.client.put(self.url, bad_payload)\n self.assert_validation_failed(response, data={\n \"name\": [\"community with this name already exists.\"]\n })\n self.assertEqual(Community.objects.filter(name=\"group2\").count(), 1)", "def test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})", "def update_existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed('{{cookiecutter.package_name}}')\n )", "def test_update_release_note_not_found(db, admin_client):\n release_note = ReleaseNoteFactory()\n release_note_id = to_global_id(\"ReleaseNoteNode\", release_note.kf_id)\n release_note.delete()\n\n variables = {\n \"releaseNote\": release_note_id,\n \"input\": {\"description\": \"Updated description\"},\n }\n resp = admin_client.post(\n \"/graphql\",\n format=\"json\",\n data={\"query\": UPDATE_RELEASE_NOTE, \"variables\": variables},\n )\n\n assert \"errors\" in resp.json()\n errors = resp.json()[\"errors\"]\n assert \"does not exist\" in errors[0][\"message\"]", "def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_vault_update_vault_item(self):\n pass", "def test_update_when_unavailable(self):\n self.api.update = Mock(\n \"google_wifi.GoogleWifiAPI.update\", side_effect=self.update_side_effect()\n )\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n sensor.update()\n assert sensor.state is None", "def 
test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_resuableitem_vote_not_referenced(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 3 now submits a vote\n self.client.force_authenticate(user=self.user_3)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)", "def test_update_one(self):\n pass", "def test_cannot_update_details_of_service_that_does_not_exist(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. 
You have added a new Service Live at the yard to the store.\", str(response2.data))\n response3 = self.client.put(store_url + store_id + '/service/5a2bc733791e4bbc9a26f7a5/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"404 NOT FOUND\")", "def test_updating_non_existing_article(self):\n saved = self.create_article()\n token = saved[2]\n url = 'articles/notsaved'\n response = self.test_client.put(url, self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)" ]
[ "0.74883664", "0.74142987", "0.7132973", "0.7130076", "0.6898503", "0.6843232", "0.676572", "0.6739922", "0.67206293", "0.67113554", "0.66875863", "0.6684773", "0.668369", "0.66656035", "0.6630668", "0.6606419", "0.65949506", "0.6585263", "0.65486664", "0.65191656", "0.6512701", "0.6504794", "0.6485288", "0.6474551", "0.64305764", "0.6416947", "0.64099246", "0.64092165", "0.6385925", "0.6373567", "0.6359523", "0.6353722", "0.6345625", "0.63407725", "0.63242793", "0.63229334", "0.6317926", "0.6316732", "0.6309869", "0.63039637", "0.62899786", "0.6289501", "0.62874216", "0.62811977", "0.62493426", "0.62465656", "0.6237586", "0.6230094", "0.62273264", "0.62181264", "0.6211421", "0.6210224", "0.6205794", "0.62044", "0.6200733", "0.6188033", "0.6181313", "0.6162835", "0.61526406", "0.61510557", "0.6150738", "0.6142237", "0.6140746", "0.6139509", "0.612958", "0.6125705", "0.612534", "0.611744", "0.6116217", "0.6115174", "0.61032116", "0.6102237", "0.6100893", "0.6091807", "0.60823095", "0.6076891", "0.6075004", "0.6065536", "0.6064302", "0.605071", "0.6050686", "0.6047932", "0.60426915", "0.604064", "0.60267854", "0.60236853", "0.6013514", "0.6009296", "0.60025", "0.60010296", "0.5994626", "0.5980273", "0.5976938", "0.59746003", "0.59746003", "0.59746003", "0.5974551", "0.5973658", "0.59735745", "0.59728616" ]
0.7994515
0
Test that product cannot be updated with unauthorised user
def test_unauthorized_product_update(self):
    resp = self.admin_create_user()
    reply = self.attendant_login()
    token = reply['token']
    product_update = dict(
        prod_name='NY_jeans',
        category='denims',
        stock=50,
        price=180
    )
    resp = self.client.put(
        '/api/v1/products/1',
        content_type='application/json',
        data=json.dumps(product_update),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Unauthorized Access!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_author_unlogged(self):\n data = {'name': 'Ken Thompson'}\n\n request = self.client.patch(self.epoint, data)\n\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid 
Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n 
p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_none_admin_edit(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='[email protected]', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n 
password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_editing_supplies_unauthenticated(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'bbb'})\n response = SupplyDetailsView.as_view()(request, pk=id)\n # unauthenticated user should get unauthorized error\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)", "def test_user_cannot_write(app, resource):\n with app.user():\n data = {}\n\n # Try to post something\n app.client.post('/' + resource,\n data=data,\n assert_status=403)\n\n # Create fake item, try to patch/delete it\n _id = app.data.driver.db[resource].insert({})\n app.client.patch('/%s/%s' % (resource, _id),\n data=data,\n assert_status=403)\n app.client.delete('/%s/%s' % (resource, _id),\n assert_status=403)", "def test_update_restaurant_unauthorized(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Pho 2000'\n 
db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n info = {'name': 'Php 2048'}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 401)", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_update_movie_not_authenticated(self):\n data = {'title': 'The Mask 2'}\n response = self.client.patch(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_updateview_read_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(updateview)\n\n self.assertEqual(response.status_code, 403)", "def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')", "def testUpdateAccessDenied(self):\n self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_403()", "def test_modify_reusableitem_not_verified(self):\n email_address = EmailAddress.objects.get(user_id=self.user_1.id)\n email_address.verified = False\n email_address.save()\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n 
self.failUnlessEqual(response.status_code, 401)", "def test_handle_edit_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"brS\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_update_person_not_authenticated(self):\n\n data = {'first_name': 'Daenerys'}\n response = self.client.patch(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_update_answer_unauthorized(self):\n _, other_user_token, question_id, answer_id = self.add_answer()\n\n headers = self.get_request_header(other_user_token)\n data = json.dumps(self.update_answer)\n url = f'/questions/{question_id}/answers/{answer_id}'\n\n response = self.test_client.put(url, headers=headers, data=data)\n\n self.assertEqual(response.status_code, 403)", "def test_cannot_change_usage(self):\n p = Permission.objects.get(name='Can change usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n data = {'month': 2}\n response = self.client.patch(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n data=json.dumps(data),\n content_type='application/json',\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = 
self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)", "def test_update_issue_by_unauthenticated_user_fails(self):\n response = self.client.patch(\n self.url,\n json={\"description\": TEST_ISSUE_DESCRIPTION, \"name\": TEST_ISSUE_NAME},\n )\n response_json = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_json[\"SubCode\"], \"InvalidToken\")", "def test_not_logged_in(self):\n self.request.user = None\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_patch_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'user': str(new_user.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_resuableitem_vote_not_referenced(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 3 now submits a vote\n self.client.force_authenticate(user=self.user_3)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_post_update_unauthenticated(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n payload = {\n 'title': 'Updated title',\n 'content': 'Updated content'\n }\n response = self.client.patch(url, payload)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n updated_post = Post.objects.filter(\n id=self.post1.id,\n author=self.user1,\n title=payload.get('title'),\n content=payload.get('content')\n )\n self.assertFalse(updated_post.exists())", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_reusableitem_unsupported_modification(self):\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'change_request': 'Some text'}, format='json')\n\n 
updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_update_request(self):\n pass", "def test_updateview_write_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n valid_data = {'email': user.email, 'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n self.assertEqual(valid_data_response.status_code, 403)\n self.assertEqual(invalid_data_response.status_code, 403)", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def test_update_not_matching_token(\n self, registered_user: user_models.User,\n valid_header_dict_with_user_id: Dict[str, Any]):\n update_json_payload = get_valid_update_request(registered_user)\n response = get_response_from_json(update_json_payload,\n valid_header_dict_with_user_id)\n\n assert not check_response_valid_update(response)\n assert not check_fields_updated_correctly(registered_user,\n update_json_payload)\n assert response.status_code == 401", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_cart_item_batch_write_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.start_batch_cart_item_write(self.catalog, '112', cart_id, 'foo', {}, 12345, 10000)", "def test_authenticated_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').update,\r\n token)", "def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')", "def testUpdateUserIsDenied(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n info = TUserUpdate(u'username', u'secret', u'Username',\n u'[email protected]')\n self.store.commit()\n with login(u'user', uuid4(), self.transact) as session:\n deferred = self.facade.updateUser(session, info)\n error = 
yield self.assertFailure(deferred, TPathPermissionDenied)\n self.assertEqual(u'username', error.path)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200", "def test_handle_refresh_not_admin(self):\n test_user = User(user)\n self.db.retrieve.return_value = test_user\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team refresh\",\n user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_admin_approval_not_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)\n self.assertIs(profile.user.is_active, False)", "def test_update_existent_campaign_by_non_admin_fails(self):\n response = self.client.patch(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [self.test_org.id],\n \"url\": None,\n },\n headers={\"Authorization\": self.non_admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response_body[\"Error\"], \"CampaignsRestAPI PATCH: User not a Org Manager\"\n )\n self.assertEqual(response_body[\"SubCode\"], \"UserNotPermitted\")", "def test_non_owner_authenticated_user_update_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n admin = UserFactory.create()\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_admin.id != blogpost.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').update, blogpost)", "def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_book_edit_for_student(self):\n client = APIClient()\n client.login(username=self.students[0].username, password=\"salam*123\")\n response = 
client.patch(\"/books/4/\", data={\"copies\": 2})\n self.assertEqual(response.status_code, 403)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_patch_request_by_non_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.patch(url, self.restaurant_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_disable(self):\n self.assertTrue(self.user1.active)\n self.assertFalse(self.user1.ad_deleted)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Enabled': False,\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertFalse(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)", "def test_owner_edit_assessment_invalid(self):\n req, resp = data.get_assessment(self.contract['id'])\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_edit_interest_list_not_owner(self):\n id = 
self.list_2.pk\n url = reverse('xds_api:interest-list', args=(id,))\n _, token = AuthToken.objects.create(self.user_1)\n response = \\\n self.client.patch(url,\n data={\"test\": \"test\"},\n HTTP_AUTHORIZATION='Token {}'.format(token))\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])", "def test_call_update_if_organization(self):\n Project.can_access.return_value = False\n self.mixin.check_can_access.when\\\n .called_with(MagicMock(user=self.user))\\\n .should.throw(PermissionDenied)\n Project.objects.update_user_projects.asset_called_once_with(\n self.user,\n )", "def test_edit_object_with_require_auth_false(self):\n self.test_object.require_auth = False\n self.test_object.save()\n response = self.client.put(\n f\"/permissiontest/{self.test_object.id}/\", self.test_update_object\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_editing_supplies_user(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'aaa'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyDetailsView.as_view()(request, pk=id)\n # normal user should get forbidden error\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)", "def test_api_user_is_not_admin(self):\n\n\t\t# register the user\n\t\treg_user = self.register_user('lilbaby', '[email protected]', 'test#op3456', 'test#op3456')\n\t\tdata = json.loads(reg_user.data.decode())\n\t\tself.assertEqual(reg_user.status_code, 201)\n\t\tself.assertIn('successfully registered', str(data))\n\n\t\t# login user\n\t\tlogin_res = self.client.post(\n\t\t\tf'{URL_AUTH}login',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\tusername='lilbaby',\n\t\t\t\t\tpassword='test#op3456'\n\t\t\t\t)\n\t\t\t),\n\t\t\tcontent_type='application/json'\n\t\t)\n\t\tlogin_data = json.loads(login_res.data.decode())\n\t\ttoken = login_data['auth_token']\n\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='text',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['error'] == 'forbidden')\n\t\tself.assertTrue(book.status_code == 403)", 
"def test_unauthorized_mod(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.modify_user(user=existing_user_id, password=id(self), code=403)", "def test_post_no_permission(self):\n self.user.user_permissions.clear()\n response = self._post()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()", "def test_invalid_update_request_with_taken_username(self):\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(self.author.get_key()))\n response: Response = self.client.patch(BASE_URL + '/update/', data={\n 'username': self.temporary_author.username\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, msg=data)\n self.assertEqual(data, {'detail': f\"User '{self.temporary_author.username}' already exists.\"})", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_anonymous_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').update,\r\n token)", "def test_unauthorized_update_article(self):\n response = self.client.put(\n reverse(\n \"article\",\n kwargs={\n \"slug\": self.stored_articles[0].slug\n }),\n self.article_data,\n content_type='application/json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_update_user(self):\n pass", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)" ]
[ "0.7472202", "0.74601954", "0.730332", "0.72697306", "0.71795666", "0.71784", "0.7131658", "0.70938385", "0.7079321", "0.6975282", "0.695774", "0.6886112", "0.68209416", "0.68174875", "0.6791423", "0.6772019", "0.6765322", "0.6752393", "0.67434675", "0.66997343", "0.6691479", "0.66883683", "0.66813403", "0.66810966", "0.6675983", "0.667394", "0.6670071", "0.66646767", "0.6650377", "0.6643716", "0.6635298", "0.6621765", "0.66144925", "0.6612154", "0.66067713", "0.6602565", "0.66005003", "0.6597629", "0.6595452", "0.6557706", "0.65451497", "0.65287715", "0.6521416", "0.6511007", "0.65063673", "0.64916104", "0.64888805", "0.64864546", "0.64685684", "0.64492553", "0.6442219", "0.64352214", "0.64347476", "0.6431119", "0.6424657", "0.64162475", "0.6414545", "0.64087963", "0.64048845", "0.6393928", "0.63882214", "0.63871056", "0.63855314", "0.6368334", "0.63638854", "0.63623774", "0.6360101", "0.6359659", "0.6350324", "0.63396966", "0.6339228", "0.6327964", "0.6324441", "0.6315564", "0.63057894", "0.6305248", "0.6304876", "0.6294999", "0.6294242", "0.6290484", "0.6284192", "0.62778413", "0.6273782", "0.62716156", "0.6263914", "0.6263777", "0.6261466", "0.6254107", "0.6250822", "0.6246719", "0.62442654", "0.62207776", "0.6216557", "0.6211518", "0.62090105", "0.62016183", "0.61982065", "0.61973923", "0.6185711", "0.61759055" ]
0.80988926
0
Test that product cannot be updated with empty fields
def test_update_product_with_empty_fields(self): resp = self.admin_register() reply = self.admin_login() token = reply['token'] product = dict( prod_name='NY_denims', category='denims', stock=20, price=150 ) resp = self.client.post( '/api/v1/products', content_type='application/json', data=json.dumps(product), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Product successfully added to Inventory!') self.assertEqual(resp.status_code, 201) product_update = dict( prod_name='', category='', stock=50, price=180 ) resp = self.client.put( '/api/v1/products/1', content_type='application/json', data=json.dumps(product_update), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'prod_name and category cannot be empty!') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_update_with_empty_field(self):\n\n self.client.login(username='notlogged', password='notlogged')\n group_fields = ['name', 'description']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)\n\n # Group is not updated.\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, 'test')\n self.assertEqual(updated_group.description, 'test')\n self.assertIsNone(updated_group.last_edit_date)", "def test_partial_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n\n payload = {\n 'name': 'Updated name',\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.patch(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')", "def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n 
headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)", "def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)", "def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')", "async def test_update_missing_field(self):\n await self.collection.create({'id': 'foo', 'value': 'bar'})\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.update('foo', {})\n self.assertEqual(\n 'Error: \"value\": Required', str(cm.exception))", "def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})", "def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n 
self.assertFalse(demo_product.description)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)", "def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)", "def test_empty_data(self, client, users):\n user = users[0]\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' in str(response.content)", "def test_invalid_update_kwarg(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)", "def test_update_cart_invalid_attributes(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'InvalidAttribute': 'Cart2'})\n self.assertEqual('Cart1', self.cart_item_manager.get_cart(user_id, cart_id)['CartName'])", "def test_full_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n self.assertEqual(self.product_1.sku, '44444444')\n self.assertEqual(self.product_1.category, self.category_1)\n self.assertEqual(self.product_1.description, 'Some product description')\n self.assertEqual(self.product_1.price, 129.99)\n self.assertEqual(self.product_1.featured, False)\n\n payload = {\n 'name': 'Updated name',\n 'category': self.category_2.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99,\n 'featured': True\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.put(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.category, self.category_2)\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)\n self.assertEqual(product.featured, True)", "def test_update_customer_invalid_payload(self):\n update_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 1})\n\n 
payload = {\"first_name\": \"Dennis\", \"last_name\": \"\", \"is_active\": True}\n\n response = self.client.put(update_customer_url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)", "def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_full_update_product(self):\n view = ProductUpdateView.as_view({'patch': 'update'})\n uri = reverse('products:update-product', kwargs={'pk': self.product_id})\n data = {\n \"id\": self.product_id,\n \"name\": \"Headphone updated\",\n \"description\": \"New version\",\n \"price\": \"800\",\n \"price_currency\": \"USD\",\n \"is_available\": True\n }\n request = self.factory.patch(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')\n data['price'] = float(data['price'])\n response.data['price'] = float(response.data['price'])\n self.assertEqual(response.data, data)", "def test_do_cell_update_ignores_unknown_fields(self, mock_update):\n client = mock.Mock()\n inventory = mock.Mock()\n inventory.cells = cells.CellManager(mock.ANY,\n mock.ANY,\n 'http://127.0.0.1/')\n client.inventory = mock.Mock(name='inventory')\n client.inventory.return_value = inventory\n invalid_input = Namespace(region=1,\n id=1,\n name='mock_cell',\n invalid=True)\n cells_shell.do_cell_update(client, invalid_input)\n vars(invalid_input).pop('region')\n vars(invalid_input).pop('invalid')\n mock_update.assert_called_once_with(**vars(invalid_input))", "def test_update_no_customer(self):\n set_up_db()\n with self.assertRaises(ValueError):\n 
update_customer_credit(2, 5.50)", "def test_update_customer_fails(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n\n with self.assertRaises(IntegrityError):\n customer.email = None\n customer.save()", "def test_update_case(self):\n pass", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_reusableitem_changerequest_bad_data(self):\n\n self.client.force_authenticate(user=self.user_1)\n\n # name is empty string\n response = self.client.patch(get_reusable_item_1_url(self), {'name': '', 'link': 'hello'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # name is None\n response = self.client.patch(get_reusable_item_1_url(self), {'name': None, 'link': 'hello'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # no values\n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # no new values\n response = self.client.patch(get_reusable_item_1_url(self), {'name': self.reusableitem_1.name}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_updating_when_service_attributes_value_is_empty(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"\",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. 
You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"attribute value cannot be Empty.\", str(response3.data))", "def test_update_one(self):\n pass", "def test_update_inventory_with_no_name(self):\n new_inventory = {'id': 2, 'quantity': 2, 'status': 'new'}\n resp = self.app.put('/inventories/2', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_cannot_update_an_existing_service_when_missing_service_price(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"Error. 
Missing Service price.\", str(response3.data))", "def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)", "def test_invalid_update_kwarg(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(numbers=20)", "def test_update_record(self):\n pass", "def test_invalid_update_kwarg(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(numbers=20)", "def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_submit_bad_data_when_updating_membership(self):\n self.login_as(\"bob\")\n\n # let's try to change bob's membership to ben\n # user is a read-only field so it is simply ignored:\n payload = {\"user\": {\"id\": self.USERS[\"ben\"][\"id\"]}}\n with self.assertNumQueries(6):\n response = self.client.put(self.url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"user\"][\"id\"], self.USER_ID)\n\n # now, let's try to move bob's membership to another community\n # community is a read-only field so it is also ignored:\n payload = {\"community\": self.COMMUNITIES[\"group2\"][\"id\"]}\n with self.assertNumQueries(6):\n response = self.client.put(self.url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"community\"], self.GROUP_ID)\n\n # now, let's try to submit bad value:\n payload = {\"is_admin\": \"Of course!\"}\n with self.assertNumQueries(4):\n response = self.client.put(self.url, payload)\n self.assert_validation_failed(response, data={\n \"is_admin\": [\"Must be a valid boolean.\"]\n })\n self.assertTrue(Membership.objects.get(\n community_id=self.GROUP_ID, user_id=self.USER_ID).is_admin)", "def test_update_values_validation(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf')", "def test_update_product(self):\n data = {\n 'pk': 1,\n 'name': 'New yogurt',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_update_values_validation(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n with self.assertRaises(ValidationError):\r\n 
TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf')", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_exchange_value_empty(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n values = {\"exchange_name\": \"\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(new_exchange.id, values)\n self.assertIn(ret[0], \"error\")", "def test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})", "def test_updating_when_service_attributes_name_is_empty(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. 
You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"attribute name cannot be empty.\", str(response3.data))", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_signup_required_fields(self, mock_update) -> None:\n user_data = {\n 'username': 'alexphi57',\n 'first_name': 'alex',\n 'last_name': 'bill',\n 'email': '[email protected]',\n 'password1': 'barfoobas',\n 'password2': 'barfoobas',\n 'phone': '+18001234567',\n 'resorts': []\n }\n resp = self.client.post(reverse('signup'), data=user_data)\n self.assertEqual(resp.status_code, 302)\n\n # Include resorts causes error\n user_data['resorts'] = ['test1']\n resp = self.client.post(reverse('signup'), data=user_data)\n self.assertEqual(resp.status_code, 200)\n\n # Include contact_days only causes error\n user_data['contact_days'] = [\"Mon\"]\n resp = self.client.post(reverse('signup'), data=user_data)\n self.assertEqual(resp.status_code, 200)\n\n # Include contact_method only causes error\n del user_data['contact_days']\n user_data['contact_method'] = 'email'\n resp = self.client.post(reverse('signup'), data=user_data)\n self.assertEqual(resp.status_code, 200)\n\n # Include both contact_days and contact_method works\n user_data['contact_days'] = [\"Mon\"]\n user_data['username'] = 'alexphi18'\n user_data['phone'] = '+18009876543'\n user_data['email'] = '[email protected]'\n resp = self.client.post(reverse('signup'), data=user_data)\n self.assertEqual(resp.status_code, 302)", "def test_client_partial_update(self):\n pass", "def test_primary_key_update_failure(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(partition=uuid4())", "def test_update_order_with_no_status(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. 
Missing required param')", "def test_mixed_value_and_null_update(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def test_update_condition_not_defined(self):\n original_alt_info = getattr(self.form, 'alt_field_info', None)\n expected_label = 'alt_test_no_method'\n label_for_used_attrs = 'alt_test_feature'\n test_method = getattr(self.form, 'condition_' + expected_label, None)\n alt_info = getattr(self, 'alt_field_info', None)\n expected = alt_info.get(label_for_used_attrs, None)\n self.form.alt_field_info = alt_info\n self.form.test_condition_response = True\n actual = self.form.get_alt_field_info()\n\n self.assertIsNotNone(alt_info)\n self.assertIsNone(test_method)\n self.assertIsNotNone(expected)\n self.assertIn(expected_label, alt_info)\n self.assertEqual(expected, actual)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_info\n if original_alt_info is None:\n del self.form.alt_field_info", "def test_primary_key_update_failure(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_primary_key_update_failure(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(partition=uuid4())", "def test_update(app):\n\n assert False", "def test_update_incomplete_payload(self):\n payload = {'name': 'Pecho inclinado'}\n response = self.client.put(\n '/exercises/{}/'.format(self.exer1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n content = {\n 'muscle_group': ['This field is required.'],\n }\n self.assertEqual(json.loads(response.content), content)", "def test_client_verification_document_partial_update(self):\n pass", "def test_an_extra_delete_is_not_sent(self):\n partition = uuid4()\n cluster = 1\n\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster)\n\n obj = TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).first()\n\n self.assertFalse({k: v for (k, v) in obj._values.items() if v.deleted})\n\n obj.text = 'foo'\n obj.save()\n #execute_count will check the execution count and\n #assert no more calls than necessary where made", "def test_primary_key_update_failure(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_email_is_optional(self):\n self.updated_data['email'] = ''\n self.update_user()\n self.assertEqual(self.user.email, self.updated_data['email'])", "def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "async def test_update_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.update('x', {})", 
"def test_cannot_update_tab_with_empty_field(self):\n\n self.client.login(username='tab', password='tab')\n tab_fields = ['name']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, tab_fields)\n\n tab = Tab.objects.get(pk=self.tab.pk)\n self.assertEqual(tab.name, 'test')\n self.assertIsNone(tab.last_edit_date)", "def test_cannot_update_an_existing_service_when_missing_service_description(self):\n self.data = {\n \"service_name\": \"Live at the yard\",\n \"service_price\": \"5000\",\n \"service_description\": \"\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"Error. Missing Service Description.\", str(response3.data))", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_create_product_no_data(self):\n resp = self.app.post(\n \"/products\", json={}, content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_check_update_properly_build_request_when_no_custom_data_given():\n request = UpdateDetailRequest('v1', 'MyDevice', None)\n update_helper = UpdateCheckHelper(_api_key, _base_url)\n built_request = update_helper.build_request(request)\n body = json.loads(built_request.body)\n\n assert body['unitId'] == request.unit_id\n assert body['versionId'] == request.version_id\n assert 'customClientData' not in body\n\n headers = built_request.headers\n assert headers['Authorization'] == _api_key\n assert headers['Content-Type'] == 'application/json'", "def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )", "def test_put_no_data(self):\n test_data = {}\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)", "def test_update_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n 
'/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'product updated!')\n self.assertEqual(resp.status_code, 200)", "def test_set_empty_field(self):\n self._p.fields = {}\n received = self._p.fields\n expected = {}\n msg = 'Setting field with empty list should not produce error.'\n self.assertDictEqual(received, expected, msg)", "def test_update_non_existent(cards_db):\n i = 123 # any number will do, db is empty\n with pytest.raises(InvalidCardId):\n cards_db.update_card(i, Card(summary=\"bar\", owner=\"not me\"))", "def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def test_update_with_invalid_data(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_invalid_data2, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update_order_with_non_json_data(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), data='order_status=rejected')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. 
Request data must be in json format')", "def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_update_a_recommendation_no_relationship(self):\n recommendation = RecommendationFactory()\n logging.debug(recommendation)\n recommendation.create()\n logging.debug(recommendation)\n recommendation.relationship = None\n self.assertRaises(DataValidationError, recommendation.update)", "def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')", "def test_update_order_failure(self):\n # create a order to update\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # update the order\n new_order = resp.get_json()\n new_order['product_id'] = 2\n resp = self.app.put('/orders/{}'.format(5),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_client_nationlity_partial_update(self):\n pass", "def test_cannot_update_details_of_service_that_does_not_exist(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. 
You have added a new Service Live at the yard to the store.\", str(response2.data))\n response3 = self.client.put(store_url + store_id + '/service/5a2bc733791e4bbc9a26f7a5/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"404 NOT FOUND\")", "def test_update_scenario(self):\n pass", "def test_update_product_unique_name(self):\n data = {\n 'pk': 1,\n 'name': 'Banana',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotEqual(models.Product.objects.filter(name=data['name']), data['pk'])", "def test_shoppingcart_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe update\n data = self.shoppingcart_data\n data[\"quantity\"] = 20\n data[\"discount_value\"] = 9.99\n data[\"is_closed\"] = True\n self._update_model(\"shoppingcart\", id, data, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_update_inventory(self):\n pass", "def test_update_no_pk(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n with self.assertRaises(Exception):\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)", "def test_update_item_incorrect_value_type(test_client, item_with_bad_value):\n\n response = test_client.put(GOOD_ITEM_URL,\n data=json.dumps(item_with_bad_value),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] == app.BAD_REQUEST", "def validate_fields_for_magento(self,data):\n for field in data:\n if data[field] == None :\n del data[field]\n if data[field] == True:\n data[field] = 1\n if data[field] == False :\n data[field] = 0", "def test_cannot_update_an_existing_service_when_missing_service_subcategory(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. 
You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"Error. Missing Service Subcategory.\", str(response3.data))", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_update_product_success(self):\n product = sample_product(supplier_id=self.user, name='old-name', price='100.00')\n url = detail_url(product.id)\n new_product = {\n 'name': 'new_name',\n 'price': '1000.0',\n 'image': ''\n }\n res = self.client.put(url, new_product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['name'], new_product['name'])", "def test_update_inventory_not_found(self):\n new_inventory = {'name': 'conditioner', 'quantity': 1, 'status': 'new'}\n data = json.dumps(new_inventory)\n resp = self.app.put('/inventories/0', data=data, content_type='application/json')\n self.assertEquals(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_client_risk_assessment_partial_update(self):\n pass" ]
[ "0.802753", "0.7568224", "0.7292608", "0.7203753", "0.7033387", "0.69645137", "0.6949159", "0.6943705", "0.69149756", "0.691329", "0.690093", "0.6899394", "0.6895893", "0.68875086", "0.6824652", "0.6816774", "0.68002254", "0.6785967", "0.6772013", "0.67623484", "0.6751942", "0.6750954", "0.671549", "0.6710964", "0.66895324", "0.66697335", "0.66623724", "0.66439843", "0.662813", "0.6616762", "0.65642226", "0.6533374", "0.6522956", "0.6508813", "0.6501456", "0.6496754", "0.64840573", "0.6476278", "0.6475182", "0.6473176", "0.6469148", "0.6459123", "0.6443856", "0.64434", "0.64339215", "0.6425756", "0.64116085", "0.64074355", "0.63954526", "0.63760227", "0.63716465", "0.6354545", "0.63497835", "0.6338663", "0.63366973", "0.6331612", "0.6330976", "0.6323804", "0.63213277", "0.63119954", "0.6311459", "0.6307739", "0.63011724", "0.62953496", "0.6291305", "0.6280891", "0.6275605", "0.62694305", "0.6266704", "0.62650645", "0.62650645", "0.62650645", "0.62620586", "0.62530977", "0.6251825", "0.6250799", "0.62491846", "0.62472486", "0.6227767", "0.6225692", "0.6212141", "0.62051314", "0.6204828", "0.6203714", "0.6201867", "0.6192951", "0.61837363", "0.61834", "0.6181386", "0.6178941", "0.6177278", "0.61745256", "0.61678606", "0.61603457", "0.6159947", "0.61594063", "0.61483544", "0.61419636", "0.61379313", "0.6131513" ]
0.8045699
0
Test that product cannot be updated with numbers for strings
def test_update_product_with_numbers_for_strings(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    product_update = dict(
        prod_name=4562,
        category=5248,
        stock=50,
        price=180
    )
    resp = self.client.put(
        '/api/v1/products/1',
        content_type='application/json',
        data=json.dumps(product_update),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'prod_name and category should be characters!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def test_update_product_with_characters_for_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='NY_denims',\n category='denims',\n stock='many',\n price='pesa'\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_num_prod(self):\n self.assertEqual(self.Nprod, 1)", "def test_product(self):\n self.assertEqual(functions.product(2, 2), 4)\n self.assertEqual(functions.product(2, -2), -4)", "def test_multiplication():\n assert calculator.multiply(7, 3) == 21\n assert calculator.multiply(7.0, 3.0) == 21.0\n assert calculator.multiply(7, -3) == -21\n assert calculator.multiply(7.0, -3.0) == -21.0", "def test_calculate_multiplication(self):\n result = self.calcuate.calcuate('3x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def test_product_mult_only(self):\r\n self.assertEquals(preview.latex_preview('2*3'), r'2\\cdot 3')", "def test_numbers(number):\n assert number ** 2 == number ** 2", "def product(nbs):\n try:\n prod = 0\n for nb in nbs:\n prod *= float(nb)\n except TypeError:\n print(\"Hmmm, I guess you haven't only entered valid numbers\")\n return\n print(\"And the product is.... 
: {0} \".format(prod))", "def test_numbers(self):\n \n result = gen_expansion(sym.pi, 2)\n self.assertEqual(result, '14')\n result = gen_expansion(sym.exp(1), 2)\n self.assertEqual(result, '72')", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def make_quantity(string):\n pass", "def __checkProduct(self, prd, num):\n if prd not in vmdata.prdStore or not isinstance(num, int) or num < 1:\n return False \n return True", "def prodName(self, pName):\r\n if str(pName).isnumeric() == False:\r\n self.__prodName = pName\r\n else:\r\n raise Exception(\"Product Names cannot be numbers\")", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def test_not_int(self):\n invalid_args = [\"random string\", \"123\", 123.5]\n for arg in invalid_args:\n assert meters_to_km(arg) is arg", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_memorystr():\n for val in ['123G', '123g', '123M', '123m', '123.5m', '25k', '25K']:\n MemoryStr(val)\n\n for val in [123, '123', '123mm', '123a', 'G']:\n print(val)\n with pytest.raises(ValueError):\n MemoryStr(val)\n\n assert MemoryStr('1024m').asGB() == 1.0\n assert MemoryStr('3G').asGB() == 3.0", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_category_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='4dens',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def __set_has_numeric(text=str):\n reg_ex = constants.NUMERIC_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_NUMERIC_KEY, text)", "def test_is_product_entangled_state():\n ent_vec = max_entangled(3)\n np.testing.assert_equal(is_product_vector(ent_vec), False)", "def test_no_multiply():\n with pytest.raises(NotImplementedError):\n SplineTerm(0) * LinearTerm(1)\n\n term_list = SplineTerm(0) + LinearTerm(1)\n with pytest.raises(NotImplementedError):\n term_list * term_list", "def test_product_keep_going(self):\r\n self.assertEquals(\r\n preview.latex_preview('2/3*4/5*6'),\r\n r'\\frac{2}{3}\\cdot \\frac{4}{5}\\cdot 6'\r\n )", "def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"", "def test_non_integer_suffix(self):\n with self.assertRaises(Exception) as exception:\n make_rpm_version('0.1.2preX')\n\n self.assertEqual(\n u'Non-integer value \"X\" for \"pre\". 
Supplied version 0.1.2preX',\n unicode(exception.exception)\n )", "def evaluator_side_effect(_, __, math_string):\r\n if math_string != '4':\r\n raise err", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_int_install_2():\n expected_output_price = 65000\n output_price = int_installs('+65,000')\n assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \\\n \"\"\"Should show that the installs is 65000.\"\"\"", "def test_not_units(self):\n with self.assertRaises(AssertionError):\n _unit_map(\"WiB\")", "def test_pauli_string_expval(self, shadow):\n\n o1 = qml.PauliX(0)\n res1 = shadow.expval(o1, k=2)\n\n o2 = qml.PauliX(0) @ qml.PauliX(1)\n res2 = shadow.expval(o2, k=2)\n\n res_exact = 1.0\n assert qml.math.allclose(res1, res_exact, atol=1e-1)\n assert qml.math.allclose(res2, res_exact, atol=1e-1)", "def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')", "def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"", "def test_stock_and_price_must_be_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock='stock',\n price='money'\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_not_enough_change(self):\n item, change, _ = give_item_and_change('apple', '.2')\n self.assertIsNone(item)\n self.assertEqual(change, 0.2)", "def test_multiply_except(self):\n chan = Chi(self.chiI)\n self.assertRaises(QiskitError, chan.multiply, 's')\n self.assertRaises(QiskitError, chan.multiply, chan)", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def test02_password_numeric(self):\n self.set_complexity(length=0, numeric=4, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n \"1abcd2efghij3\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"1correct2horse3battery4staple\",\n \"1234abc\",\n \"abc1234\",\n \"0000\",\n \"Password1234!___\",\n \"Test1Split2Numerics3In4Password\",\n )\n self.set_passwords(valid)", "def minuscule(product):\n product = product.lower()\n return product", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def 
test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def test_is_gene_continuously_amplified_wrong_input(self):\n self.assertEqual(\"Wrong input data\", is_gene_continuously_amplified(13))", "def test_cannot_make_sale_with_wrong_datatypes(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_345\", \n \"quantity\":'Kummi'\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name & quantity should be a character & number respectively!')\n self.assertEqual(resp.status_code, 400)", "def testPowerSetStrings(self):\n def setPower():\n self.node.power = [1.3782, 278.32, 'banana']\n\n self.assertRaises(\n TypeError,\n setPower\n )", "def test_calculate_order_multiplication_subtraction_adding(self):\n result = self.calcuate.calcuate('11-2+4x3')\n expected_result = \"21\"\n self.assertEqual(expected_result, result)", "def testPowerSetStrings(self):\n def setPower():\n self.cc.power = [1.3782, 278.32, 'banana']\n\n self.assertRaises(\n TypeError,\n setPower\n )", "def test_numeric(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_numeric')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_numeric ' \\\n '( value NUMERIC(100,50) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_numeric VALUES (%s)'\n for i in range(100):\n int = random.getrandbits(150)\n frac = random.getrandbits(150)\n item = decimal.Decimal('%d.%s' % (int, frac))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_numeric'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, decimal.Decimal)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_numeric')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_numeric')\n cursor.execute(query)\n conn.commit()", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def multiply_list(input):\n result = 1\n #if there is no numbers in list return False\n if not input:\n return False\n #if the list contains strings or 0 return False\n else: \n for x in input:\n if type(x) == str or x == 0:\n return False \n else:\n result = result*x\n return result", "def test_unit_conversion_incompatible(self):\n self.orography_cube.units = 'K'\n msg = \"Unable to convert from\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin_instance.process(self.orography_cube)", "def test_bad_values(self):\n 
self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def test_add_strings(self):\n print(\"---running test_add_strings\")\n result = some_math.add('abc', 'def')\n assert result == 'abcdef'", "def test_scalar_multiplication(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n a2 = a1 * 3.5\n a3 = a1 * 0.5\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 3.5, -7, 10.5, -14))\n self.assertEqual(a3,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def test_case_strings_numbers2(self):\n data = {\"numbers\": \"1,4,e,w,5,t\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.data, {\"error\":\"must be a number\"})", "def test_load_do_not_convert_non_quantity_strings(self):\n sage = ForceField(\"openff-2.0.0.offxml\")\n\n for parameter_handler_name in sage.registered_parameter_handlers:\n parameter_handler = sage.get_parameter_handler(parameter_handler_name)\n\n for parameter in parameter_handler.parameters:\n assert isinstance(parameter.smirks, str)\n assert not isinstance(parameter.smirks, unit.Quantity)\n\n # Ensure that, for example, F isn't converted to Farad\n if (\n parameter_handler_name == \"LibraryCharges\"\n and parameter.name is not None\n ):\n assert isinstance(parameter.name, str)\n assert not isinstance(parameter.name, unit.Quantity)", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def test_simtk_list_of_quantities_to_pint():\n list_of_quantities = [val * omm_unit.meter for val in range(10)]\n quantity_list = omm_unit.meter * [val for val in range(10)]\n\n assert list_of_quantities != quantity_list\n assert all(simtk_to_pint(list_of_quantities) == simtk_to_pint(quantity_list))", "def test_words_with_numbers(self):\n\n test_string = \"1. 
FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def test_add_to_stock_negative(add):\n assert STOCK[0]['quantity'] == 20\n for i in [\"2.32\", \"sd\", -2, 0, 201]:\n value = validate_int(i)\n add[0].add_to_stock(value)\n # there is no change in our stock on invalid input\n assert STOCK[0]['quantity'] == 20\n STOCK[0]['quantity'] = 20", "def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def test_int_install_1():\n expected_output_price = 65000\n output_price = int_installs('65000')\n assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \\\n \"\"\"Should show that the installs is 65000.\"\"\"", "def test_legal_names(self):\n prods = generate_products()\n for obj in prods:\n self.assertRegexpMatches(\n '(\\w{2,10} \\w{0,12}|\\?{0,3}){1}', obj.name)", "def _fix_surprising_number(val, s):\n if (\n isinstance(val, (int, float)) and \"!!\" not in s\n and _contains_non_numeric_chars(s)\n ):\n return s\n return val", "def _cached_product(self, x, y):\n xy = x + ''.join(c for c in y if c not in x)\n return self.normal_form(xy)", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_letters(self):\n self.assertFalse(validate_measure_input('a', self.measures))\n self.assertFalse(validate_measure_input('1a', self.measures))", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_post_sale_record_with_quantity_string(self):\n\t\tself.register_user()\n\t\tresult = self.login_user()\n\t\taccess_token = json.loads(result.data.decode())['token']\n\n\t\tres = self.client.post(self.sl_url,\n\t\t\tdata=self.string_sales,\n\t\t\theaders=dict(Authorization=\"Bearer \" + access_token))\n\t\tresult = json.loads(res.data.decode())\n\t\tself.assertEqual(res.status_code, 400)\n\t\tself.assertEqual(result[\"message\"][\"quantity\"], \"Only integers allowed\")", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def test_add_sale_with_product_name_not_string(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 1, 'price': 1500, 'quantity': 10, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n 
self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product name should be a string.')", "def test_e0_prod(self):\n self.assertAlmostEqual(self.tunneling.E0_prod.value_si * 0.001, self.E0_prod, 4)", "def _confirm_quantity(units):\n quantities = dict()\n for key, value in units.items():\n if isinstance(value, str):\n quant = 1 * parse_unit(value)\n elif isinstance(value, u.UnitBase):\n quant = 1 * value\n elif isinstance(value, u.quantity.Quantity):\n quant = value\n else:\n quant = None\n key = key.split('_')[0]\n quantities[key] = quant\n return quantities", "def test_containsOnly(self) -> None:\n assert containsOnly('.83', '0123456789.')\n assert not containsOnly('43221', '123')", "def testStringInput(self):\n nb.rescale_length(\"2.0\")\n self.assertEqual(2.0, nb.rscale)", "def test_multiply(self):\n self.assertEqual(work_file.multiply(10, 5), 50)\n self.assertEqual(work_file.multiply(-1, 1), -1)\n self.assertEqual(work_file.multiply(-1, -1), 1)", "def test_staff_inputs_expressions_legacy(self):\r\n problem = self.build_problem(answer=\"1+1j\", tolerance=1e-3)\r\n self.assert_grade(problem, '1+j', 'correct')", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def test_empty_value(self, sc):\n assert sc.add('') == 0", "def test_case_strings_numbers(self):\n data = {\"numbers\": \"1,4,6,e,r,6,t,1\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)", "def validate_product_quantity(item, qty):\n return True", "def test_not_repeat_combination(self):\n self.assertTrue(\"-Yao Ming Zydrunas Ilgauskas\", show_players_sumheigh_is_input(177))\n self.assertFalse(show_players_sumheigh_is_input(177), \"-Zydrunas Ilgauskas Yao Ming\")", "def testSetPowerWithString(self):\n self.node.power = '12.3'\n\n self.assertEqual(\n (Decimal('12.3'), Decimal('12.3'), Decimal('12.3')),\n self.node.power\n )", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def test_encoding_non_int_fails(self):\n self.assertRaises(EncodingError, base62.from_decimal, string.ascii_letters)", "def test_int_install_3():\n with pytest.raises(ValueError): \n int_installs('$65000')", "def test_mul2():\n assert (2 * (x + 1)).is_Mul", "def testcharge_and_mult(self):\r\n assert self.data.charge == 0\r\n assert self.data.mult == 1", "def test_only_nums_are_valid_inputs():\n bad_inputs = [[\"boop\", \"boink\"], 10, 99.99, {\"one\": 2, \"three:\": 4}]\n\n for input in bad_inputs:\n with pytest.raises(AttributeError):\n song_decoder(bad_inputs)", "def quantity_from_string(quantity_str):\n\n # Strip out (possible) surrounding quotes\n quote_pattern = '[^\\'\"]+'\n try:\n quantity_str = re.search(quote_pattern, quantity_str).group()\n except AttributeError as e:\n raise AttributeError(\"Please pass a quantity in format of '#*unit'. e.g. 
'1*atmosphere'\")\n # Parse String\n operators = ['(', ')', '*', '/']\n def find_operator(passed_str):\n # Process the current string until the next operator\n for i, char in enumerate(passed_str):\n if char in operators:\n break\n return i\n\n\n def nested_string(passed_str):\n def exponent_unit(passed_str):\n # Attempt to cast argument as an exponenet\n future_operator_loc = find_operator(passed_str)\n future_operator = passed_str[future_operator_loc]\n if future_operator == '(': # This catches things like x**(3*2), rare, but it could happen\n exponent, exponent_type, exp_count_indices = nested_string(passed_str[future_operator_loc+1:])\n elif future_operator_loc == 0:\n # No more operators\n exponent = passed_str\n future_operator_loc = len(passed_str)\n exp_count_indices = future_operator_loc + 2 # +2 to skip the **\n else:\n exponent = passed_str[:future_operator_loc]\n exp_count_indices = future_operator_loc + 2 # +2 to skip the **\n exponent = float(exponent) # These should only ever be numbers, not quantities, let error occur if they aren't\n if exponent.is_integer(): # Method of float\n exponent = int(exponent)\n return exponent, exp_count_indices\n # Loop through a given string level, returns how many indicies of the string it got through\n last_char_loop = 0\n number_pass_string = len(passed_str)\n last_operator = None\n final_quantity = None\n # Close Parenthisis flag\n paren_closed = False\n while last_char_loop < number_pass_string:\n next_char_loop = find_operator(passed_str[last_char_loop:]) + last_char_loop\n next_char = passed_str[next_char_loop]\n # Figure out what the operator is\n if (next_char_loop == number_pass_string - 1 and (next_char != ')')) or (next_char_loop == 0 and next_char != '(' and next_char != ')'):\n # Case of no new operators found\n argument = passed_str[last_char_loop:]\n else:\n argument = passed_str[last_char_loop:next_char_loop]\n # Strip leading/trailing spaces\n argument = argument.strip(' ')\n # Determine if argument is a unit\n try:\n arg_unit = getattr(unit, argument)\n arg_type = 'unit'\n except Exception as e:\n # Assume its float\n try:\n arg_unit = float(argument)\n arg_type = 'float'\n except: # Usually empty string\n if argument == '':\n arg_unit = None\n arg_type = 'None'\n else:\n raise e # Raise the syntax error\n # See if we are at the end\n augment = None\n count_indices = 1 # How much to offset by to move past operator\n if next_char_loop != number_pass_string:\n next_operator = passed_str[next_char_loop]\n if next_operator == '*':\n try: # Exponent\n if passed_str[next_char_loop+1] == '*':\n exponent, exponent_offset = exponent_unit(passed_str[next_char_loop+2:])\n try:\n next_char_loop += exponent_offset\n # Set the actual next operator (Does not handle nested **)\n next_operator = passed_str[next_char_loop]\n except IndexError:\n # End of string\n next_operator = None\n # Apply exponent\n arg_unit **= exponent\n except:\n pass\n # Check for parenthises\n if next_operator == '(':\n augment, augment_type, count_indices = nested_string(passed_str[next_char_loop+1:])\n count_indices += 1 # add 1 more to offset the '(' itself\n elif next_operator == ')':\n paren_closed = True\n else:\n # Case of no found operators\n next_operator = None\n # Handle the conditions\n if (last_operator is None):\n if (final_quantity is None) and (arg_type is 'None') and (augment is None):\n raise TypeError(\"Given Quantity could not be interpreted as presented\")\n elif (final_quantity is None) and (augment is None):\n final_quantity = arg_unit\n 
final_type = arg_type\n elif (final_quantity is None) and (arg_type is 'None'):\n final_quantity = augment\n final_type = augment_type\n else:\n if augment is None:\n augment = arg_unit\n augment_type = arg_type\n if last_operator == '*':\n final_quantity *= augment\n elif last_operator == '/':\n final_quantity /= augment\n # Assign type\n if augment_type == 'unit':\n final_type = 'unit'\n elif augment_type == 'float':\n final_type = 'float'\n last_operator = next_operator\n last_char_loop = next_char_loop + count_indices # Set the new position here skipping over processed terms\n if paren_closed:\n # Determine if the next term is a ** to exponentiate augment\n try:\n if passed_str[last_char_loop:last_char_loop+2] == '**':\n exponent, exponent_offset = exponent_unit(passed_str[last_char_loop+2:])\n final_quantity **= exponent\n last_char_loop += exponent_offset\n except:\n pass\n break\n return final_quantity, final_type, last_char_loop\n\n quantity, final_type, x = nested_string(quantity_str)\n return quantity" ]
[ "0.6655952", "0.6655952", "0.62706953", "0.62572986", "0.6155536", "0.60939926", "0.6081916", "0.60734576", "0.6025201", "0.5992067", "0.5951248", "0.5883255", "0.58490777", "0.5848731", "0.58472234", "0.5843898", "0.5835155", "0.5830356", "0.57858956", "0.57778364", "0.5768428", "0.57661104", "0.5754419", "0.57541645", "0.57533205", "0.57391566", "0.5725025", "0.57245207", "0.5709176", "0.5676185", "0.56731796", "0.56731796", "0.56665045", "0.5664438", "0.5661229", "0.56331545", "0.5627919", "0.5618148", "0.55888754", "0.5582758", "0.55671614", "0.55666864", "0.5554994", "0.555131", "0.5551059", "0.5549139", "0.55462813", "0.55412996", "0.55186915", "0.551328", "0.55005527", "0.5496773", "0.5491366", "0.5491274", "0.54900974", "0.5477752", "0.54669935", "0.54608047", "0.54533994", "0.5448801", "0.5440677", "0.54388356", "0.5436624", "0.54285693", "0.5426048", "0.542383", "0.54236877", "0.54212326", "0.54197013", "0.5417582", "0.54145753", "0.5409307", "0.5405904", "0.54052883", "0.54043937", "0.5400728", "0.53918153", "0.5388715", "0.537768", "0.53723544", "0.5369182", "0.536375", "0.53537023", "0.5351062", "0.53500634", "0.5349779", "0.53480065", "0.5346223", "0.5344041", "0.53334403", "0.5332229", "0.53281194", "0.5317751", "0.5314962", "0.5314257", "0.53114164", "0.5305305", "0.5302949", "0.5302367", "0.5301498" ]
0.6393195
2
Test that product cannot be updated with strings for numbers
def test_update_product_with_characters_for_numbers(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    product_update = dict(
        prod_name='NY_denims',
        category='denims',
        stock='many',
        price='pesa'
    )
    resp = self.client.put(
        '/api/v1/products/1',
        content_type='application/json',
        data=json.dumps(product_update),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def test_product(self):\n self.assertEqual(functions.product(2, 2), 4)\n self.assertEqual(functions.product(2, -2), -4)", "def test_update_product_with_numbers_for_strings(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name=4562,\n category=5248,\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category should be characters!')\n self.assertEqual(resp.status_code, 400)", "def test_num_prod(self):\n self.assertEqual(self.Nprod, 1)", "def test_multiplication():\n assert calculator.multiply(7, 3) == 21\n assert calculator.multiply(7.0, 3.0) == 21.0\n assert calculator.multiply(7, -3) == -21\n assert calculator.multiply(7.0, -3.0) == -21.0", "def test_calculate_multiplication(self):\n result = self.calcuate.calcuate('3x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def product(nbs):\n try:\n prod = 0\n for nb in nbs:\n prod *= float(nb)\n except TypeError:\n print(\"Hmmm, I guess you haven't only entered valid numbers\")\n return\n print(\"And the product is.... 
: {0} \".format(prod))", "def test_product_mult_only(self):\r\n self.assertEquals(preview.latex_preview('2*3'), r'2\\cdot 3')", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_is_product_entangled_state():\n ent_vec = max_entangled(3)\n np.testing.assert_equal(is_product_vector(ent_vec), False)", "def __checkProduct(self, prd, num):\n if prd not in vmdata.prdStore or not isinstance(num, int) or num < 1:\n return False \n return True", "def test_numbers(number):\n assert number ** 2 == number ** 2", "def test_numbers(self):\n \n result = gen_expansion(sym.pi, 2)\n self.assertEqual(result, '14')\n result = gen_expansion(sym.exp(1), 2)\n self.assertEqual(result, '72')", "def test_no_multiply():\n with pytest.raises(NotImplementedError):\n SplineTerm(0) * LinearTerm(1)\n\n term_list = SplineTerm(0) + LinearTerm(1)\n with pytest.raises(NotImplementedError):\n term_list * term_list", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_product_keep_going(self):\r\n self.assertEquals(\r\n preview.latex_preview('2/3*4/5*6'),\r\n r'\\frac{2}{3}\\cdot \\frac{4}{5}\\cdot 6'\r\n )", "def test_not_int(self):\n invalid_args = [\"random string\", \"123\", 123.5]\n for arg in invalid_args:\n assert meters_to_km(arg) is arg", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def prodName(self, pName):\r\n if str(pName).isnumeric() == False:\r\n self.__prodName = pName\r\n else:\r\n raise Exception(\"Product Names cannot be numbers\")", "def test_int_install_2():\n expected_output_price = 65000\n output_price = int_installs('+65,000')\n assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \\\n \"\"\"Should show that the installs is 65000.\"\"\"", "def test_multiply_except(self):\n chan = Chi(self.chiI)\n self.assertRaises(QiskitError, chan.multiply, 's')\n self.assertRaises(QiskitError, chan.multiply, chan)", "def multiply_list(input):\n result = 1\n #if there is no numbers in list return False\n if not input:\n return False\n #if the list contains strings or 0 return False\n else: \n for x in input:\n if type(x) == str or x == 0:\n return False \n else:\n result = result*x\n return result", "def 
test_category_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='4dens',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_scalar_multiplication(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n a2 = a1 * 3.5\n a3 = a1 * 0.5\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 3.5, -7, 10.5, -14))\n self.assertEqual(a3,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def test_not_enough_change(self):\n item, change, _ = give_item_and_change('apple', '.2')\n self.assertIsNone(item)\n self.assertEqual(change, 0.2)", "def evaluator_side_effect(_, __, math_string):\r\n if math_string != '4':\r\n raise err", "def test_e0_prod(self):\n self.assertAlmostEqual(self.tunneling.E0_prod.value_si * 0.001, self.E0_prod, 4)", "def test_not_units(self):\n with self.assertRaises(AssertionError):\n _unit_map(\"WiB\")", "def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"", "def test_pauli_string_expval(self, shadow):\n\n o1 = qml.PauliX(0)\n res1 = shadow.expval(o1, k=2)\n\n o2 = qml.PauliX(0) @ qml.PauliX(1)\n res2 = shadow.expval(o2, k=2)\n\n res_exact = 1.0\n assert qml.math.allclose(res1, res_exact, atol=1e-1)\n assert qml.math.allclose(res2, res_exact, atol=1e-1)", "def make_quantity(string):\n pass", "def test_cannot_make_sale_with_wrong_datatypes(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_345\", \n \"quantity\":'Kummi'\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name & quantity should be a character & number respectively!')\n self.assertEqual(resp.status_code, 400)", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_simtk_list_of_quantities_to_pint():\n list_of_quantities = [val * omm_unit.meter for val in range(10)]\n quantity_list = omm_unit.meter * [val for val in range(10)]\n\n assert list_of_quantities != quantity_list\n assert all(simtk_to_pint(list_of_quantities) == simtk_to_pint(quantity_list))", "def test_stock_and_price_must_be_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock='stock',\n price='money'\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = 
self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')", "def test_non_integer_suffix(self):\n with self.assertRaises(Exception) as exception:\n make_rpm_version('0.1.2preX')\n\n self.assertEqual(\n u'Non-integer value \"X\" for \"pre\". Supplied version 0.1.2preX',\n unicode(exception.exception)\n )", "def test_multiply(self):\n self.assertEqual(work_file.multiply(10, 5), 50)\n self.assertEqual(work_file.multiply(-1, 1), -1)\n self.assertEqual(work_file.multiply(-1, -1), 1)", "def test_is_product_entangled_state_3_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[2, 2, 2, 2]), False)", "def prod(self):\n # skipna == True\n # only_numerical == True\n return self._lift(\"prod\")", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_correct_p_values_mixed(self):\r\n exp = [None, 0.008, 0.01, None]\r\n obs = self.mc._correct_p_values([None, 0.004, 0.005, None])\r\n self.assertEqual(obs, exp)", "def test_is_gene_continuously_amplified_wrong_input(self):\n self.assertEqual(\"Wrong input data\", is_gene_continuously_amplified(13))", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def _cached_product(self, x, y):\n xy = x + ''.join(c for c in y if c not in x)\n return self.normal_form(xy)", "def test_unit_conversion_incompatible(self):\n self.orography_cube.units = 'K'\n msg = \"Unable to convert from\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin_instance.process(self.orography_cube)", "def test_memorystr():\n for val in ['123G', '123g', '123M', '123m', '123.5m', '25k', '25K']:\n MemoryStr(val)\n\n for val in [123, '123', '123mm', '123a', 'G']:\n print(val)\n with pytest.raises(ValueError):\n MemoryStr(val)\n\n assert MemoryStr('1024m').asGB() == 1.0\n assert MemoryStr('3G').asGB() == 3.0", "def test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def test_is_product_entangled_state_2_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[4, 4]), False)", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n 
Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")", "def testcharge_and_mult(self):\r\n assert self.data.charge == 0\r\n assert self.data.mult == 1", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def testPowerSetStrings(self):\n def setPower():\n self.node.power = [1.3782, 278.32, 'banana']\n\n self.assertRaises(\n TypeError,\n setPower\n )", "def __set_has_numeric(text=str):\n reg_ex = constants.NUMERIC_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_NUMERIC_KEY, text)", "def test_int_install_1():\n expected_output_price = 65000\n output_price = int_installs('65000')\n assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \\\n \"\"\"Should show that the installs is 65000.\"\"\"", "def test_add_to_stock_negative(add):\n assert STOCK[0]['quantity'] == 20\n for i in [\"2.32\", \"sd\", -2, 0, 201]:\n value = validate_int(i)\n add[0].add_to_stock(value)\n # there is no change in our stock on invalid input\n assert STOCK[0]['quantity'] == 20\n STOCK[0]['quantity'] = 20", "def testPowerSetStrings(self):\n def setPower():\n self.cc.power = [1.3782, 278.32, 'banana']\n\n self.assertRaises(\n TypeError,\n setPower\n )", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"", "def _confirm_quantity(units):\n quantities = dict()\n for key, value in units.items():\n if isinstance(value, str):\n quant = 1 * parse_unit(value)\n elif isinstance(value, u.UnitBase):\n quant = 1 * value\n elif isinstance(value, u.quantity.Quantity):\n quant = value\n else:\n quant = None\n key = key.split('_')[0]\n quantities[key] = quant\n return quantities", "def test_calculate_order_multiplication_subtraction_adding(self):\n result = self.calcuate.calcuate('11-2+4x3')\n expected_result = \"21\"\n self.assertEqual(expected_result, result)", "def test_numeric(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_numeric')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_numeric ' \\\n '( value NUMERIC(100,50) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_numeric VALUES (%s)'\n for i in range(100):\n int = random.getrandbits(150)\n frac = random.getrandbits(150)\n item = decimal.Decimal('%d.%s' % (int, frac))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_numeric'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, decimal.Decimal)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_numeric')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_numeric')\n cursor.execute(query)\n conn.commit()", "def test_staff_inputs_expressions_legacy(self):\r\n problem = self.build_problem(answer=\"1+1j\", 
tolerance=1e-3)\r\n self.assert_grade(problem, '1+j', 'correct')", "def test_not_repeat_combination(self):\n self.assertTrue(\"-Yao Ming Zydrunas Ilgauskas\", show_players_sumheigh_is_input(177))\n self.assertFalse(show_players_sumheigh_is_input(177), \"-Zydrunas Ilgauskas Yao Ming\")", "def test_mul2():\n assert (2 * (x + 1)).is_Mul", "def minuscule(product):\n product = product.lower()\n return product", "def test_correct_value(self):\n self.assertTrue(py_function(6) == 36)\n self.assertFalse(py_function(5) == 9)\n for i in range(0, 10):\n self.assertTrue(py_function(i) == i**2 if i != 0 else 100)", "def test_our_multiply(self):\n\n self.assertEqual(self.our_module.multiply(3, 4), 12)", "def test_mul(self):\n newvalues = Fraction(3,2)*Fraction(1,4)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"3/8\")", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def test_load_do_not_convert_non_quantity_strings(self):\n sage = ForceField(\"openff-2.0.0.offxml\")\n\n for parameter_handler_name in sage.registered_parameter_handlers:\n parameter_handler = sage.get_parameter_handler(parameter_handler_name)\n\n for parameter in parameter_handler.parameters:\n assert isinstance(parameter.smirks, str)\n assert not isinstance(parameter.smirks, unit.Quantity)\n\n # Ensure that, for example, F isn't converted to Farad\n if (\n parameter_handler_name == \"LibraryCharges\"\n and parameter.name is not None\n ):\n assert isinstance(parameter.name, str)\n assert not isinstance(parameter.name, unit.Quantity)", "def test_empty_product_arr():\n assert largest_product([]) == 0", "def validate_product_quantity(item, qty):\n return True", "def test02_password_numeric(self):\n self.set_complexity(length=0, numeric=4, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n \"1abcd2efghij3\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"1correct2horse3battery4staple\",\n \"1234abc\",\n \"abc1234\",\n \"0000\",\n \"Password1234!___\",\n \"Test1Split2Numerics3In4Password\",\n )\n self.set_passwords(valid)", "def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42", "def test_sqopprod():\n w.reset_space()\n w.add_space(\"o\", \"fermion\", \"occupied\", [\"i\", \"j\"])\n w.add_space(\"a\", \"fermion\", \"general\", [\"u\", \"v\"])\n w.add_space(\"v\", \"fermion\", \"occupied\", [\"a\", \"b\", \"c\"])\n\n opprod = w.sqopprod([], [])\n assert str(opprod) == \"\"\n\n opprod = w.sqopprod([\"v_0\"], [])\n assert str(opprod) == \"a+(v0)\"\n\n opprod = w.sqopprod([], [\"o_0\"])\n assert str(opprod) == \"a-(o0)\"\n\n opprod = w.sqopprod([\"v_0\"], [\"o_0\"])\n assert str(opprod) == \"a+(v0) a-(o0)\"\n assert opprod.latex() == r\"\\hat{a}^\\dagger_{a} \\hat{a}_{i}\"\n\n opprod = w.sqopprod([\"v_0\", \"v_1\"], [\"o_0\", \"o_1\"])\n assert str(opprod) == \"a+(v0) a+(v1) a-(o1) a-(o0)\"\n\n opprod1 = w.sqopprod([\"v_0\", \"v_1\"], [\"o_0\", \"o_1\"])\n opprod2 = w.sqopprod([\"v_0\", \"v_1\"], [\"o_0\", \"o_1\"])\n assert opprod1 == opprod2\n\n opprod1 = w.sqopprod([\"v_0\"], [])\n opprod2 = w.sqopprod([\"v_0\"], [])\n assert not (opprod1 < opprod2)\n\n opprod1 = w.sqopprod([\"v_0\"], [])\n opprod2 = w.sqopprod([\"v_1\"], [])\n assert opprod1 < opprod2\n\n # let's test a bunch of 
combinations\n\n opprod1 = w.sqopprod([\"v_0\"], [\"o_0\"])\n opprod2 = w.sqopprod([\"v_0\"], [\"o_0\"])\n assert opprod1 == opprod2\n assert not (opprod1 < opprod2)\n\n opprod1 = w.sqopprod([\"v_0\"], [\"o_0\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_0\"], [\"o_1\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_1\"], [\"o_0\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_1\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_1\"], [\"o_1\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_0\"])\n assert not (opprod1 < opprod2)\n\n opprod1 = w.sqopprod([\"v_1\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"v_1\", \"v_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_2\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"v_1\", \"v_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"o_4\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"v_1\", \"v_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"o_4\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"a_1\", \"o_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"a_4\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"a_4\"], [\"a_2\"])\n assert opprod1 < opprod2", "def test_correct_p_values_no_change(self):\r\n exp = [None, 0.008]\r\n obs = self.mc._correct_p_values([None, 0.008])\r\n self.assertEqual(obs, exp)\r\n exp = [0.007]\r\n obs = self.mc._correct_p_values([0.007])\r\n assert_almost_equal(obs, exp)", "def test_non_int_case(self):\n self.assertRaises(TypeError, factorial, 1.5)", "def test_decompose_two_qubit_product_gate_not_product(self):\n klkr = Ud(1.0e-6, 0, 0)\n with self.assertRaises(QiskitError) as exc:\n decompose_two_qubit_product_gate(klkr)\n self.assertIn(\"decomposition failed\", exc.exception.message)", "def test_mul(x, y):\n\n assert mul(x, y) == mul(y, x)", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def is_acceptable_multiplier(m):\n return 1 < m < (2 ** 61 - 1)", "def test_multiply(self):\n\n a = random.randint(100, 10000)\n b = random.randint(100, 10000)\n\n path = \"/multiply/{}/{}\".format(a, b)\n\n response = self.get_response(path)\n self.assertEqual(200, response.getcode())\n\n self.assertIn(str(a*b).encode(), response.read())", "def testmul_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tmul_fracX_fracY = fracX * fracY\r\n\t\t\tself.assertEqual ( mul_fracX_fracY.toString ().split ()[0], dictMul ['X*Y'] )", "def subtest_Plateau_evaluation():\n _out =''\n p = tp.Plateau()\n for pion in (tp.BLANC, tp.NOIR):\n _out = check_property(isinstance(p.evaluation(pion), Number),\n \"bad type {} not a number\"\n \"\".format(p.evaluation(pion)))\n return _out", "def test_empty_value(self, sc):\n assert sc.add('') == 0", "def test_calculator_multiply():\n assert Calculator.multiply_numbers(1, 2) == 2", "def test_only_nums_are_valid_inputs():\n bad_inputs = [[\"boop\", \"boink\"], 10, 99.99, {\"one\": 2, \"three:\": 4}]\n\n for input in bad_inputs:\n with 
pytest.raises(AttributeError):\n song_decoder(bad_inputs)", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_wrong_type_of_fill_all_non_numeric(currency_df):\n with pytest.raises(TypeError):\n _ = currency_df.currency_column_to_numeric(\n \"d_col\",\n fill_all_non_numeric=\"zzzzz\",\n )" ]
[ "0.6562785", "0.6562785", "0.6329283", "0.6300493", "0.62892044", "0.62012047", "0.6175202", "0.61220795", "0.60739857", "0.6057926", "0.6026264", "0.6014248", "0.60098", "0.5888831", "0.5833497", "0.5802217", "0.5802217", "0.57738906", "0.57637155", "0.57532704", "0.5741166", "0.57154775", "0.5705396", "0.56961143", "0.5674511", "0.5664426", "0.5638292", "0.56315786", "0.56303114", "0.562195", "0.55942684", "0.5584566", "0.55798846", "0.557904", "0.5574101", "0.5567218", "0.5565102", "0.5559975", "0.5555513", "0.555317", "0.5548995", "0.554436", "0.554094", "0.55398095", "0.55273426", "0.5524776", "0.55232507", "0.5519777", "0.55189997", "0.55051184", "0.5497659", "0.54947215", "0.548377", "0.54788697", "0.5478271", "0.5475976", "0.54758424", "0.54733586", "0.54565454", "0.54493773", "0.54492146", "0.5441405", "0.54400676", "0.5438761", "0.543562", "0.54355615", "0.5425312", "0.5421221", "0.5420531", "0.5420359", "0.5416945", "0.5412735", "0.54112613", "0.54040086", "0.5382368", "0.5380332", "0.5379467", "0.53764755", "0.5373666", "0.5368879", "0.53686315", "0.53650683", "0.53526825", "0.53489447", "0.5338847", "0.5329385", "0.532814", "0.5313664", "0.5313492", "0.53124255", "0.530799", "0.5301706", "0.52992874", "0.52930963", "0.52800035", "0.5273253", "0.5271801", "0.5270834", "0.5268623", "0.5268107" ]
0.62037754
5
Test that admin can delete a product
def test_admin_delete_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product deleted!')
    self.assertEqual(resp.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n 
\n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_products_ref_users_delete(self):\n pass", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n 
content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_delete(self):\n pass", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_products_ref_users_user_delete(self):\n pass", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_item_using_delete(self):\n pass", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_delete_of_an_escalated_article_with_admin(self):\n token = self.user3.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.delete_article()\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(resp.data[\"message\"], self.admin_delete)", "def test_delete_admin_from_org(self):\n pass", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def deleteProduct(request,productId):\n deleteObj = Collection()\n deleteObj.id=productId\n 
productBll.deleteProducts(deleteObj)\n return HttpResponseRedirect('/admin/product/list/')", "def delete_products(request):\n product_obj = Products.objects.get(id=request.data[\"id\"])\n if request.user == product_obj.shop_rel.user:\n product_obj.delete()\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(self.feature_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertTrue(revised_feature.deleted)", "def test_shoppingcart_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe delete\n self._delete_model(\"shoppingcart\", id_cart)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_post_delete_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.user)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_deletehardwares_item(self):\n pass", "def test_delete1(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product successfully deleted')\n return redirect(reverse('products'))", "def test_handle_delete_as_admin(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_delete_user(self):\n pass", "def test_delete_user(self):\n 
pass", "def test_delete_run(self):\n pass", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product deleted!')\n return redirect(reverse('products'))", "def test_author_delete_post(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/')\n self.assertEqual(list(Post.objects.filter(id=1)), [])", "def delete_product(driver, login_action, open_products_page, products_page):\n products_page.delete_product()\n driver.refresh()", "def test_delete(admin_client):\n book = BookFactory()\n url = reverse(\"admin:books_book_delete\", args=(book.pk,))\n\n response = admin_client.get(url)\n templates_used = [t.name for t in response.templates]\n\n assert response.status_code == 200\n render_counts = {x: templates_used.count(x) for x in set(templates_used)}\n\n # The number of times each template was rendered\n assert render_counts == {\n \"admin/delete_confirmation.html\": 1,\n \"admin/base_site.html\": 1,\n \"admin/base.html\": 1,\n \"admin/includes/object_delete_summary.html\": 1,\n \"jazzmin/includes/ui_builder_panel.html\": 1,\n }\n\n # The templates that were used\n assert set(templates_used) == {\n \"admin/delete_confirmation.html\",\n \"admin/base_site.html\",\n \"admin/base.html\",\n \"admin/includes/object_delete_summary.html\",\n \"jazzmin/includes/ui_builder_panel.html\",\n }\n\n response = admin_client.post(url, data={\"post\": \"yes\"}, follow=True)\n\n # We deleted our object, and are now back on the changelist\n assert not Book.objects.all().exists()\n assert response.resolver_match.url_name == \"books_book_changelist\"", "def test_delete_role(self):\n pass", "def test_delete_device_user(self):\n pass", "def test_products_ref_groups_delete(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Access denied!\\\n Sorry, only site owners have this permission.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, f'{product.name} was successfully deleted!')\n return redirect(reverse('products'))", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Invalid Request: Only admin can delete products/services.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n 
messages.success(request, 'Product/Service deleted!')\n return redirect(reverse('products'))", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'This feature is for Admin only.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product Deleted')\n return redirect(reverse('home'))", "def test_shoppinglist_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', '[email protected]')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-list', data={'list_name': 'Christmass'})\n self.assertEqual(res.status_code, 200)\n self.shopping_class_obj.delete_list(\n 'Christmass', '[email protected]')\n # check if delete was successful by looking for the deleted name\n self.assertIn(\"Christmass\", str(res.data))", "def test_post_delete_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass", "def test_shoppingitem_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', '[email protected]')\n # create an item\n self.item_class_obj.add_item(\n 'Christmass', 'Bread', '[email protected]')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-item', data={'list_name': 'Christmass', 'item_name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n self.item_class_obj.delete_item(\n 'Bread', '[email protected]', 'Christmass')\n # check if delete was successful\n self.assertIn(\"Successfuly deleted item \", str(res.data))", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_delete_but_no_view_permission(client):\n user = user_with_permissions(\"polls.delete_poll\")\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"], \"Polls\": [None]}", "def 
test_delete_ingredient(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())", "def test_delete_cloud(self):\n pass", "def test_successfult_post_deletion(self):\n self.user.is_moderator = True\n self.user.save()\n response = self.client.delete(reverse('api:posts-detail', kwargs={'pk': self.post1.id}))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Post.objects.count(), 1)", "def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(account_id=self.appuser_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertIsNone(revised_appuser)", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete__DeleteForm__4(address_book, browser, role):\n browser.login(role)\n browser.assert_forbidden(browser.SEARCH_DELETE_URL)", "def test_delete_post_by_user(self):\n\n response = self.client.delete(reverse('api:posts-detail', kwargs={'pk': self.post1.id}))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_case(self):\n pass", "def test_wrong_admin_delete(self):\n\n with self.client:\n self.get_meals()\n id = 100\n token = self.get_token()\n response = self.client.delete(\n 'api/v1/meals/{}'.format(id), headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_delete_deployment(self):\n pass", "def test_delete(self):\n user = self.custodian_1_user\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n data = None\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client, self.admin_client,\n self.custodian_2_client],\n \"allowed\": []\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.delete(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.delete(url, data, format='json').status_code,\n status.HTTP_200_OK\n )", "def tearDown(self):\n ProcessRequest(f\"products/{self.product_id}.json\").send_request(\n 'DELETE', continue_on_error=True)\n super().tearDown()", "def delete_product(conn, product_id: int) -> None:\n with conn.cursor() as cursor:\n cursor.execute(f\"\"\"update products set deleted = True where id = 
'{product_id}'\"\"\")\n if cursor.rowcount:\n conn.commit()\n else:\n raise errors.StoreError", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_delete_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n rv = self.del_recipe()\n self.assertIn(b'deleted successfully', rv.data)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_delete7(self):\n pass", "def test_admin_can_delete_a_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"User deleted!\")\n self.assertEqual(resp.status_code, 200)", "def test_delete_custom_button(self):\n pass", "def test_vault_delete_vault_item(self):\n pass", "def test_25_admin_delete_category(self):\r\n self.create()\r\n obj = db.session.query(Category).get(2)\r\n category = obj.dictize()\r\n\r\n # Anonymous user GET\r\n url = '/admin/categories/del/%s' % obj.id\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n # Anonymous user POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin GET\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n # Authenticated user but not admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin GET\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Category should be listed for admin user\"\r\n assert category['name'] in res.data, err_msg\r\n # Admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be 
deleted\"\r\n assert \"Category deleted\" in res.data, err_msg\r\n assert category['name'] not in res.data, err_msg\r\n output = db.session.query(Category).get(obj.id)\r\n assert output is None, err_msg\r\n # Non existant category\r\n category['id'] = 5000\r\n url = '/admin/categories/del/5000'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n # Now try to delete the only available Category\r\n obj = db.session.query(Category).first()\r\n url = '/admin/categories/del/%s' % obj.id\r\n category = obj.dictize()\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n print res.data\r\n err_msg = \"Category should not be deleted\"\r\n assert \"Category deleted\" not in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n output = db.session.query(Category).get(obj.id)\r\n assert output.id == category['id'], err_msg", "def test_offers_delete(self, mock_delete):\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete',\n data=form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(self.feature_id)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def delete_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'You have deleted the item!')\n return redirect(reverse('items'))", "def test_delete_collection_user(self):\n pass", "def test_delete_category(self):\n pass", "def test_duo_application_delete(self):\n pass", "def test_no_delete_permission(client):\n user = user_with_permissions(\"polls.view_poll\")\n poll = Poll.objects.create(owner=user, text=\"question\")\n\n url = reverse(\"admin:polls_poll_change\", args=(poll.pk,))\n delete_url = reverse(\"admin:polls_poll_delete\", args=(poll.pk,))\n client.force_login(user)\n\n response = client.get(url)\n assert delete_url not in response.content.decode()", "def test_delete_device_template(self):\n pass", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def delete(self, product):\n product_id = str(product)\n\n\n if product_id in self.sepet:\n del self.sepet[product_id]\n print(product_id)\n self.session.modified=True", "def test_delete_collection(self):\n pass", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, 
self.client.delete_instance.return_value)", "def test_delete(self, client, users):\n user = users[0]\n url = reverse('users:delete', args=(user.pk,))\n response = client.get(url)\n assert response.status_code == 405\n response = client.post(url)\n assert response.status_code == 302\n assert response.url == reverse('users:list')\n assert not get_user_model().objects.filter(pk=user.pk).exists()", "def delete_product_view(request, id):\n Product.objects.get(id=id).delete()\n messages.success(request, \"Product deleted successfully.\")\n return redirect(\"products\")" ]
[ "0.83574384", "0.83247185", "0.8285091", "0.82623804", "0.8177644", "0.8141131", "0.80846477", "0.80198777", "0.8002211", "0.7932145", "0.7909494", "0.7733621", "0.7691911", "0.7655855", "0.756794", "0.7516048", "0.75010175", "0.7476865", "0.74705225", "0.7435304", "0.73770976", "0.7331892", "0.7221541", "0.7064374", "0.702435", "0.7012553", "0.6978083", "0.6963353", "0.69624376", "0.6948373", "0.69477266", "0.6933703", "0.6912823", "0.69126415", "0.69112456", "0.6906048", "0.6900772", "0.6898251", "0.6872185", "0.68659496", "0.68659496", "0.68574715", "0.6856891", "0.6829816", "0.67984", "0.67913246", "0.67897594", "0.67868483", "0.67372376", "0.6734866", "0.6728347", "0.6715697", "0.6709869", "0.6704503", "0.67023444", "0.66981596", "0.66924286", "0.6691787", "0.6677779", "0.66766274", "0.66766274", "0.6676373", "0.66758794", "0.66701514", "0.66688937", "0.66568595", "0.66565543", "0.6655199", "0.6654096", "0.6651202", "0.6648696", "0.6641075", "0.663472", "0.6630556", "0.66302145", "0.6620809", "0.6620503", "0.6617677", "0.66120803", "0.65906835", "0.6589396", "0.6586227", "0.65852666", "0.656477", "0.65593386", "0.65568864", "0.6555936", "0.6548028", "0.65401125", "0.65380454", "0.6529058", "0.65177906", "0.65166265", "0.6507653", "0.6507427", "0.6506532", "0.6502844", "0.65007204", "0.6480546", "0.6479511" ]
0.8554349
0
Test that admin can delete a product
def test_admin_cannot_delete_product_with_blacklisted_token(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/logout',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'You are successfully logged out!')
    self.assertEqual(resp.status_code, 200)

    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n 
response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_products_ref_users_delete(self):\n pass", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_permission(self):\r\n 
self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_delete(self):\n pass", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_products_ref_users_user_delete(self):\n pass", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_item_using_delete(self):\n pass", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_delete_of_an_escalated_article_with_admin(self):\n token = self.user3.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.delete_article()\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(resp.data[\"message\"], self.admin_delete)", "def test_delete_admin_from_org(self):\n pass", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def deleteProduct(request,productId):\n deleteObj = Collection()\n deleteObj.id=productId\n productBll.deleteProducts(deleteObj)\n return HttpResponseRedirect('/admin/product/list/')", "def delete_products(request):\n product_obj = Products.objects.get(id=request.data[\"id\"])\n if request.user == product_obj.shop_rel.user:\n product_obj.delete()\n return Response(status=status.HTTP_200_OK)\n\n return 
Response(status=status.HTTP_401_UNAUTHORIZED)", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(self.feature_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertTrue(revised_feature.deleted)", "def test_shoppingcart_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe delete\n self._delete_model(\"shoppingcart\", id_cart)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_post_delete_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.user)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_deletehardwares_item(self):\n pass", "def test_delete1(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product successfully deleted')\n return redirect(reverse('products'))", "def test_handle_delete_as_admin(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_delete_user(self):\n pass", "def test_delete_user(self):\n pass", "def test_delete_run(self):\n pass", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n 
content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product deleted!')\n return redirect(reverse('products'))", "def test_author_delete_post(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/')\n self.assertEqual(list(Post.objects.filter(id=1)), [])", "def delete_product(driver, login_action, open_products_page, products_page):\n products_page.delete_product()\n driver.refresh()", "def test_delete(admin_client):\n book = BookFactory()\n url = reverse(\"admin:books_book_delete\", args=(book.pk,))\n\n response = admin_client.get(url)\n templates_used = [t.name for t in response.templates]\n\n assert response.status_code == 200\n render_counts = {x: templates_used.count(x) for x in set(templates_used)}\n\n # The number of times each template was rendered\n assert render_counts == {\n \"admin/delete_confirmation.html\": 1,\n \"admin/base_site.html\": 1,\n \"admin/base.html\": 1,\n \"admin/includes/object_delete_summary.html\": 1,\n \"jazzmin/includes/ui_builder_panel.html\": 1,\n }\n\n # The templates that were used\n assert set(templates_used) == {\n \"admin/delete_confirmation.html\",\n \"admin/base_site.html\",\n \"admin/base.html\",\n \"admin/includes/object_delete_summary.html\",\n \"jazzmin/includes/ui_builder_panel.html\",\n }\n\n response = admin_client.post(url, data={\"post\": \"yes\"}, follow=True)\n\n # We deleted our object, and are now back on the changelist\n assert not Book.objects.all().exists()\n assert response.resolver_match.url_name == \"books_book_changelist\"", "def test_delete_role(self):\n pass", "def test_delete_device_user(self):\n pass", "def test_products_ref_groups_delete(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Access denied!\\\n Sorry, only site owners have this permission.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, f'{product.name} was successfully deleted!')\n return redirect(reverse('products'))", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Invalid Request: Only admin can delete products/services.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product/Service deleted!')\n return redirect(reverse('products'))", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'This feature is for Admin only.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n 
product.delete()\n messages.success(request, 'Product Deleted')\n return redirect(reverse('home'))", "def test_shoppinglist_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', '[email protected]')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-list', data={'list_name': 'Christmass'})\n self.assertEqual(res.status_code, 200)\n self.shopping_class_obj.delete_list(\n 'Christmass', '[email protected]')\n # check if delete was successful by looking for the deleted name\n self.assertIn(\"Christmass\", str(res.data))", "def test_post_delete_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass", "def test_shoppingitem_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', '[email protected]')\n # create an item\n self.item_class_obj.add_item(\n 'Christmass', 'Bread', '[email protected]')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-item', data={'list_name': 'Christmass', 'item_name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n self.item_class_obj.delete_item(\n 'Bread', '[email protected]', 'Christmass')\n # check if delete was successful\n self.assertIn(\"Successfuly deleted item \", str(res.data))", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_delete_but_no_view_permission(client):\n user = user_with_permissions(\"polls.delete_poll\")\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"], \"Polls\": [None]}", "def test_delete_ingredient(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())", "def 
test_delete_cloud(self):\n pass", "def test_successfult_post_deletion(self):\n self.user.is_moderator = True\n self.user.save()\n response = self.client.delete(reverse('api:posts-detail', kwargs={'pk': self.post1.id}))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Post.objects.count(), 1)", "def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(account_id=self.appuser_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertIsNone(revised_appuser)", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete__DeleteForm__4(address_book, browser, role):\n browser.login(role)\n browser.assert_forbidden(browser.SEARCH_DELETE_URL)", "def test_delete_post_by_user(self):\n\n response = self.client.delete(reverse('api:posts-detail', kwargs={'pk': self.post1.id}))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_case(self):\n pass", "def test_wrong_admin_delete(self):\n\n with self.client:\n self.get_meals()\n id = 100\n token = self.get_token()\n response = self.client.delete(\n 'api/v1/meals/{}'.format(id), headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_delete_deployment(self):\n pass", "def test_delete(self):\n user = self.custodian_1_user\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n data = None\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client, self.admin_client,\n self.custodian_2_client],\n \"allowed\": []\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.delete(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.delete(url, data, format='json').status_code,\n status.HTTP_200_OK\n )", "def tearDown(self):\n ProcessRequest(f\"products/{self.product_id}.json\").send_request(\n 'DELETE', continue_on_error=True)\n super().tearDown()", "def delete_product(conn, product_id: int) -> None:\n with conn.cursor() as cursor:\n cursor.execute(f\"\"\"update products set deleted = True where id = '{product_id}'\"\"\")\n if cursor.rowcount:\n conn.commit()\n else:\n raise errors.StoreError", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n 
}\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_delete_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n rv = self.del_recipe()\n self.assertIn(b'deleted successfully', rv.data)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_delete7(self):\n pass", "def test_admin_can_delete_a_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"User deleted!\")\n self.assertEqual(resp.status_code, 200)", "def test_delete_custom_button(self):\n pass", "def test_vault_delete_vault_item(self):\n pass", "def test_25_admin_delete_category(self):\r\n self.create()\r\n obj = db.session.query(Category).get(2)\r\n category = obj.dictize()\r\n\r\n # Anonymous user GET\r\n url = '/admin/categories/del/%s' % obj.id\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n # Anonymous user POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin GET\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n # Authenticated user but not admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin GET\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Category should be listed for admin user\"\r\n assert category['name'] in res.data, err_msg\r\n # Admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be deleted\"\r\n assert \"Category deleted\" in res.data, err_msg\r\n assert category['name'] not in res.data, err_msg\r\n output = db.session.query(Category).get(obj.id)\r\n assert output is None, err_msg\r\n # Non existant category\r\n category['id'] = 5000\r\n url = '/admin/categories/del/5000'\r\n res = self.app.post(url, data=category, 
follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n # Now try to delete the only available Category\r\n obj = db.session.query(Category).first()\r\n url = '/admin/categories/del/%s' % obj.id\r\n category = obj.dictize()\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n print res.data\r\n err_msg = \"Category should not be deleted\"\r\n assert \"Category deleted\" not in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n output = db.session.query(Category).get(obj.id)\r\n assert output.id == category['id'], err_msg", "def test_offers_delete(self, mock_delete):\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete',\n data=form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(self.feature_id)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def delete_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'You have deleted the item!')\n return redirect(reverse('items'))", "def test_delete_collection_user(self):\n pass", "def test_delete_category(self):\n pass", "def test_duo_application_delete(self):\n pass", "def test_no_delete_permission(client):\n user = user_with_permissions(\"polls.view_poll\")\n poll = Poll.objects.create(owner=user, text=\"question\")\n\n url = reverse(\"admin:polls_poll_change\", args=(poll.pk,))\n delete_url = reverse(\"admin:polls_poll_delete\", args=(poll.pk,))\n client.force_login(user)\n\n response = client.get(url)\n assert delete_url not in response.content.decode()", "def test_delete_device_template(self):\n pass", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def delete(self, product):\n product_id = str(product)\n\n\n if product_id in self.sepet:\n del self.sepet[product_id]\n print(product_id)\n self.session.modified=True", "def test_delete_collection(self):\n pass", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)", "def test_delete(self, client, users):\n user = users[0]\n url = reverse('users:delete', args=(user.pk,))\n response = client.get(url)\n assert response.status_code == 405\n response = client.post(url)\n assert response.status_code == 302\n assert response.url == reverse('users:list')\n assert not 
get_user_model().objects.filter(pk=user.pk).exists()", "def delete_product_view(request, id):\n Product.objects.get(id=id).delete()\n messages.success(request, \"Product deleted successfully.\")\n return redirect(\"products\")" ]
[ "0.8554349", "0.83574384", "0.83247185", "0.8285091", "0.82623804", "0.8177644", "0.8141131", "0.80846477", "0.80198777", "0.8002211", "0.7932145", "0.7909494", "0.7691911", "0.7655855", "0.756794", "0.7516048", "0.75010175", "0.7476865", "0.74705225", "0.7435304", "0.73770976", "0.7331892", "0.7221541", "0.7064374", "0.702435", "0.7012553", "0.6978083", "0.6963353", "0.69624376", "0.6948373", "0.69477266", "0.6933703", "0.6912823", "0.69126415", "0.69112456", "0.6906048", "0.6900772", "0.6898251", "0.6872185", "0.68659496", "0.68659496", "0.68574715", "0.6856891", "0.6829816", "0.67984", "0.67913246", "0.67897594", "0.67868483", "0.67372376", "0.6734866", "0.6728347", "0.6715697", "0.6709869", "0.6704503", "0.67023444", "0.66981596", "0.66924286", "0.6691787", "0.6677779", "0.66766274", "0.66766274", "0.6676373", "0.66758794", "0.66701514", "0.66688937", "0.66568595", "0.66565543", "0.6655199", "0.6654096", "0.6651202", "0.6648696", "0.6641075", "0.663472", "0.6630556", "0.66302145", "0.6620809", "0.6620503", "0.6617677", "0.66120803", "0.65906835", "0.6589396", "0.6586227", "0.65852666", "0.656477", "0.65593386", "0.65568864", "0.6555936", "0.6548028", "0.65401125", "0.65380454", "0.6529058", "0.65177906", "0.65166265", "0.6507653", "0.6507427", "0.6506532", "0.6502844", "0.65007204", "0.6480546", "0.6479511" ]
0.7733621
12
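A note on how the numeric fields of the row above relate to each other: the positive document's score is 0.7733621, exactly twelve of the listed negative scores are larger, and the stored document_rank is 12. The helper below is a minimal sketch (the function name rank_of_document is ours, not part of any dataset tooling) that reproduces the rank under the assumption that it simply counts the negatives scoring strictly above the positive document.

# Hypothetical helper, not from the dataset's own tooling: reproduce
# document_rank from document_score and negative_scores, assuming the rank
# counts how many negatives score strictly higher than the positive document.
def rank_of_document(document_score: float, negative_scores: list[float]) -> int:
    return sum(score > document_score for score in negative_scores)

# For the row above: rank_of_document(0.7733621, negative_scores) == 12,
# which matches the stored document_rank of 12.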
Test that a non admin cannot delete a product
def test_non_admin_cannot_delete_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.admin_create_user()
    reply = self.attendant_login()
    token = reply['token']

    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Unauthorized Access!')
    self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
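The metadata above declares a single training objective: a triplet over the query, document and negatives columns. As a rough illustration only (the loader below is hypothetical, not the dataset's official code), such a row could be expanded into (anchor, positive, negative) training triples like this:

# Hypothetical expansion of one row into (anchor, positive, negative) triples,
# driven by the column names listed under metadata["objective"]["triplet"].
def expand_triplets(row: dict) -> list[tuple[str, str, str]]:
    triples = []
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_col]        # e.g. the query text
        positive = row[positive_col]    # e.g. the matching document
        for negative in row[negatives_col]:
            triples.append((anchor, positive, negative))
    return triples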
[ "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def 
test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_delete_product(self):\n view = 
ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_products_ref_users_delete(self):\n pass", "def test_post_delete_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_products_ref_users_user_delete(self):\n pass", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(account_id=self.appuser_id)\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('[email protected]', unrevised_appuser.email)", "def test_no_delete_permission(client):\n user = user_with_permissions(\"polls.view_poll\")\n poll = Poll.objects.create(owner=user, 
text=\"question\")\n\n url = reverse(\"admin:polls_poll_change\", args=(poll.pk,))\n delete_url = reverse(\"admin:polls_poll_delete\", args=(poll.pk,))\n client.force_login(user)\n\n response = client.get(url)\n assert delete_url not in response.content.decode()", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(self.feature_id)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_jenkins_user_delete(self):\n ju = JenkinsUser.objects.get(username=\"user_1\")\n self.assertRaises(django.db.models.deletion.ProtectedError, ju.delete)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = 
self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def testDeleteIsDenied(self):\n error = self.assertRaises(PermissionDeniedError, self.users.delete,\n [u'user'])\n self.assertEqual(self.user.username, error.username)\n self.assertEqual([(u'user', Operation.DELETE_USER)],\n error.pathsAndOperations)", "def testDeleteIsDenied(self):\n error = self.assertRaises(PermissionDeniedError, self.users.delete,\n [u'user'])\n self.assertEqual(self.user.username, error.username)\n self.assertEqual([(u'user', Operation.DELETE_USER)],\n error.pathsAndOperations)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n 
resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.delete(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def test_delete_but_no_view_permission(client):\n user = user_with_permissions(\"polls.delete_poll\")\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"], \"Polls\": [None]}", "def test_delete_item_using_delete(self):\n pass", "def testDeleteUserIsDenied(self):\n [(objectID, username)] = UserAPI().create(\n [(u'user', u'secret', u'User', u'[email protected]')])\n self.store.commit()\n with login(u'user', objectID, self.transact) as session:\n deferred = self.facade.deleteUser(session, u'doomed')\n error = yield self.assertFailure(deferred, TPathPermissionDenied)\n self.assertEqual(u'doomed', error.path)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_post_delete_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.user)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete(self):\n pass", "def test_deletehardwares_item(self):\n pass", "def test_admin_cannot_delete_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/5',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This attendant does not exist!\")\n self.assertEqual(resp.status_code, 404)", "def test_wrong_admin_delete(self):\n\n with self.client:\n self.get_meals()\n id = 100\n token = self.get_token()\n response = self.client.delete(\n 'api/v1/meals/{}'.format(id), headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_delete_admin_from_org(self):\n pass", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = 
reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)", "def test_cannotDeleteNonExistent(self):\n store = Store()\n self.assertFailStatus(\n 1, self._makeConfig(store),\n [\"delete\", \"--port-identifier\", \"12345\"])\n self.assertEqual(\n \"12345 does not identify an item.\\n\",\n sys.stdout.getvalue())", "def test_delete_shelf_unauthorized(self, *_):\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n rat = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"password\",\n local=True,\n localname=\"rat\",\n )\n request = self.factory.post(\"\")\n request.user = rat\n\n with self.assertRaises(PermissionDenied):\n views.delete_shelf(request, self.shelf.id)\n\n self.assertTrue(models.Shelf.objects.filter(id=self.shelf.id).exists())", "def test_not_author_delete_post(self):\n self.client.login(username=\"Bill\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/', {\"next\": \"\"})\n self.assertNotEqual(list(Post.objects.filter(id=1)), [])", "def test_handle_delete_as_admin(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")", "def test_delete_ga_failure_no_admin(self):\n\n url = reverse('admin_google_authenticator')\n\n data = {\n 'google_authenticator_id': self.ga.id\n }\n\n 
self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete__invalid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_delete(None)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_campaign_by_non_admin_fails(self):\n response = self.client.delete(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n headers={\"Authorization\": self.non_admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response_body[\"Error\"], \"CampaignsRestAPI DELETE: User not a Org Manager\"\n )\n self.assertEqual(response_body[\"SubCode\"], \"UserNotPermitted\")", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_not_author_delete_group(self):\n self.client.login(username = \"Bill\", password = \"newpass1234\")\n response = self.client.post('/groups/new_group/delete/')\n self.assertNotEqual(list(Group.objects.filter(slug=\"new_group\")), [])", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n 
self.assertEqual(resp.status_code, 400)", "def test_delete_shelf_not_editable(self, *_):\n shelf = self.local_user.shelf_set.first()\n self.assertFalse(shelf.editable)\n request = self.factory.post(\"\")\n request.user = self.local_user\n\n with self.assertRaises(PermissionDenied):\n views.delete_shelf(request, shelf.id)\n\n self.assertTrue(models.Shelf.objects.filter(id=shelf.id).exists())", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def can_delete(self):\r\n return True", "def test_negative_conditions(self):\r\n outline_url = reverse_course_url('course_handler', self.course.id)\r\n # register a non-staff member and try to delete the course branch\r\n non_staff_client, _ = self.create_non_staff_authed_user_client()\r\n response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')\r\n self.assertEqual(response.status_code, 403)", "def test_delete_users_non_admin(client: FlaskClient) -> None:\n username = create_random_username()\n # Non-admin users are not allowed to make the request\n auth_token = create_auth_token(username)\n response = delete_users(client, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def test_delete__not_found(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.NotFound):\n self.handler.do_delete(account_id=self.appuser_id + 1)\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('[email protected]', unrevised_appuser.email)", "def test_delete__not_found(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.NotFound):\n self.handler.do_delete(self.feature_id + 1)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_delete(self):\n user = self.custodian_1_user\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n data = None\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client, self.admin_client,\n self.custodian_2_client],\n \"allowed\": []\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.delete(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.delete(url, data, format='json').status_code,\n status.HTTP_200_OK\n )", "def test_IDeleteCapability(self):\n self.assertFalse(self.ldap.allowDeletePrincipal(\"uid0\"))\n self.assertFalse(self.ldap.allowDeletePrincipal(\"unknownuser\"))", "def test_delete_post_by_user(self):\n\n response = self.client.delete(reverse('api:posts-detail', kwargs={'pk': self.post1.id}))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n 
\"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def delete_products(request):\n product_obj = Products.objects.get(id=request.data[\"id\"])\n if request.user == product_obj.shop_rel.user:\n product_obj.delete()\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_assigned_resource_by_non_admin(self):\n CommonTestCases.user_token_assert_in(\n self,\n delete_assigned_resource_mutation,\n \"You are not authorized to perform this action\"\n )", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_delete_of_an_escalated_article_with_admin(self):\n token = self.user3.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.delete_article()\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(resp.data[\"message\"], self.admin_delete)", "def test_delete_run(self):\n pass", "def test_post_delete_security(client_with_identity: Client, other_identity: Identity):\n other_post = Post.objects.create(\n content=\"<p>OTHER POST!</p>\",\n author=other_identity,\n local=True,\n visibility=Post.Visibilities.public,\n )\n response = client_with_identity.get(other_post.urls.action_delete)\n assert response.status_code == 403", "def test_delete_device_user(self):\n pass", "def unlink(self):\n raise ValidationError(_(\"Products may not be deleted. 
Please archive them instead.\"))", "def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed('{{cookiecutter.package_name}}')\n )", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_products_ref_groups_delete(self):\n pass", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)" ]
[ "0.8456681", "0.82070416", "0.8153313", "0.772525", "0.750598", "0.75052166", "0.7497598", "0.74397874", "0.73591393", "0.7334757", "0.7330903", "0.72217363", "0.72114277", "0.71953607", "0.7125004", "0.70727336", "0.70629984", "0.6935391", "0.69298255", "0.6893835", "0.6886094", "0.6867642", "0.6865682", "0.6857384", "0.6854631", "0.6844916", "0.684339", "0.6829421", "0.68289196", "0.6807431", "0.6799571", "0.67832094", "0.6742397", "0.67395455", "0.67375493", "0.6694083", "0.666717", "0.6667103", "0.66492337", "0.66492337", "0.66333175", "0.66307104", "0.6610505", "0.6602673", "0.6568386", "0.65572757", "0.65513945", "0.6543788", "0.6540046", "0.65387523", "0.6533947", "0.65336627", "0.65159136", "0.6503884", "0.6483612", "0.6480333", "0.6480039", "0.64670604", "0.6453575", "0.64533395", "0.64213276", "0.6416734", "0.6414194", "0.64070714", "0.63944465", "0.6394122", "0.63804513", "0.63710594", "0.63601816", "0.63555276", "0.63433415", "0.63431007", "0.6340278", "0.63391244", "0.63372666", "0.6333054", "0.6327842", "0.632608", "0.6325233", "0.6318113", "0.63179946", "0.6317257", "0.6299983", "0.62897825", "0.6288304", "0.62867725", "0.62834644", "0.6276047", "0.6271025", "0.626404", "0.6255111", "0.62520635", "0.6237538", "0.6232796", "0.6230848", "0.6224491", "0.6217728", "0.62082285", "0.6207557", "0.6196914" ]
0.834971
1
Test that admin cannot delete a product from an empty Inventory
def test_admin_cannot_delete_product_from_empty_Inventory(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']

    resp = self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'There are no products in Inventory!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_delete_inventory(self):\n # save the current number of inventories for later comparision\n inventory_count = self.get_inventory_count()\n # delete a inventory\n resp = self.app.delete('/inventories/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(resp.data), 0)\n new_count = self.get_inventory_count()\n self.assertEqual(new_count, inventory_count - 1)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def 
test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)", "def test_vault_delete_vault_item(self):\n pass", "def test_deletehardwares_item(self):\n pass", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, 
status.HTTP_404_NOT_FOUND)", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_delete_item_using_delete(self):\n pass", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def check_and_delete(self, inventory): # used in a transaction block only so dont initiate a transaction here\n try:\n lines = inventory.lines\n for i in lines:\n if i.quantity == 0:\n i.delete((i,))\n # inventory.reload()\n inventory.save()\n chk = inventory.lines\n if len(chk) == 0:\n inventory.state = 'cancel'\n inventory.save()\n inventory.delete((inventory,))\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_update_inventory(self):\n pass", "def test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n 
self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_add_and_remove_two_items(self):\n login = LoginPage(self.driver) #SAUCE-LAB-5\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products[0]\n first_item: InventoryItem\n first_item.add_to_cart()\n print('\\n')\n print(first_item.get_title())\n print(first_item.get_description())\n print(first_item.get_price())\n print('*' * 80)\n second_item = inventory_page.products[4]\n second_item: InventoryItem\n second_item.add_to_cart()\n print('\\n')\n print(second_item.get_title())\n print(second_item.get_description())\n print(second_item.get_price())\n print('*' * 80)\n first_item.remove_from_cart()\n second_item.remove_from_cart()\n print(f'Products {first_item.get_title()} and {second_item.get_title()} were successfully removed')", "def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_delete_nveto_pmt_item(self):\n pass", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n 
ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", "def test_products_ref_users_delete(self):\n pass", "def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')", "def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)", "def test_invoice_item_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [ \"name\", \"description\", \"image_link\", \"price\" ])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n id_itm = self._create_model(\"invoiceitem\", data, [ \"quantity\", \"quote_price\" ])\n if id_itm:\n # then performe delete\n self._delete_model(\"invoiceitem\", id_itm)\n self.assertIsNotNone(id_itm)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_delete_ingredient(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n 
'/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def tearDown(self):\n server.Inventory.remove_all()", "def test_shoppingcart_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe delete\n self._delete_model(\"shoppingcart\", id_cart)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def setUp(self):\n server.Inventory.remove_all()\n server.Inventory(0, \"shampoo\", 2, 'new').save()\n server.Inventory(0, \"conditioner\", 5, 'new').save()\n self.app = server.app.test_client()", "def confirm_inventory(self, data, batch): # not used will be deprecated todo\n try:\n batch = batch\n data = data\n location = self.Location.find(['name', '=', 'MyInventory'])[-1]\n inventory = self.Inventory.find([('batch_number', '=', batch), ('location', '=', location.id)])[-1]\n lines = inventory.lines\n for i in data:\n product = \\\n self.Product.find(\n [('code', '=', i['code']), ('description', '=', 'Stock'), ('type', '=', 'goods')])[\n -1]\n supplier = self.Party.find(['name', '=', i['supplier']])[-1]\n for j in lines:\n if j.product == product:\n pro = j.product\n template = pro.template\n template.list_price = Decimal(i['rate'])\n template.save()\n pro.save()\n j.quantity = float(i['quantity'])\n j.supplier = supplier\n j.expiry_date = i['expiry_date']\n j.save()\n inventory.state = 'done'\n inventory.save()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_delete(self):\n pass", "def test_delete_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.delete(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def test_delete1(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_delete_cart(self):\n user_id = '123'\n cart_id1 = self.cart_item_manager.create_cart(user_id, 'Cart1', True)\n cart_id2 = self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n cart_id3 = self.cart_item_manager.create_cart(user_id, 'Cart3', False)\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id1, '1', 'entity_type', 'entity_version')\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id1, '2', 'entity_type', 'entity_version')\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id2, '2', 'entity_type', 'entity_version')\n # Delete the non-default cart.\n # NOTE: The default cart should be left untouched.\n self.cart_item_manager.delete_cart(user_id, cart_id3)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart(user_id, cart_id3)\n 
self.assertEqual(self.cart_item_manager.user_service.get_or_create(user_id)['DefaultCartId'], cart_id1)\n self.assertIsNotNone(self.cart_item_manager.get_cart(user_id, cart_id1))\n # Delete the default cart.\n # NOTE: At this point, the user object should have the default cart ID undefined.\n self.cart_item_manager.delete_cart(user_id, cart_id1)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart(user_id, cart_id1)\n self.assertIsNone(self.cart_item_manager.user_service.get_or_create(user_id)['DefaultCartId'])\n self.assertIsNotNone(self.cart_item_manager.get_cart(user_id, cart_id2))", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_delete_shopping_cart_item(self):\n client = APIClient()\n # First create a user\n Customer.objects.create_user(name=\"kevin\", email=\"[email protected]\", password=\"secret_pass\",\n shipping_region_id=1)\n\n # Then force login with that user\n url = reverse('login')\n data = {'email': \"[email protected]\", 'password': \"secret_pass\"}\n response = client.post(url, data, format='json')\n access_token = response.data['access']\n\n # Then add products to the shopping cart\n url = reverse('shopping_cart_add_product')\n data = {'cart_id': \"\", 'product_id': 1, 'attributes': \"Blue, XL\"}\n response = client.post(url, data, format='json')\n item_id = response.data[0]['item_id']\n\n url = reverse('shopping_cart_remove_products')\n data = {'item_id': item_id, }\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)\n response = client.delete(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(ShoppingCart.objects.count(), 0)", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_vault_delete_vault_section(self):\n pass", "def test_shoppingitem_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', '[email protected]')\n # create an item\n self.item_class_obj.add_item(\n 'Christmass', 'Bread', '[email protected]')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-item', data={'list_name': 'Christmass', 'item_name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n self.item_class_obj.delete_item(\n 'Bread', '[email protected]', 'Christmass')\n # check if delete was successful\n self.assertIn(\"Successfuly deleted item \", str(res.data))", "def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed('{{cookiecutter.package_name}}')\n )", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']", "def test_delete_run(self):\n pass", "def test_update_product_with_empty_fields(self):\n resp = 
self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_hyperflex_app_catalog(self):\n pass", "def test_delete_admin_from_org(self):\n pass", "def test_products_ref_users_user_delete(self):\n pass", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_deletion(self):\n self.assertEqual(self.store.query(BatchManholePowerup).count(), 0)", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def 
test_delete7(self):\n pass", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_delete(self):\n\n\t\titem_id = mock_item()[0]\n\t\tmodels.delete(item_id)\n\n\t\titem = models.item(item_id)\n\t\tself.assertIsNone(item)", "def tearDown(self):\n ProcessRequest(f\"products/{self.product_id}.json\").send_request(\n 'DELETE', continue_on_error=True)\n super().tearDown()", "def test_cannotDeleteNonExistent(self):\n store = Store()\n self.assertFailStatus(\n 1, self._makeConfig(store),\n [\"delete\", \"--port-identifier\", \"12345\"])\n self.assertEqual(\n \"12345 does not identify an item.\\n\",\n sys.stdout.getvalue())", "def test_recipe_deletion(self):\n recipe = sample_recipe()\n recipe.ingredients.create(name='Eggs')\n\n url = recipe_detail_url(recipe.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Recipe.objects.count(), 0)\n self.assertEqual(Ingredient.objects.count(), 0)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_item(self, default_ms):\r\n self.initdb(default_ms)\r\n # r/o try deleting the course (is here to ensure it can't be deleted)\r\n with self.assertRaises(AttributeError):\r\n self.store.delete_item(self.xml_chapter_location)\r\n self.store.delete_item(self.import_chapter_location, '**replace_user**')\r\n # verify it's gone\r\n with self.assertRaises(ItemNotFoundError):\r\n self.store.get_item(self.import_chapter_location)", "def test_delete_case(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_deleteorganizations_item(self):\n pass", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n 
prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_delete_small_and_light_enrollment_by_seller_sku(self):\n pass", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_update_inventory_with_no_name(self):\n new_inventory = {'id': 2, 'quantity': 2, 'status': 'new'}\n resp = self.app.put('/inventories/2', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_data_object_untrash(self):\n pass", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = 
self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_delete_device_template(self):\n pass", "def test_create_inventory_with_no_name(self):\n new_inventory = {'status': 'new'}\n resp = self.app.post('/inventories', data=new_inventory, content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def test_delete_variant_delete_product_channel_listing_not_deleted(\n mocked_recalculate_orders_task,\n product_variant_deleted_webhook_mock,\n staff_api_client,\n product_with_two_variants,\n permission_manage_products,\n):\n # given\n query = DELETE_VARIANT_MUTATION\n product = product_with_two_variants\n variant = product.variants.first()\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.pk)\n variant_sku = variant.sku\n variables = {\"id\": variant_id}\n\n product_channel_listing_count = product.channel_listings.count()\n\n # when\n response = staff_api_client.post_graphql(\n query, variables, permissions=[permission_manage_products]\n )\n\n # then\n content = get_graphql_content(response)\n flush_post_commit_hooks()\n data = content[\"data\"][\"productVariantDelete\"]\n\n product_variant_deleted_webhook_mock.assert_called_once_with(variant)\n assert data[\"productVariant\"][\"sku\"] == variant_sku\n with pytest.raises(variant._meta.model.DoesNotExist):\n variant.refresh_from_db()\n mocked_recalculate_orders_task.assert_not_called()\n product.refresh_from_db()\n assert product.channel_listings.count() == product_channel_listing_count", "def test_delete_item_all_fails(self):\n\n web.app.config['READONLY'] = False\n\n # Delete all items\n response = self.client.delete('/item/')\n self.assertEqual(response.status_code, 405)\n\n # Note: if this fails, all items have gone and rest of\n # tests will fail!", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_material(self):\n expected_materials = [\n [],\n ['cotton'],\n ['cotton'],\n ]\n\n select_listings_to_edit(self.driver, 'Delete')\n d = self.driver\n bp = BulkPage(d)\n\n send_keys(bp.operation_input(), 'wool')\n click(bp.operation_apply())\n\n material_names = bp.material_names()\n assert material_names == expected_materials", "def test_delete_variant_delete_product_channel_listing_without_available_channel(\n mocked_recalculate_orders_task,\n product_variant_deleted_webhook_mock,\n staff_api_client,\n product,\n permission_manage_products,\n):\n # given\n query = DELETE_VARIANT_MUTATION\n variant = product.variants.first()\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.pk)\n variant_sku = variant.sku\n variables = {\"id\": variant_id}\n\n # second variant not available\n ProductVariant.objects.create(product=product, sku=\"not-available-variant\")\n\n assert product.channel_listings.count() == 
1\n\n # when\n response = staff_api_client.post_graphql(\n query, variables, permissions=[permission_manage_products]\n )\n\n # then\n content = get_graphql_content(response)\n flush_post_commit_hooks()\n data = content[\"data\"][\"productVariantDelete\"]\n\n product_variant_deleted_webhook_mock.assert_called_once_with(variant)\n assert data[\"productVariant\"][\"sku\"] == variant_sku\n with pytest.raises(variant._meta.model.DoesNotExist):\n variant.refresh_from_db()\n mocked_recalculate_orders_task.assert_not_called()\n product.refresh_from_db()\n assert product.channel_listings.count() == 0", "def test_delete_boat(self):\n pass", "def test_main_exit_absent(self, mock_delete_volume, mock_module, mock_client):\n PARAMS = {\n 'storage_system_ip': '192.168.0.1',\n 'storage_system_name': '3PAR',\n 'storage_system_username': 'USER',\n 'storage_system_password': 'PASS',\n 'volume_name': 'test_volume',\n 'cpg': None,\n 'size': None,\n 'size_unit': None,\n 'snap_cpg': None,\n 'wait_for_task_to_end': None,\n 'new_name': None,\n 'expiration_hours': None,\n 'retention_hours': None,\n 'ss_spc_alloc_warning_pct': None,\n 'ss_spc_alloc_limit_pct': None,\n 'usr_spc_alloc_warning_pct': None,\n 'usr_spc_alloc_limit_pct': None,\n 'rm_ss_spc_alloc_warning': None,\n 'rm_usr_spc_alloc_warning': None,\n 'rm_exp_time': None,\n 'rm_usr_spc_alloc_limit': None,\n 'rm_ss_spc_alloc_limit': None,\n 'compression': False,\n 'type': 'thin',\n 'keep_vv': None,\n 'state': 'absent'\n }\n # This creates a instance of the AnsibleModule mock.\n mock_module.params = PARAMS\n mock_module.return_value = mock_module\n instance = mock_module.return_value\n mock_delete_volume.return_value = (True, True, \"Deleted volume successfully.\", {})\n hpe3par_volume.main()\n # AnsibleModule.exit_json should be called\n instance.exit_json.assert_called_with(\n changed=True, msg=\"Deleted volume successfully.\")\n # AnsibleModule.fail_json should not be called\n self.assertEqual(instance.fail_json.call_count, 0)", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['pcr_primers']", "def test_delete_item(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n assert response.status_code == 204\n assert response.get_data() == b''" ]
[ "0.79532677", "0.7735564", "0.7528131", "0.7521453", "0.7518936", "0.746801", "0.7354772", "0.72263944", "0.71801", "0.71784055", "0.7171436", "0.7112214", "0.7106493", "0.70771635", "0.6879483", "0.68655676", "0.68639493", "0.68453324", "0.6812393", "0.6777014", "0.67697865", "0.6754222", "0.6747571", "0.67225647", "0.66775954", "0.6598845", "0.6592748", "0.65878624", "0.65853804", "0.6577626", "0.6523267", "0.65209746", "0.6514951", "0.6511906", "0.65073067", "0.6497634", "0.6479549", "0.6478376", "0.6426707", "0.6424805", "0.6419926", "0.64175135", "0.6413851", "0.6412969", "0.64031523", "0.6350108", "0.63466495", "0.63314515", "0.633035", "0.6301134", "0.6296863", "0.6295854", "0.629438", "0.6291665", "0.62895495", "0.6285368", "0.6276131", "0.6243001", "0.62368387", "0.62095153", "0.62009", "0.62006056", "0.6193776", "0.61867404", "0.6182765", "0.61827385", "0.6179483", "0.6175918", "0.6171978", "0.6164235", "0.6158744", "0.6152619", "0.61451256", "0.6138175", "0.6137038", "0.6117539", "0.61106557", "0.6106133", "0.61051494", "0.610376", "0.6101968", "0.61007845", "0.60858333", "0.6070537", "0.6068597", "0.6062567", "0.60576785", "0.60572624", "0.6051841", "0.6051534", "0.60499406", "0.60352063", "0.6034134", "0.6030231", "0.601742", "0.6014745", "0.6011776", "0.60090214", "0.6003402", "0.5997467" ]
0.8764794
0
Test that admin cannot delete a nonexistent product
def test_admin_cannot_delete_nonexistant_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/products/2',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'This product does not exist in Inventory!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def 
test_products_ref_users_delete(self):\n pass", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def 
test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_delete(self):\n pass", "def test_products_ref_users_user_delete(self):\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_deletehardwares_item(self):\n pass", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete_run(self):\n pass", "def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)", "def test_delete_admin_from_org(self):\n pass", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n 
self.assertEqual(resp.status_code, 404)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_wrong_admin_delete(self):\n\n with self.client:\n self.get_meals()\n id = 100\n token = self.get_token()\n response = self.client.delete(\n 'api/v1/meals/{}'.format(id), headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_cannotDeleteNonExistent(self):\n store = Store()\n self.assertFailStatus(\n 1, self._makeConfig(store),\n [\"delete\", \"--port-identifier\", \"12345\"])\n self.assertEqual(\n \"12345 does not identify an item.\\n\",\n sys.stdout.getvalue())", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_delete1(self):\n pass", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_delete_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.delete(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def test_delete_deployment(self):\n pass", "def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n 
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_delete7(self):\n pass", "def test_no_delete_permission(client):\n user = user_with_permissions(\"polls.view_poll\")\n poll = Poll.objects.create(owner=user, text=\"question\")\n\n url = reverse(\"admin:polls_poll_change\", args=(poll.pk,))\n delete_url = reverse(\"admin:polls_poll_delete\", args=(poll.pk,))\n client.force_login(user)\n\n response = client.get(url)\n assert delete_url not in response.content.decode()", "def test_delete_case(self):\n pass", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_vault_delete_vault_item(self):\n pass", "def test_deletion(self):\n self.assertEqual(self.store.query(BatchManholePowerup).count(), 0)", "def test_delete_hyperflex_app_catalog(self):\n pass", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_delete_ingredient(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())", "def test_delete_no_target(self):\n # login as library manager\n self.authenticate(self.user)\n\n # remove all works\n Work.objects.all().delete()\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 0})", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_delete__not_found(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.NotFound):\n self.handler.do_delete(account_id=self.appuser_id + 1)\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('[email protected]', unrevised_appuser.email)", "def test_delete__not_found(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.NotFound):\n self.handler.do_delete(self.feature_id + 1)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n 
self.storage.delete.assert_called_with(pkg)", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_post_delete_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_cloud(self):\n pass", "def test_admin_cannot_delete_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/5',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This attendant does not exist!\")\n self.assertEqual(resp.status_code, 404)", "def test_products_ref_groups_delete(self):\n pass", "def test_duo_application_delete(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_jenkins_user_delete(self):\n ju = JenkinsUser.objects.get(username=\"user_1\")\n self.assertRaises(django.db.models.deletion.ProtectedError, ju.delete)", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def tearDown(self):\n ProcessRequest(f\"products/{self.product_id}.json\").send_request(\n 'DELETE', continue_on_error=True)\n super().tearDown()", "def test_not_author_delete_post(self):\n self.client.login(username=\"Bill\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/', {\"next\": \"\"})\n self.assertNotEqual(list(Post.objects.filter(id=1)), [])", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed('{{cookiecutter.package_name}}')\n )", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n 
follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def test_delete_device_template(self):\n pass", "def test_request_do_delete_non_existent_id(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert DUT.request_do_delete(100)", "def test_delete_device_user(self):\n pass", "def test_delete_error(self):\n with self.assertRaises(QiitaDBExecutionError):\n PrepTemplate.delete(1)", "def test_delete_user(self):\n pass", "def test_delete_user(self):\n pass", "def test_delete_but_no_view_permission(client):\n user = user_with_permissions(\"polls.delete_poll\")\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"], \"Polls\": [None]}", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(account_id=self.appuser_id)\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('[email protected]', unrevised_appuser.email)", "def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)", "def test_delete_boat(self):\n pass", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_duo_account_delete(self):\n pass", "def test_delete_non_existing_resource(self):\n CommonTestCases.admin_token_assert_in(\n self,\n delete_assigned_resource_from_non_existing_resource,\n \"Resource does not exist\"\n )", "def test_delete(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res_id = self.metadata.get_by_id(\n entity=Dashboard, entity_id=str(res_name.id.__root__)\n )\n\n # Delete\n self.metadata.delete(\n entity=Dashboard, entity_id=str(res_id.id.__root__), recursive=True\n )\n\n # Then we should not find it\n res = self.metadata.list_entities(entity=Dashboard)\n assert not next(\n iter(\n ent\n for ent in res.entities\n if ent.fullyQualifiedName == self.entity.fullyQualifiedName\n ),\n None,\n )", "def test_delete__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_delete(self.feature_id)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", 
"def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))", "def test_client_nationlity_delete(self):\n pass", "def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_deleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO3\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo in event.todoitem_set.all()\n\n self.client.get(reverse('todo_delete', args=[todo.pk]))\n\n assert event.todoitem_set.all().count() == 0", "def test_delete_idea_not_confirm(self):\n self.newidea()\n rv = self.app.post('/delete/idea/1',\n data=dict(),\n follow_redirects=True)\n self.assertIn(b'required', rv.data)", "def test_delete_record(self):\n pass", "def test_order_cannot_be_deleted_if_dont_exist(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/5',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 404)\n\t\tself.assertEqual(result[\"message\"], \"That order is not available\")", "def test_delete_nveto_pmt_item(self):\n pass", "def test_client_verification_document_delete(self):\n pass", "def test_recipe_deletion(self):\n recipe = sample_recipe()\n recipe.ingredients.create(name='Eggs')\n\n url = recipe_detail_url(recipe.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Recipe.objects.count(), 0)\n self.assertEqual(Ingredient.objects.count(), 0)", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())" ]
[ "0.8434453", "0.81416374", "0.8129146", "0.79895574", "0.7884565", "0.7712441", "0.7642063", "0.75856626", "0.7577927", "0.757099", "0.7507489", "0.75073075", "0.75018024", "0.7473333", "0.7418871", "0.7409584", "0.72817427", "0.7275699", "0.72678584", "0.7262097", "0.7227959", "0.71377033", "0.71309066", "0.71301854", "0.71134305", "0.7075899", "0.70369524", "0.70226353", "0.70187646", "0.7013848", "0.6953603", "0.69524336", "0.69365835", "0.6931486", "0.69301087", "0.69083184", "0.68861777", "0.6873848", "0.6864657", "0.6855874", "0.68451864", "0.6841039", "0.68202066", "0.6817791", "0.68170214", "0.68100464", "0.6807024", "0.68016154", "0.68004096", "0.679966", "0.67714536", "0.67656314", "0.6764172", "0.67629844", "0.6733907", "0.67254376", "0.67163354", "0.6687332", "0.66870403", "0.6682565", "0.6674251", "0.6673119", "0.66637987", "0.6644", "0.6642687", "0.6626325", "0.6615105", "0.6615105", "0.66131", "0.66104984", "0.6603271", "0.65991336", "0.65925467", "0.65915334", "0.6582906", "0.65807486", "0.65780777", "0.6574641", "0.6574641", "0.65679514", "0.65558", "0.6546069", "0.65439737", "0.65433335", "0.65427715", "0.6525241", "0.6519248", "0.65148795", "0.6511476", "0.6506007", "0.65025413", "0.6501324", "0.64814335", "0.6478739", "0.6468321", "0.64670724", "0.6465108", "0.64640397", "0.6462158", "0.6460252" ]
0.8506851
0
Test that admin cannot delete a product with a non-integer prod_id
def test_admin_cannot_delete_product_with_non_integer_prod_id(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)

    resp = self.client.delete(
        '/api/v1/products/kk',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'The product id should be a number!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)", "def test_delete_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_delete_product(self):\n view = ProductDeleteView.as_view({'delete': 'destroy'})\n uri = reverse('products:delete-product', 
kwargs={'pk': self.product_id})\n request = self.factory.delete(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 204,\n f'Expected Response Code 204, received {response.status_code} instead.')", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete(self):\n self.assertEqual(Product.objects.count(), 2)\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.delete(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(Product.objects.count(), 1)", "def test_delete_product(self):\n product_pk = 1\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(product_count_before - models.Product.objects.count(), 1)", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)", "def test_delete_store_success(self):\n product = sample_product(supplier_id=self.user)\n url = detail_url(product.id)\n res = self.client.delete(url)\n products = Product.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(products), 0)", "def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_products_ref_users_delete(self):\n pass", "def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n 
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def test_products_ref_users_user_delete(self):\n pass", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def delete(self, product):\n product_id = str(product)\n\n\n if product_id in self.sepet:\n del self.sepet[product_id]\n print(product_id)\n self.session.modified=True", "def test_delete_nveto_pmt_item(self):\n pass", "def test_delete(self):\n pass", "def test_shoppingcart_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe delete\n self._delete_model(\"shoppingcart\", id_cart)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_delete_item_using_delete(self):\n pass", "def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n 
status.HTTP_403_FORBIDDEN)", "def delete_product(conn, product_id: int) -> None:\n with conn.cursor() as cursor:\n cursor.execute(f\"\"\"update products set deleted = True where id = '{product_id}'\"\"\")\n if cursor.rowcount:\n conn.commit()\n else:\n raise errors.StoreError", "def deleteProduct(request,productId):\n deleteObj = Collection()\n deleteObj.id=productId\n productBll.deleteProducts(deleteObj)\n return HttpResponseRedirect('/admin/product/list/')", "def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)", "def test_products_ref_groups_delete(self):\n pass", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)", "def tearDown(self):\n ProcessRequest(f\"products/{self.product_id}.json\").send_request(\n 'DELETE', continue_on_error=True)\n super().tearDown()", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n 
headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_deletehardwares_item(self):\n pass", "def test_delete1(self):\n pass", "def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)", "def test_delete_run(self):\n pass", "def delete_products(request):\n product_obj = Products.objects.get(id=request.data[\"id\"])\n if request.user == product_obj.shop_rel.user:\n product_obj.delete()\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def test_delete_small_and_light_enrollment_by_seller_sku(self):\n pass", "def test_delete_ingredient(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())", "def delete_product(driver, login_action, open_products_page, products_page):\n products_page.delete_product()\n driver.refresh()", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", "def delete(self, product):\n product_id = str(product)\n\n if product_id in self.basket:\n del self.basket[product_id]\n #print(product_id)\n self.save()", "def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)", "def delete_product(product_id):\n with MY_CONNECTION as connection:\n connection.execute(\"DELETE FROM Products WHERE id_product=?\", (product_id,))", "def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_delete_company_props_using_delete(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = 
get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product successfully deleted')\n return redirect(reverse('products'))", "def test_delete_case(self):\n pass", "def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)", "def test_delete7(self):\n pass", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def test_delete_shopping_cart_item(self):\n client = APIClient()\n # First create a user\n Customer.objects.create_user(name=\"kevin\", email=\"[email protected]\", password=\"secret_pass\",\n shipping_region_id=1)\n\n # Then force login with that user\n url = reverse('login')\n data = {'email': \"[email protected]\", 'password': \"secret_pass\"}\n response = client.post(url, data, format='json')\n access_token = response.data['access']\n\n # Then add products to the shopping cart\n url = reverse('shopping_cart_add_product')\n data = {'cart_id': \"\", 'product_id': 1, 'attributes': \"Blue, XL\"}\n response = client.post(url, data, format='json')\n item_id = response.data[0]['item_id']\n\n url = reverse('shopping_cart_remove_products')\n data = {'item_id': item_id, }\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)\n response = client.delete(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(ShoppingCart.objects.count(), 0)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = 
get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product deleted!')\n return redirect(reverse('products'))", "def test_shoppingitem_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', '[email protected]')\n # create an item\n self.item_class_obj.add_item(\n 'Christmass', 'Bread', '[email protected]')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-item', data={'list_name': 'Christmass', 'item_name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n self.item_class_obj.delete_item(\n 'Bread', '[email protected]', 'Christmass')\n # check if delete was successful\n self.assertIn(\"Successfuly deleted item \", str(res.data))", "def test_delete_variant_delete_product_channel_listing_not_deleted(\n mocked_recalculate_orders_task,\n product_variant_deleted_webhook_mock,\n staff_api_client,\n product_with_two_variants,\n permission_manage_products,\n):\n # given\n query = DELETE_VARIANT_MUTATION\n product = product_with_two_variants\n variant = product.variants.first()\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.pk)\n variant_sku = variant.sku\n variables = {\"id\": variant_id}\n\n product_channel_listing_count = product.channel_listings.count()\n\n # when\n response = staff_api_client.post_graphql(\n query, variables, permissions=[permission_manage_products]\n )\n\n # then\n content = get_graphql_content(response)\n flush_post_commit_hooks()\n data = content[\"data\"][\"productVariantDelete\"]\n\n product_variant_deleted_webhook_mock.assert_called_once_with(variant)\n assert data[\"productVariant\"][\"sku\"] == variant_sku\n with pytest.raises(variant._meta.model.DoesNotExist):\n variant.refresh_from_db()\n mocked_recalculate_orders_task.assert_not_called()\n product.refresh_from_db()\n assert product.channel_listings.count() == product_channel_listing_count", "def test_delete_admin_from_org(self):\n pass", "def delete(self, Product):\n with api.commit_or_abort(\n db.session,\n default_error_message=\"Some error message\"\n ):\n db.session.delete(Product)\n return None, 204", "def test_delete_deployment(self):\n pass", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def 
test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_delete_record(self):\n pass", "def test_vault_delete_vault_item(self):\n pass", "def test_duo_account_delete(self):\n pass", "def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Invalid Request: Only admin can delete products/services.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product/Service deleted!')\n return redirect(reverse('products'))", "def delete_products_mobile(request, p_k):\n product_obj = Products.objects.get(id=p_k)\n if request.user == product_obj.shop_rel.user:\n product_obj.delete()\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def test_delete_device_by_id(self):\n pass", "def test_client_nationlity_delete(self):\n pass", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_delete_cloud(self):\n pass", "def test_client_verification_document_delete(self):\n pass", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'This feature is for Admin only.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product Deleted')\n return redirect(reverse('home'))", "def delete(self, product):\n product_id = str(product)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()", "def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Access denied!\\\n Sorry, only site owners have this permission.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, f'{product.name} was successfully deleted!')\n return redirect(reverse('products'))", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 
'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def test_coupledmodels_id_delete(self):\n pass", "def test_delete_device_by_id1(self):\n pass", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_device_user(self):\n pass", "def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_delete_boat(self):\n pass", "def delete(conn, user_id, product_id) -> None:\n with conn.cursor() as cursor:\n cursor.execute(f\"\"\"delete from cart \n where id_user = {user_id} and id_product = {product_id}\"\"\")\n conn.commit()", "def test_delete_delete_and_delete_id_not_equal(self):\n doc = TestDoc(\"1\", \"test\")\n self.assertNotEqual(\n BulkActionItem.delete(doc),\n BulkActionItem.delete_id(doc.id),\n )", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_request_do_delete_non_existent_id(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert DUT.request_do_delete(100)", "def test_client_tax_information_delete(self):\n pass", "def test_delete_project(self):\n pass" ]
[ "0.8163337", "0.811003", "0.80877143", "0.80876744", "0.8045905", "0.79786986", "0.7882516", "0.7854732", "0.7846621", "0.7829516", "0.77844703", "0.77800816", "0.7772745", "0.77263916", "0.7600396", "0.7597553", "0.75532126", "0.7346543", "0.72736114", "0.7209453", "0.70817554", "0.7031518", "0.70062274", "0.6978588", "0.6973942", "0.69384265", "0.68938947", "0.686909", "0.68681407", "0.68598217", "0.6803825", "0.6797122", "0.67623496", "0.67581224", "0.6757263", "0.67549217", "0.67451435", "0.6731607", "0.67199475", "0.670733", "0.6688471", "0.665246", "0.6647532", "0.6639037", "0.6609533", "0.66086257", "0.6592098", "0.65745133", "0.6570509", "0.6547072", "0.6535702", "0.65280443", "0.64990675", "0.64943457", "0.64817125", "0.6477539", "0.6477539", "0.64756995", "0.6473847", "0.6464002", "0.6457825", "0.6445716", "0.64438236", "0.6435727", "0.64314556", "0.6429334", "0.6429163", "0.64267904", "0.6426367", "0.6425897", "0.64250034", "0.6417956", "0.6414326", "0.6412133", "0.6408467", "0.64082694", "0.6405664", "0.63957447", "0.63895196", "0.63829976", "0.6377788", "0.6375252", "0.6372528", "0.63673383", "0.6354792", "0.6353425", "0.6346549", "0.6338582", "0.63357425", "0.6332504", "0.6332408", "0.6331019", "0.6328807", "0.6325671", "0.63219255", "0.6320203", "0.6317038", "0.63168246", "0.6313007", "0.6306047" ]
0.8068848
4
Test ComBat feature harmonization.
def test_combat():
    # Check if example data directory exists
    example_data_dir = th.find_exampledatadir()

    # Check if example data required exists
    features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))
    if len(features) < 7:
        message = 'Too few example features for ComBat testing not found! ' +\
            'Run the create_example_data script from the WORC exampledata ' +\
            'directory!'
        raise WORCValueError(message)
    elif len(features) > 7:
        message = 'Too many example features for ComBat testing not found! ' +\
            'Run the create_example_data script from the WORC exampledata ' +\
            'directory!'
        raise WORCValueError(message)

    objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')

    # Python
    config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')
    features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_python_') for f in features]

    # First run synthetic test
    # Synthetictest()

    # # Run the Combat function: only for training
    # ComBat(features_train_in=features,
    #        labels_train=objectlabels,
    #        config=config,
    #        features_train_out=features_train_out)
    #
    # # Run the Combat function: now for train + testing
    ComBat(features_train_in=features[0:4],
           labels_train=objectlabels,
           config=config,
           features_train_out=features_train_out[0:4],
           features_test_in=features[4:],
           labels_test=objectlabels,
           features_test_out=features_train_out[4:])

    # # Matlab
    # config = os.path.join(example_data_dir, 'ComBatConfig_matlab.ini')
    # features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_matlab_') for f in features]
    #
    # # # Run the Combat function: only for training
    # ComBat(features_train_in=features,
    #        labels_train=objectlabels,
    #        config=config,
    #        features_train_out=features_train_out)
    #
    # # Run the Combat function: now for train + testing
    # ComBat(features_train_in=features[0:4],
    #        labels_train=objectlabels,
    #        config=config,
    #        features_train_out=features_train_out[0:4],
    #        features_test_in=features[4:],
    #        labels_test=objectlabels,
    #        features_test_out=features_train_out[4:])

    # Remove the feature files
    # for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):
    #     os.remove(i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_feature(feature, value, good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)", "def feature():\n pass", "def test_combat_fastr():\n # Check if example data directory exists\n example_data_dir = th.find_exampledatadir()\n\n # Check if example data required exists\n features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))\n if len(features) < 6:\n message = 'Too few example features for ComBat testing not found!' +\\\n 'Run the create_example_data script from the WORC exampledata ' +\\\n 'directory!'\n raise WORCValueError(message)\n elif len(features) > 6:\n message = 'Too many example features for ComBat testing not found!' 
+\\\n 'Run the create_example_data script from the WORC exampledata ' +\\\n 'directory!'\n raise WORCValueError(message)\n\n objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')\n\n # Python\n config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')\n\n # Create the fastr network\n experiment = fastr.create_network('test_ComBat')\n\n source_features = experiment.create_source('HDF5', id='features_in', node_group='features')\n source_labels = experiment.create_source('PatientInfoFile', id='labels', node_group='pctrain')\n source_config = experiment.create_source('ParameterFile', id='config', node_group='conf')\n\n sink_features = experiment.create_sink('HDF5', id='features_out')\n\n node_combat = experiment.create_node('combat/ComBat:1.0',\n tool_version='1.0',\n id='ComBat',)\n\n link_combat_1 = experiment.create_link(source_config.output, node_combat.inputs['config'])\n link_combat_2 = experiment.create_link(source_labels.output, node_combat.inputs['patientclass_train'])\n link_combat_1.collapse = 'conf'\n link_combat_2.collapse = 'pctrain'\n\n # Mimic using two feature toolboxes\n links_Combat1_train = node_combat.inputs['features_train']['MR_0'] << source_features.output\n links_Combat1_train.collapse = 'features'\n\n links_Combat2_train = node_combat.inputs['features_train']['MR_1'] << source_features.output\n links_Combat2_train.collapse = 'features'\n\n links_Combat_out_train = sink_features.input << node_combat.outputs['features_train_out']\n links_Combat_out_train.collapse = 'ComBat'\n\n # Provide source and sink data\n source_data = dict()\n source_data['features_in'] = features\n source_data['labels'] = objectlabels\n source_data['config'] = config\n\n sink_data = dict()\n sink_data['features_out'] = \"vfs://output/test_ComBat/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}\"\n\n # Execute\n experiment.execute(source_data, sink_data, execution_plugin='LinearExecution')\n\n # Remove the feature files\n for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):\n os.remove(i)", "def test_theft_and_stealing(self):", "def test_predictor():", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_overflow\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_datetime\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_errors\", \"feature\"), flags=TE)", "def test_machine_learning():", "def feat():\n pass", "def test_features(self):\n assert list(parser.generate_commands(yaml.load(\n '- my_command: {name: my_name}'))) == [('my_command', {'name': 'my_name'})]", "def test_text_classifier_vaporise(self):\n pass", "def test_01_lighting(self):", "def testBeliefs1sk(self):", "def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)", "def test_active_inference_SPM_1b(self):", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def test_training(self):\n\t\tpass", "def 
feature(self, node=\"clickhouse1\"):\n self.context.node = self.context.cluster.node(node)\n\n for scenario in loads(current_module(), Scenario):\n scenario()", "def test_series_in_features(self):\n assert parse_command({'test{{A,B}}': {'depends_on': 'name{{A,B}}'}}) == [\n ('testA', {'depends_on': 'nameA'}), ('testB', {'depends_on': 'nameB'})]", "def cli(argv):\r\n argv.append(\"--exhaust-materials\")\r\n cltestbench.cli(argv)", "def main():\n parser = argparse.ArgumentParser(\n description=\"making feature file argsurations.\")\n\n parser.add_argument(\n \"--waveforms\", default=None,\n help=\"directory or list of filename of input wavfile\")\n parser.add_argument(\n \"--hdf5dir\", default=None,\n help=\"directory to save hdf5\")\n parser.add_argument(\n \"--wavdir\", default=None,\n help=\"directory to save of preprocessed wav file\")\n parser.add_argument(\n \"--fs\", default=16000,\n type=int, help=\"Sampling frequency\")\n parser.add_argument(\n \"--shiftms\", default=5,\n type=float, help=\"Frame shift in msec\")\n parser.add_argument(\n \"--feature_type\", default=\"world\", choices=[\"world\", \"melspc\", \"mcep\"],\n type=str, help=\"feature type\")\n parser.add_argument(\n \"--mspc_dim\", default=80,\n type=int, help=\"Dimension of mel spectrogram\")\n parser.add_argument(\n \"--minf0\", default=40,\n type=int, help=\"minimum f0 for world analysis\")\n parser.add_argument(\n \"--maxf0\", default=400,\n type=int, help=\"maximum f0 for world analysis\")\n parser.add_argument(\n \"--fmin\", default=None, nargs=\"?\",\n type=int, help=\"minimum frequency for melspc\")\n parser.add_argument(\n \"--fmax\", default=None, nargs=\"?\",\n type=int, help=\"maximum frequency for melspc\")\n parser.add_argument(\n \"--mcep_dim\", default=24,\n type=int, help=\"Dimension of mel cepstrum\")\n parser.add_argument(\n \"--mcep_alpha\", default=0.41,\n type=float, help=\"Alpha of mel cepstrum\")\n parser.add_argument(\n \"--fftl\", default=1024,\n type=int, help=\"FFT length\")\n parser.add_argument(\n \"--highpass_cutoff\", default=70,\n type=int, help=\"Cut off frequency in lowpass filter\")\n parser.add_argument(\n \"--save_wav\", default=True,\n type=strtobool, help=\"Whether to save filtered wav file\")\n parser.add_argument(\n \"--n_jobs\", default=10,\n type=int, help=\"number of parallel jobs\")\n parser.add_argument(\n \"--verbose\", default=1,\n type=int, help=\"log message level\")\n\n args = parser.parse_args()\n\n # set log level\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n else:\n logging.basicConfig(level=logging.WARNING,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n logging.warning(\"logging is disabled.\")\n\n # show arguments\n for key, value in vars(args).items():\n logging.info(\"%s = %s\" % (key, str(value)))\n\n # read list\n if os.path.isdir(args.waveforms):\n file_list = sorted(find_files(args.waveforms, \"*.wav\"))\n else:\n file_list = read_txt(args.waveforms)\n logging.info(\"number of utterances = %d\" % len(file_list))\n\n # check directory existence\n if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:\n os.makedirs(args.wavdir)\n if not os.path.exists(args.hdf5dir):\n 
os.makedirs(args.hdf5dir)\n\n # divide list\n file_lists = np.array_split(file_list, args.n_jobs)\n file_lists = [f_list.tolist() for f_list in file_lists]\n\n # multi processing\n processes = []\n if args.feature_type == \"world\":\n target_fn = world_feature_extract\n elif args.feature_type == \"melspc\":\n target_fn = melspectrogram_extract\n else:\n target_fn = melcepstrum_extract\n for f in file_lists:\n p = mp.Process(target=target_fn, args=(f, args,))\n p.start()\n processes.append(p)\n\n # wait for all process\n for p in processes:\n p.join()", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_classify_cuisine(self):\n pass", "def test_T01():", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def do_it(args):\n\n #force = args.force\n #testing = args.testing\n #verbose = args.verbose\n #regions = args.regions\n\n # XXX WORKING HERE", "def run_tests():\n 
source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def test_manlext(self):\n self.chck_triple('manlext')", "def test_intent_classifier_vaporise(self):\n pass", "def test_add_feature():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:test\",\n \"/NoRestart\",\n ]\n )", "def test_make_macrobes(self):\n basic_test_runner(self, 'macrobes')", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def testBeliefs2sk(self):", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)", "def test_text_classifier_add_testing_samples(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def test_all_components(self):\n model_name = 'BCZModel'\n pose_components = [\n ('xyz', 3, True, 100.),\n ('quaternion', 4, False, 10.),\n ('axis_angle', 3, True, 10.),\n ('arm_joints', 7, True, 1.),\n ('target_close', 1, False, 1.),\n ]\n gin.bind_parameter(\n 'BCZModel.action_components', pose_components)\n gin.parse_config('BCZPreprocessor.mock_subtask = True')\n gin.parse_config(\n 'resnet_film_network.film_generator_fn = @linear_film_generator')\n self._fixture.random_train(model, model_name)", "def test_text_classifier_test(self):\n pass", "def test_text_classifier_curate(self):\n pass", "def spec_tests():\n pass", "def test_get_scenario(self):\n pass", "def main():\n feature_fns = [token_features, token_pair_features, lexicon_features]\n # Download and read data.\n download_data()\n docs, labels = read_data(os.path.join('data', 'train'))\n # Evaluate accuracy of many combinations\n # of tokenization/featurization.\n results = eval_all_combinations(docs, labels,\n [True, False],\n feature_fns,\n [2,5,10])\n # Print information about these results.\n best_result = results[0]\n worst_result = results[-1]\n print('best cross-validation result:\\n%s' % str(best_result))\n print('worst cross-validation result:\\n%s' % str(worst_result))\n plot_sorted_accuracies(results)\n print('\\nMean Accuracies per Setting:')\n print('\\n'.join(['%s: %.5f' % (s,v) for v,s in mean_accuracy_per_setting(results)]))\n\n # Fit best classifier.\n clf, vocab = fit_best_classifier(docs, labels, results[0])\n\n # Print top coefficients per class.\n 
print('\\nTOP COEFFICIENTS PER CLASS:')\n print('negative words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 0, 5, vocab)]))\n print('\\npositive words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 1, 5, vocab)]))\n\n # Parse test data\n test_docs, test_labels, X_test = parse_test_data(best_result, vocab)\n\n # Evaluate on test set.\n predictions = clf.predict(X_test)\n print('testing accuracy=%f' %\n accuracy_score(test_labels, predictions))\n\n print('\\nTOP MISCLASSIFIED TEST DOCUMENTS:')\n print_top_misclassified(test_docs, test_labels, X_test, clf, 5)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def setup_features():\n\n core_features = {\"web\": [\"content_directory\", \"controllers\", \"templates\"]}\n\n imported_features = []\n for feature_type, feature_list in core_features.items():\n features_list_names = \", \".join(feature_list)\n print(\n \"** Setting up {0} features {1}\".format(\n info(feature_type), info(features_list_names)\n )\n )\n for feature_name in feature_list:\n script_dir = dirname(abspath(__file__))\n module_fname = join(\n script_dir, \"features\", feature_type, feature_name + \".py\"\n )\n\n feature_dict = {}\n with open(module_fname) as source_file:\n exec(compile(source_file.read(), module_fname, \"exec\"), feature_dict)\n try:\n feature = feature_dict[\"Feature\"]()\n except KeyError:\n print_error(\n \"Feature module '%s' does not provide a Feature class!\"\n % feature_name\n )\n sys.exit(1)\n try:\n feature.setup()\n except: # NOQA: E722\n print_error(\"Failed setting up feature '%s' !\" % feature_name)\n raise\n imported_features.append(feature)\n\n for feature in imported_features:\n if hasattr(feature, \"activate\"):\n feature.activate()", "def test_creating_simple_feature():\n # given & when\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n\n # then\n assert feature.id == 1\n assert feature.keyword == \"Feature\"\n assert feature.sentence == \"I am a feature\"\n assert feature.path == \"foo.feature\"\n assert feature.line == 1\n assert feature.tags == []", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def main():\r\n\r\n # Command-line arguments\r\n training_data = argv[1]\r\n hypothesis_out = argv[2]\r\n learning_type = argv[3]\r\n test = argv[4]\r\n labels = None\r\n if len(argv) > 5:\r\n labels = argv[5]\r\n\r\n # Parse data and determine features\r\n feat_obj = FeatureParser(training_data)\r\n data = FeatureData(feat_obj.features)\r\n\r\n # Train model using DT or DT + adaboost\r\n train(data, hypothesis_out, learning_type)\r\n\r\n # Predict on test set with trained model\r\n predictions = predict(hypothesis_out, test, learning_type)\r\n\r\n # Evaluate accuracy of test data if provided lables\r\n if labels:\r\n accuracy = evaluate(predictions, labels)\r\n print('Model accuracy on test data:',str(accuracy) + '%')", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()\n\n self.nice.parseOptions(self.niceArgV)", "def test_all_features(self):\n to_create = ['looktest1', 
'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def tests():", "def test(self):\n pass", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def test_man9ext(self):\n self.chck_triple('man9ext')", "def test_get_scenarios(self):\n pass", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def test_adaptability():\n assert chap2.adaptability()", "def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"", "def main(args):\n if args.train_test_split < 0.2 or args.train_test_split > 0.8:\n print(\"Bad value for train_test_split, range is 0.2 - 0.8\")\n sys.exit()\n\n dataset = pd.read_csv(args.train_file)\n\n x_data = dataset.loc[:, (dataset.columns != args.classification_column) \\\n & (dataset.columns != \"Survey_id\")]\n y_data = dataset[args.classification_column].to_numpy()\n dataset_headers = list(x_data.columns)\n x_data = x_data.fillna(0).to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, \\\n test_size=args.train_test_split)\n\n\n dtc = DecisionTreeClassifier(max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n dtc = dtc.fit(x_train, y_train)\n dtc_score = dtc.score(x_test, y_test)\n\n\n export_graphviz(dtc, out_file=\"decision_tree.dot\", feature_names=dataset_headers, \\\n rounded=True, precision=1, filled=True)\n os.system(\"dot -Tpng decision_tree.dot -o decision_tree.png\")\n\n\n rfc = RandomForestClassifier(n_estimators=args.estimators, max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n rfc.fit(x_train, y_train)\n rfc_score = rfc.score(x_test, y_test)\n\n file = open('result.txt', 'w')\n file.write(f'Decisions tree score = {dtc_score}\\n')\n 
file.write(f'Random forest score = {rfc_score}\\n')\n file.close()", "def generate_features_test(stances, dataset, name, feature_list, features_dir):\n h, b, bodyId, headId = [], [], [], []\n\n feature_dict = {'overlap': word_overlap_features,\n 'refuting': refuting_features,\n 'polarity': polarity_features,\n 'hand': hand_features,\n 'word_unigrams_5000_concat_tf_l2_holdout_unlbled_test': word_unigrams_5000_concat_tf_l2_holdout_unlbled_test,\n 'NMF_cos_300_holdout_unlbled_test': NMF_cos_300_holdout_unlbled_test,\n 'NMF_concat_300_holdout_unlbled_test': NMF_concat_300_holdout_unlbled_test,\n 'latent_dirichlet_allocation_25_holdout_unlbled_test': latent_dirichlet_allocation_25_holdout_unlbled_test,\n 'latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test': latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test,\n 'NMF_fit_all_incl_holdout_and_test': NMF_fit_all_incl_holdout_and_test,\n 'latent_dirichlet_allocation_incl_holdout_and_test': latent_dirichlet_allocation_incl_holdout_and_test,\n 'latent_semantic_indexing_gensim_holdout_and_test': latent_semantic_indexing_gensim_holdout_and_test,\n 'NMF_fit_all_concat_300_and_test': NMF_fit_all_concat_300_and_test,\n 'word_ngrams_concat_tf5000_l2_w_holdout_and_test': word_ngrams_concat_tf5000_l2_w_holdout_and_test,\n 'NMF_fit_all': NMF_fit_all,\n 'word_ngrams_concat_tf5000_l2_w_holdout': word_ngrams_concat_tf5000_l2_w_holdout,\n 'latent_dirichlet_allocation': latent_dirichlet_allocation,\n 'latent_semantic_indexing_gensim_test': latent_semantic_indexing_gensim_test,\n 'NMF_fit_all_concat_300': NMF_fit_all_concat_300,\n 'NMF_cos_50': NMF_cos_50,\n 'latent_dirichlet_allocation_25': latent_dirichlet_allocation_25,\n 'latent_semantic_indexing_gensim_300_concat_holdout': latent_semantic_indexing_gensim_300_concat_holdout,\n 'NMF_concat_300_holdout': NMF_concat_300_holdout,\n 'word_unigrams_5000_concat_tf_l2_holdout': word_unigrams_5000_concat_tf_l2_holdout,\n 'ppdb': ppdb,\n 'stanford_ppdb': stanford_ppdb_score,\n 'stanford_ppdb_1sent': stanford_ppdb_score_1sent,\n 'stanford_ppdb_2sent': stanford_ppdb_score_2sent,\n 'stanford_ppdb_3sent': stanford_ppdb_score_3sent,\n 'stanford_sentiment': stanford_sentiment,\n 'stanford_sentiment_1sent': stanford_sentiment_1sent,\n 'stanford_sentiment_2sent': stanford_sentiment_2sent,\n 'stanford_sentiment_3sent': stanford_sentiment_3sent,\n 'stanford_wordsim': stanford_based_verb_noun_sim,\n 'stanford_wordsim_1sent': stanford_based_verb_noun_sim_1sent,\n 'stanford_wordsim_2sent': stanford_based_verb_noun_sim_2sent,\n 'stanford_wordsim_3sent': stanford_based_verb_noun_sim_3sent,\n 'stanford_negation': stanford_negation_features,\n 'stanford_negation_1sent': stanford_negation_features_1sent,\n 'stanford_negation_2sent': stanford_negation_features_2sent,\n 'stanford_negation_3sent': stanford_negation_features_3sent,\n 'stanford_avg_words_per_sent': stanford_avg_words_per_sent,\n 'stanford_avg_words_per_sent_1sent': stanford_avg_words_per_sent_1sent,\n 'stanford_avg_words_per_sent_2sent': stanford_avg_words_per_sent_2sent,\n 'stanford_avg_words_per_sent_3sent': stanford_avg_words_per_sent_3sent,\n 'hedging': hedging_features,\n 'sen2sen': sen2sen_similarity_max,\n 'wmdsenSen': word_mover_distance_similarity_sentence_min,\n 'wmdsenDoc': word_mover_distance_wholebody,\n 'sdm_sim': sdm_sim,\n 'discuss': discuss_features,\n 'single_flat_LSTM_50d_100': single_flat_LSTM_50d_100,\n 'char_3grams_5000_concat_all_data': char_3grams_5000_concat_all_data,\n 'lexical_features': lexical_features,\n 
'max_diff_twitter_uni_bigrams': max_diff_twitter_uni_bigrams,\n 'mpqa_unigrams': mpqa_unigrams,\n 'negated_context_word_12grams_concat_tf5000_l2_all_data': negated_context_word_12grams_concat_tf5000_l2_all_data,\n 'nrc_emo_lex': nrc_emo_lex,\n 'nrc_hashtag_sentiment_unigram': nrc_hashtag_sentiment_unigram,\n 'nrc_hashtag_sentiment_unigram_POS': nrc_hashtag_sentiment_unigram_POS,\n #'POS_features': POS_features,\n 'readability_features': readability_features,\n 'sentiment140_unigrams': sentiment140_unigrams,\n 'structural_features': structural_features,\n 'latent_dirichlet_allocation_300': latent_dirichlet_allocation_300,\n 'NMF_cos_300': NMF_cos_300\n }\n\n stanceCounter = 0\n for stance in stances:\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n bodyId.append(stance['Body ID'])\n headId.append(name+str(stanceCounter))\n stanceCounter += 1\n\n X_feat = []\n for feature in feature_list:\n print(\"calculate feature: \" + str(feature))\n feat = gen_or_load_feats(feature_dict[feature], h, b, features_dir+\"/\"+feature+\"_test.\"+name+'.npy', bodyId, feature, headId, fold=name)\n X_feat.append(feat)\n print(len(feat))\n X = np.concatenate(X_feat, axis=1)\n return X", "def test_with_bunch(filename) :\n\n\tif not os.path.exists(filename) :\n\t\tprint('File not exists: ' + filename)\n\t\tsys.exit(-1)\n\n\n\t# Read CSV file\n\tprint('Load CSV file')\n\n\tcsv.field_size_limit(sys.maxsize) # Set CSV limit to sys.maxsize\n\tfiledata = []\n\twith open(filename) as csvfile :\n\t\treader = csv.reader(csvfile, delimiter=',')\n\t\tfor row in reader :\n\t\t\tfiledata.append(row)\n\n\n\tdetector = shaman.Shaman.default()\n\n\tcorrect = 0\n\ttotals = len(filedata)\n\n\tresults = {}\n\tprint('Start testing')\n\n\tfor index, (language, code) in enumerate(filedata) :\n\t\tprint ('Testing %s/%s ' % (index, len(filedata)), end=\"\\r\")\n\n\t\tif language not in shaman.SUPPORTING_LANGUAGES:\n\t\t\ttotals -= 1\n\t\t\tcontinue\n\n\t\ttry :\n\t\t\tglang = detector.detect( code )[0][0]\n\t\texcept IndexError :\n\t\t\tglang = None\n\n\t\tif language not in results :\n\t\t\tresults[ language ] = [0, 0, 0]\n\n\t\tif glang == language :\n\t\t\tcorrect += 1\n\t\t\tresults[ language ][0] += 1\n\n\t\t\n\t\tresults[ language ][1] += 1\n\t\tresults[ language ][2] = results[ language ][0] / results[ language ][1]\n\n\n\t\n\tprint(\"------------------------------------------------\")\n\tprint(\"Accuracy: %.2lf%% (Correct: %d / Valid Data: %d)\" % (correct/totals*100, correct, totals))\n\tprint(\"------------------------------------------------\")\n\t\n\tresults = sorted(results.items(), key=lambda x: x[1][0], reverse=True)\n\tfor lang, l in results :\n\t\tprint(\"%s: %.2lf%% (%s/%s)\" % (lang, l[2] * 100, l[0], l[1]))", "def test_categorical_feature():\n\n feature = Categorical(\"abc\")\n\n for element in \"abc\":\n feature.set(element)\n feature.set(\"ignore this\")\n feature.push()\n\n for element in \"abc\":\n getattr(feature, \"set_\" + element)()\n feature.push()\n\n array = feature.array()\n assert array.shape == (6, 3)\n for i, row in enumerate(array):\n assert sum(row) == 1.0 and row[i % 3] == 1.0", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def workbench_scenarios():\n return [\n (\"HL rubric text 
XBlock\",\n \"\"\"<hl_rubric_text/>\n \"\"\"),\n\n ]", "def test():\n pass", "def test_add_feature_with_extras():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"sponge\", \"bob\", \"C:\\\\temp\", True, True)\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:sponge\",\n \"/PackageName:bob\",\n \"/Source:C:\\\\temp\",\n \"/LimitAccess\",\n \"/All\",\n \"/NoRestart\",\n ]\n )", "def main(args):\n bad_words_file = codecs.open(args.language + \"/feature_files/bad_words\", \"r\", \"utf-8\").readlines()\n bad_words = read_known_words(bad_words_file)\n \n good_words_file = codecs.open(args.language + \"/feature_files/good_words\", \"r\", \"utf-8\").readlines()\n good_words = read_known_words(good_words_file)\n\n curse_words_file = codecs.open(args.language + \"/feature_files/curse_words\", \"r\", \"utf-8\").readlines()\n curse_words = read_known_words(curse_words_file)\n\n prepositions_file = codecs.open(args.language + \"/feature_files/prepositions\", \"r\", \"utf-8\").readlines()\n prepositions = read_known_words(prepositions_file)\n\n determiners_file = codecs.open(args.language + \"/feature_files/determiners\", \"r\", \"utf-8\").readlines()\n determiners = read_known_words(determiners_file)\n\n syllables_file = codecs.open(args.language + \"/feature_files/syllables\", \"r\", \"utf-8\").readlines()\n syllable_structure = read_syllables_file(syllables_file)\n\n other_feature_files = glob.glob(args.language + \"/feature_files/*.txt\")\n other_features = set_features_from_files(other_feature_files)\n \n ermaObj = ConllToErma(args, bad_words, good_words, curse_words, prepositions, \\\n determiners, syllable_structure, other_features)\n\n if not args.just_test:\n # Input training file.\n train_id = open(args.train, \"r\")\n train = train_id.readlines()\n train_id.close()\n sys.stdout.write(\"Reading training file...\\n\")\n (train_features, train_skip_chains) = ermaObj.read_conll_file(train)\n sys.stdout.write(\"Building model...\\n\")\n train_hash = ermaObj.make_nodes(train_features)\n # Freeze the known features based on what's seen in the training data\n ermaObj.cutoff_features()\n else:\n train_hash = {}\n train_skip_chains = {}\n # Input testing file.\n test_id = open(args.test, \"r\")\n test = test_id.readlines()\n test_id.close()\n sys.stdout.write(\"Reading test file...\\n\")\n (test_features, test_skip_chains) = ermaObj.read_conll_file(test)\n sys.stdout.write(\"Building model...\\n\")\n test_hash = ermaObj.make_nodes(test_features, test=True)\n ermaObj.write_out(train_hash, train_skip_chains, test_hash, test_skip_chains)", "def test_Acled():\n ad = pytest.importorskip('acled')\n coords_x, coords_y = [1.183056], [9.553300]\n acled = ad.ACLED(\"../tests\")\n acled.download(\"TGO\", '2017-01-01', '2018-01-01')\n d = {}\n for property in [\"fatalities\", \"n_events\", \"violence_civ\"]:\n for k in [10000, 100000]:\n d[property + \"_\" + str(k)] = acled.featurize(coords_x, coords_y, property=property, function='density', buffer=k)\n\n assert sum(d[item][0] for item in d) > 0", "def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)", "def 
runtest(self):", "def test_recognize_describe(self):\n pass", "def test_intent_classifier_add_testing_samples(self):\n pass", "def feature(self, node=\"clickhouse1\", mysql_node=\"mysql1\", stress=None, parallel=None):\n self.context.node = self.context.cluster.node(node)\n self.context.mysql_node = self.context.cluster.node(mysql_node)\n\n with allow_experimental_bigint(self.context.node):\n Scenario(run=comp_int_inline)\n Scenario(run=comp_int_table)\n Scenario(run=comp_dec_inline)\n Scenario(run=comp_dec_table)", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def featurewiz(dataname, target, corr_limit=0.7, verbose=0, sep=\",\", header=0,\r\n test_data='', feature_engg='', category_encoders='', **kwargs):\r\n ### set all the defaults here ##############################################\r\n dataname = copy.deepcopy(dataname)\r\n max_nums = 30\r\n max_cats = 15\r\n RANDOM_SEED = 42\r\n ############################################################################\r\n cat_encoders_list = list(settings.cat_encoders_names.keys())\r\n ######################################################################################\r\n ##### MAKING FEATURE_TYPE AND FEATURE_GEN SELECTIONS HERE #############\r\n ######################################################################################\r\n feature_generators = ['interactions', 'groupby', 'target']\r\n feature_gen = ''\r\n if feature_engg:\r\n if isinstance(feature_engg, str):\r\n feature_gen = [feature_engg]\r\n elif isinstance(feature_engg, list):\r\n feature_gen = copy.deepcopy(feature_engg)\r\n else:\r\n print('Skipping feature engineering since no feature_engg input...')\r\n feature_type = ''\r\n if category_encoders:\r\n if isinstance(category_encoders, str):\r\n feature_type = [category_encoders]\r\n elif isinstance(category_encoders, list):\r\n feature_type = category_encoders[:2] ### Only two will be allowed at a time\r\n else:\r\n print('Skipping category encoding since no category encoders specified in input...')\r\n ################## L O A D D A T A N A M E ########################\r\n train = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose, nrows=1000)\r\n train = remove_duplicate_cols_in_dataset(train)\r\n train_index = train.index\r\n test = load_file_dataframe(test_data, sep=sep, header=header, verbose=verbose,\r\n nrows=1000)\r\n if test is not None:\r\n test = remove_duplicate_cols_in_dataset(test)\r\n test_index = test.index\r\n ############# C L A S S I F Y F E A T U R E S ####################\r\n features_dict = classify_features(train, target)\r\n if len(features_dict['date_vars']) > 0:\r\n #### If there are date-time variables in datatset, it is best to load them using pandas\r\n date_time_vars = features_dict['date_vars']\r\n train = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose,\r\n nrows='all', parse_dates=date_time_vars)\r\n if not test is None:\r\n test = load_file_dataframe(test_data, sep=sep, header=header, verbose=verbose,\r\n nrows='all', parse_dates=date_time_vars)\r\n else:\r\n train = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose, nrows='all')\r\n train_index = train.index\r\n if test is not None:\r\n test = load_file_dataframe(test_data, sep=sep, 
header=header, verbose=verbose,\r\n nrows='all')\r\n test_index = test.index\r\n #### If there are more than 30 categorical variables in a data set, it is worth reducing features.\r\n #### Otherwise. XGBoost is pretty good at finding the best features whether cat or numeric !\r\n start_time = time.time()\r\n n_splits = 5\r\n max_depth = 8\r\n ###################### I M P O R T A N T D E F A U L T S ##############\r\n subsample = 0.7\r\n col_sub_sample = 0.7\r\n test_size = 0.2\r\n seed = 1\r\n early_stopping = 5\r\n ####### All the default parameters are set up now #########\r\n kf = KFold(n_splits=n_splits)\r\n ######### G P U P R O C E S S I N G B E G I N S ############\r\n ###### This is where we set the CPU and GPU parameters for XGBoost\r\n GPU_exists = check_if_GPU_exists()\r\n ##### Set the Scoring Parameters here based on each model and preferences of user ###\r\n cpu_params = {}\r\n param = {}\r\n cpu_params['nthread'] = -1\r\n cpu_params['tree_method'] = 'hist'\r\n cpu_params['grow_policy'] = 'depthwise'\r\n cpu_params['max_depth'] = max_depth\r\n cpu_params['max_leaves'] = 0\r\n cpu_params['verbosity'] = 0\r\n cpu_params['gpu_id'] = 0\r\n cpu_params['updater'] = 'grow_colmaker'\r\n cpu_params['predictor'] = 'cpu_predictor'\r\n cpu_params['num_parallel_tree'] = 1\r\n if GPU_exists:\r\n param['nthread'] = -1\r\n param['tree_method'] = 'gpu_hist'\r\n param['grow_policy'] = 'depthwise'\r\n param['max_depth'] = max_depth\r\n param['max_leaves'] = 0\r\n param['verbosity'] = 0\r\n param['gpu_id'] = 0\r\n param['updater'] = 'grow_gpu_hist' #'prune'\r\n param['predictor'] = 'gpu_predictor'\r\n param['num_parallel_tree'] = 1\r\n print(' Running XGBoost using GPU parameters')\r\n else:\r\n param = copy.deepcopy(cpu_params)\r\n print(' Running XGBoost using CPU parameters')\r\n #################################################################################\r\n ############# D E T E C T SINGLE OR MULTI-LABEL PROBLEM #################\r\n #################################################################################\r\n if isinstance(target, str):\r\n target = [target]\r\n settings.multi_label = False\r\n else:\r\n if len(target) <= 1:\r\n settings.multi_label = False\r\n else:\r\n settings.multi_label = True\r\n #### You need to make sure only Single Label problems are handled in target encoding!\r\n if settings.multi_label:\r\n print('Turning off Target encoding for multi-label problems like this data set...')\r\n #### Since Feature Engineering module cannot handle Multi Label Targets,\r\n #### we will turnoff creating target_enc_cat_features to False\r\n target_enc_cat_features = False\r\n else:\r\n ## If target is specified in feature_gen then use it to Generate target encoded features\r\n target_enc_cat_features = 'target' in feature_gen\r\n ######################################################################################\r\n ######## C L A S S I F Y V A R I A B L E S ##########################\r\n ###### Now we detect the various types of variables to see how to convert them to numeric\r\n ######################################################################################\r\n features_dict = classify_features(train, target)\r\n if len(features_dict['date_vars']) > 0:\r\n date_time_vars = features_dict['date_vars']\r\n date_cols = copy.deepcopy(date_time_vars)\r\n #### Do this only if date time columns exist in your data set!\r\n for date_col in date_cols:\r\n print('Processing %s column for date time features....' 
%date_col)\r\n train, ts_adds = FE_create_time_series_features(train, date_col)\r\n #date_col_adds_train = left_subtract(date_df_train.columns.tolist(),date_col)\r\n #print(' Adding %d column(s) from date-time column %s in train' %(len(date_col_adds_train),date_col))\r\n #train = train.join(date_df_train, rsuffix='2')\r\n if isinstance(test,str) or test is None:\r\n ### do nothing ####\r\n pass\r\n else:\r\n print(' Adding same time series features to test data...')\r\n test, _ = FE_create_time_series_features(test, date_col, ts_adds)\r\n #date_col_adds_test = left_subtract(date_df_test.columns.tolist(),date_col)\r\n ### Now time to remove the date time column from all further processing ##\r\n #test = test.join(date_df_test, rsuffix='2')\r\n ### Now time to continue with our further processing ##\r\n idcols = features_dict['IDcols']\r\n if isinstance(test,str) or test is None:\r\n pass\r\n else:\r\n test_ids = test[idcols]\r\n train_ids = train[idcols] ### this saves the ID columns of train\r\n cols_to_remove = features_dict['cols_delete'] + idcols + features_dict['discrete_string_vars']\r\n preds = [x for x in list(train) if x not in target+cols_to_remove]\r\n numvars = train[preds].select_dtypes(include = 'number').columns.tolist()\r\n if len(numvars) > max_nums:\r\n if feature_gen:\r\n print('Warning: Too many extra features will be generated by featurewiz. This may take time...')\r\n catvars = left_subtract(preds, numvars)\r\n if len(catvars) > max_cats:\r\n if feature_type:\r\n print('Warning: Too many extra features will be generated by category encoding. This may take time...')\r\n rem_vars = copy.deepcopy(catvars)\r\n ########## Now we need to select the right model to run repeatedly ####\r\n if target is None or len(target) == 0:\r\n cols_list = list(train)\r\n settings.modeltype = 'Clustering'\r\n else:\r\n settings.modeltype = analyze_problem_type(train, target)\r\n cols_list = left_subtract(list(train),target)\r\n ######################################################################################\r\n ###### B E F O R E U S I N G D A T A B U N C H C H E C K ###################\r\n ######################################################################################\r\n ## Before using DataBunch check if certain encoders work with certain kind of data!\r\n if feature_type:\r\n final_cat_encoders = feature_type\r\n else:\r\n final_cat_encoders = []\r\n if settings.modeltype == 'Multi_Classification':\r\n ### you must put a Polynomial Wrapper on the cat_encoder in case the model is multi-class\r\n if final_cat_encoders:\r\n final_cat_encoders = [PolynomialWrapper(x) for x in final_cat_encoders if x in settings.target_encoders_names]\r\n elif settings.modeltype == 'Regression':\r\n if final_cat_encoders:\r\n if 'WOEEncoder' in final_cat_encoders:\r\n print('Removing WOEEncoder from list of encoders since it cannot be used for this Regression problem.')\r\n final_cat_encoders = [x for x in final_cat_encoders if x != 'WOEEncoder' ]\r\n ######################################################################################\r\n ###### F E A T U R E E N G G U S I N G D A T A B U N C H ###################\r\n ######################################################################################\r\n if feature_gen or feature_type:\r\n print('Starting feature engineering...this will take time...')\r\n if isinstance(test, str) or test is None:\r\n if settings.multi_label:\r\n ### if it is a multi_label problem, leave target as it is - a list!\r\n X_train, X_test, y_train, y_test = 
train_test_split(train[preds],\r\n train[target],\r\n test_size=0.2,\r\n random_state=RANDOM_SEED)\r\n else:\r\n ### if it not a multi_label problem, make target as target[0]\r\n X_train, X_test, y_train, y_test = train_test_split(train[preds],\r\n train[target[0]],\r\n test_size=0.2,\r\n random_state=RANDOM_SEED)\r\n else:\r\n X_train = train[preds]\r\n if settings.multi_label:\r\n y_train = train[target]\r\n else:\r\n y_train = train[target[0]]\r\n X_test = test[preds]\r\n try:\r\n y_test = test[target]\r\n except:\r\n y_test = None\r\n X_train_index = X_train.index\r\n X_test_index = X_test.index\r\n data_tuple = DataBunch(X_train=X_train,\r\n y_train=y_train,\r\n X_test=X_test, # be sure to specify X_test, because the encoder needs all dataset to work.\r\n cat_features = catvars,\r\n clean_and_encod_data=True,\r\n cat_encoder_names=final_cat_encoders, # final list of Encoders selected\r\n clean_nan=True, # fillnan\r\n num_generator_features=np.where('interactions' in feature_gen,True, False).tolist(), # Generate interaction Num Features\r\n group_generator_features=np.where('groupby' in feature_gen,True, False).tolist(), # Generate groupby Features\r\n target_enc_cat_features=target_enc_cat_features,# Generate target encoded features\r\n normalization=False,\r\n random_state=RANDOM_SEED)\r\n #### Now you can process the tuple this way #########\r\n data1 = data_tuple.X_train.join(y_train) ### data_tuple does not have a y_train, remember!\r\n if isinstance(test, str) or test is None:\r\n ### Since you have done a train_test_split using randomized split, you need to put it back again.\r\n data2 = data_tuple.X_test.join(y_test)\r\n train = data1.append(data2)\r\n train = train.reindex(train_index)\r\n else:\r\n try:\r\n test = data_tuple.X_test.join(y_test)\r\n except:\r\n test = copy.deepcopy(data_tuple.X_test)\r\n test = test.reindex(test_index)\r\n train = copy.deepcopy(data1)\r\n print(' Completed feature engineering. Shape of Train (with target) = %s' %(train.shape,))\r\n preds = [x for x in list(train) if x not in target]\r\n numvars = train[preds].select_dtypes(include = 'number').columns.tolist()\r\n catvars = left_subtract(preds, numvars)\r\n ###################### I M P O R T A N T ##############################################\r\n ###### This top_num decides how many top_n features XGB selects in each iteration.\r\n #### There a total of 5 iterations. Hence 5x10 means maximum 50 features will be selected.\r\n ##### If there are more than 50 variables, then maximum 25% of its variables will be selected\r\n if len(preds) <= 50:\r\n top_num = 10\r\n else:\r\n ### the maximum number of variables will 25% of preds which means we divide by 5 and get 5% here\r\n ### The five iterations result in 10% being chosen in each iteration. 
Hence max 50% of variables!\r\n top_num = int(len(preds)*0.10)\r\n ###################### I M P O R T A N T ##############################################\r\n important_cats = copy.deepcopy(catvars)\r\n if len(numvars) > 1:\r\n final_list = FE_remove_variables_using_SULOV_method(train,numvars,settings.modeltype,target,\r\n corr_limit,verbose)\r\n else:\r\n final_list = copy.deepcopy(numvars)\r\n ####### This is where you draw how featurewiz works when the verbose = 2 ###########\r\n print(' Adding %s categorical variables to reduced numeric variables of %d' %(\r\n len(important_cats),len(final_list)))\r\n if isinstance(final_list,np.ndarray):\r\n final_list = final_list.tolist()\r\n preds = final_list+important_cats\r\n #######You must convert category variables into integers ###############\r\n if len(important_cats) > 0:\r\n train, test = FE_convert_all_object_columns_to_numeric(train, test)\r\n ######## Dont move this train and y definition anywhere else ########\r\n ######## Fill Missing values since XGB for some reason #########\r\n ######## can't handle missing values in early stopping rounds #######\r\n train = train.fillna(0)\r\n y = train[target]\r\n print('############## F E A T U R E S E L E C T I O N ####################')\r\n important_features = []\r\n ########## This is for Single_Label problems ######################\r\n if settings.modeltype == 'Regression':\r\n objective = 'reg:squarederror'\r\n model_xgb = XGBRegressor( n_estimators=100,booster='gbtree',subsample=subsample,objective=objective,\r\n colsample_bytree=col_sub_sample,reg_alpha=0.5, reg_lambda=0.5,\r\n seed=1,n_jobs=-1,random_state=1)\r\n eval_metric = 'rmse'\r\n else:\r\n #### This is for Classifiers only\r\n classes = np.unique(train[target].values)\r\n if len(classes) == 2:\r\n model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,\r\n colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,\r\n max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,\r\n n_jobs=-1, nthread=None, objective='binary:logistic',\r\n random_state=1, reg_alpha=0.5, reg_lambda=0.5,\r\n seed=1)\r\n eval_metric = 'logloss'\r\n else:\r\n model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,\r\n colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,\r\n max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,\r\n n_jobs=-1, nthread=None, objective='multi:softmax',\r\n random_state=1, reg_alpha=0.5, reg_lambda=0.5,\r\n seed=1)\r\n eval_metric = 'mlogloss'\r\n #### Now set the parameters for XGBoost ###################\r\n model_xgb.set_params(**param)\r\n #print('Model parameters: %s' %model_xgb)\r\n if settings.multi_label:\r\n ########## This is for settings.multi_label problems ###############################\r\n if settings.modeltype == 'Regression':\r\n model_xgb = MultiOutputRegressor(model_xgb)\r\n #model_xgb = RegressorChain(model_xgb)\r\n else:\r\n ## just do randomized search CV - no need to do one vs rest unless multi-class\r\n model_xgb = MultiOutputClassifier(model_xgb)\r\n #model_xgb = ClassifierChain(model_xgb)\r\n #### This is where you start to Iterate on Finding Important Features ################\r\n save_xgb = copy.deepcopy(model_xgb)\r\n train_p = train[preds]\r\n if train_p.shape[1] < 10:\r\n iter_limit = 2\r\n else:\r\n iter_limit = int(train_p.shape[1]/5+0.5)\r\n print('Current number of predictors = %d ' %(train_p.shape[1],))\r\n print(' Finding Important Features using Boosted Trees 
algorithm...')\r\n ######## This is where we start training the XGBoost model to find top features ####\r\n try:\r\n for i in range(0,train_p.shape[1],iter_limit):\r\n new_xgb = copy.deepcopy(save_xgb)\r\n print(' using %d variables...' %(train_p.shape[1]-i))\r\n imp_feats = []\r\n if train_p.shape[1]-i < iter_limit:\r\n X = train_p.iloc[:,i:]\r\n cols_sel = X.columns.tolist()\r\n if settings.modeltype == 'Regression':\r\n train_part = int((1-test_size)*X.shape[0])\r\n X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]\r\n else:\r\n X_train, X_cv, y_train, y_cv = train_test_split(X, y,\r\n test_size=test_size, random_state=seed, stratify=y)\r\n try:\r\n if settings.multi_label:\r\n eval_set = [(X_train.values,y_train.values),(X_cv.values,y_cv.values)]\r\n else:\r\n eval_set = [(X_train,y_train),(X_cv,y_cv)]\r\n if settings.multi_label:\r\n model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,\r\n eval_metric=eval_metric,verbose=False)\r\n except:\r\n #### On Colab, even though GPU exists, many people don't turn it on.\r\n #### In that case, XGBoost blows up when gpu_predictor is used.\r\n #### This is to turn it back to cpu_predictor in case GPU errors!\r\n if GPU_exists:\r\n print('Warning: GPU exists but it is not turned on. Using CPU for predictions...')\r\n if settings.multi_label:\r\n model_xgb.estimator.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,\r\n eval_metric=eval_metric,verbose=False)\r\n #### This is where you collect the feature importances from each run ############\r\n if settings.multi_label:\r\n ### doing this for multi-label is a little different for single label #########\r\n imp_feats = [model_xgb.estimators_[i].feature_importances_ for i in range(len(target))]\r\n imp_feats_df = pd.DataFrame(imp_feats).T\r\n imp_feats_df.columns = target\r\n imp_feats_df.index = cols_sel\r\n imp_feats_df['sum'] = imp_feats_df.sum(axis=1).values\r\n important_features += imp_feats_df.sort_values(by='sum',ascending=False)[:top_num].index.tolist()\r\n else:\r\n ### doing this for single-label is a little different from settings.multi_label #########\r\n imp_feats = model_xgb.get_booster().get_score(importance_type='gain')\r\n #print('%d iteration: imp_feats = %s' %(i+1,imp_feats))\r\n important_features += pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()\r\n ####### order this in the same order in which they were collected ######\r\n important_features = list(OrderedDict.fromkeys(important_features))\r\n else:\r\n X = train_p[list(train_p.columns.values)[i:train_p.shape[1]]]\r\n cols_sel = X.columns.tolist()\r\n #### Split here into train and test #####\r\n if settings.modeltype == 'Regression':\r\n train_part = int((1-test_size)*X.shape[0])\r\n X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]\r\n else:\r\n X_train, X_cv, y_train, y_cv = train_test_split(X, y,\r\n test_size=test_size, random_state=seed, stratify=y)\r\n ### set the validation data as arrays in multi-label case #####\r\n if settings.multi_label:\r\n eval_set = [(X_train.values,y_train.values),(X_cv.values,y_cv.values)]\r\n else:\r\n eval_set = [(X_train,y_train),(X_cv,y_cv)]\r\n ########## Try training the model now #####################\r\n try:\r\n if settings.multi_label:\r\n 
model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,\r\n eval_set=eval_set,eval_metric=eval_metric,verbose=False)\r\n except:\r\n #### On Colab, even though GPU exists, many people don't turn it on.\r\n #### In that case, XGBoost blows up when gpu_predictor is used.\r\n #### This is to turn it back to cpu_predictor in case GPU errors!\r\n if GPU_exists:\r\n print('Warning: GPU exists but it is not turned on. Using CPU for predictions...')\r\n if settings.multi_label:\r\n model_xgb.estimator.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,\r\n eval_set=eval_set,eval_metric=eval_metric,verbose=False)\r\n ### doing this for multi-label is a little different for single label #########\r\n if settings.multi_label:\r\n imp_feats = [model_xgb.estimators_[i].feature_importances_ for i in range(len(target))]\r\n imp_feats_df = pd.DataFrame(imp_feats).T\r\n imp_feats_df.columns = target\r\n imp_feats_df.index = cols_sel\r\n imp_feats_df['sum'] = imp_feats_df.sum(axis=1).values\r\n important_features += imp_feats_df.sort_values(by='sum',ascending=False)[:top_num].index.tolist()\r\n else:\r\n imp_feats = model_xgb.get_booster().get_score(importance_type='gain')\r\n #print('%d iteration: imp_feats = %s' %(i+1,imp_feats))\r\n important_features += pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()\r\n important_features = list(OrderedDict.fromkeys(important_features))\r\n except:\r\n print('Finding top features using XGB is crashing. Continuing with all predictors...')\r\n important_features = copy.deepcopy(preds)\r\n return important_features, train[important_features+target]\r\n important_features = list(OrderedDict.fromkeys(important_features))\r\n print('Selected %d important features from your dataset' %len(important_features))\r\n numvars = [x for x in numvars if x in important_features]\r\n important_cats = [x for x in important_cats if x in important_features]\r\n print(' Time taken (in seconds) = %0.0f' %(time.time()-start_time))\r\n if isinstance(test, str) or test is None:\r\n print(f'Returning list of {len(important_features)} important features and dataframe.')\r\n if len(np.intersect1d(train_ids.columns.tolist(),train.columns.tolist())) > 0:\r\n return important_features, train[important_features+target]\r\n else:\r\n train = train_ids.join(train)\r\n return important_features, train[idcols+important_features+target]\r\n else:\r\n print('Returning 2 dataframes: train and test with %d important features.' %len(important_features))\r\n if feature_gen or feature_type:\r\n ### if feature engg is performed, id columns are dropped. 
Hence they must rejoin here.\r\n train = train_ids.join(train)\r\n test = test_ids.join(test)\r\n if target in list(test): ### see if target exists in this test data\r\n return train[idcols+important_features+target], test[idcols+important_features+target]\r\n else:\r\n return train[idcols+important_features+target], test[idcols+important_features]", "def main():\n raw_data = pd.read_csv('data/raw_hospital_data.csv')\n\n fe_data = new_features(raw_data)\n fe_data = compressing_admission_type(data)\n fe_data = age_to_cat(fe_data)\n fe_data = compressing_careunit(fe_data)\n fe_data = compressing_curr_serv(fe_data)\n fe_data = compressing_ethnicity(fe_data)\n fe_data = compressing_marital_status(fe_data)\n fe_data = compressing_religion(fe_data)\n fe_data = compressing_admit_location(fe_data)\n fe_data = compress_icd9_codes(fe_data)\n\n fe_data.to_csv('data/feature_engineering_data.csv')", "def main():\n feature_fns = [token_features, token_pair_features, lexicon_features]\n # Download and read data.\n download_data()\n docs, labels = read_data(os.path.join('data', 'train'))\n # Evaluate accuracy of many combinations\n # of tokenization/featurization.\n results = eval_all_combinations(docs, labels,\n [True, False],\n feature_fns,\n [2,5,10])\n # Print information about these results.\n best_result = results[0]\n worst_result = results[-1]\n print('best cross-validation result:\\n%s' % str(best_result))\n print('worst cross-validation result:\\n%s' % str(worst_result)) \n plot_sorted_accuracies(results)\n print('\\nMean Accuracies per Setting:')\n print('\\n'.join(['%s: %.5f' % (s,v) for v,s in mean_accuracy_per_setting(results)]))\n \n \n # Fit best classifier.\n clf, vocab = fit_best_classifier(docs, labels, results[0])\n\n # Print top coefficients per class.\n print('\\nTOP COEFFICIENTS PER CLASS:')\n print('negative words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 0, 5, vocab)]))\n print('\\npositive words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 1, 5, vocab)]))\n # Parse test data\n test_docs, test_labels, X_test = parse_test_data(best_result, vocab)\n\n # Evaluate on test set.\n predictions = clf.predict(X_test)\n #print('CSR Test ->')\n #print(X_test.toarray())\n #print('predictions = ',predictions)\n #print('test_labels = ',test_labels)\n\n print('testing accuracy=%f' %\n accuracy_score(test_labels, predictions))\n \n \n print('\\nTOP MISCLASSIFIED TEST DOCUMENTS:')\n print_top_misclassified(test_docs, test_labels, X_test, clf, 5)\n \n \n print('testing accuracy=%f' %\n accuracy_score(test_labels, predictions))\n\n print('\\nTOP MISCLASSIFIED TEST DOCUMENTS:')\n print_top_misclassified(test_docs, test_labels, X_test, clf, 5)", "def test_intent_classifier_test(self):\n pass", "def main(args):\n\n # load dataset\n with open(args.infile, 'rb') as fin:\n x_train, y_train, x_test, y_test = pickle.load(fin)\n\n y_train = y_train.astype('int64')\n y_test = y_test.astype('int64')\n\n random_index = list(range(len(x_train)))\n random.shuffle(random_index)\n x_train = np.array(x_train[random_index])\n y_train = np.array(y_train[random_index])\n\n # y_train = y_train.astype(bool).astype(int)\n # y_test = y_test.astype(bool).astype(int)\n\n # combined different features\n feature_extractors = [\n # ('general', MyScaler(False)),\n # ('wordcount', MyCountVectorizer(ngram_range=(1, 1), stop_words='english')),\n ('tfidf', MyTfidfVectorizer(stop_words='english')),\n ]\n combined_feature = FeatureUnion(feature_extractors)\n\n estimators = [('feature', 
combined_feature),\n ('clf', svm.LinearSVC(C=0.3))]\n pipeline = Pipeline(estimators)\n\n # pipeline.fit(x_train, y_train)\n # print(pipeline.score(x_test, y_test))\n\n # parameters to search\n param_grid = [\n {\n 'clf': [MultinomialNB()],\n 'clf__alpha': [10, 1.0, 0.1, 0.01],\n },\n {\n 'clf': [svm.LinearSVC()],\n 'clf__C': [3, 1, 0.3, 0.1],\n },\n ]\n\n # start training\n t0 = time.time()\n grid = GridSearchCV(pipeline, param_grid=param_grid, verbose=4, n_jobs=4)\n grid.fit(x_train, y_train)\n\n print()\n print('done in %.2f seconds' % (time.time() - t0))\n print()\n print('train accuracy: %.2f%%' % (100 * grid.score(x_train, y_train)))\n print('test accuracy: %.2f%%' % (100 * grid.score(x_test, y_test)))\n print()\n print('the best parameters are:', grid.best_params_)\n print()\n print('confusion matrix:')\n print(metrics.confusion_matrix(y_test, grid.predict(x_test)))", "def test_apply_endorsements(self):", "def test_extract_categories():\n pass", "def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")", "def testbed_name(self): \n return \"C-Lab\"", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def findFeatures(self):\n\t\tpass", "def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')", "def test_intent_classifier_curate(self):\n pass", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def testHandbrakeCLI(self):\n self.assertEqual(\n self.handBrake,\n self.config.handBrake\n )", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and 
trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)" ]
[ "0.652699", "0.64427763", "0.6373243", "0.6333925", "0.6331873", "0.6215436", "0.6211196", "0.59231627", "0.59107846", "0.5880464", "0.5870288", "0.58623254", "0.58284056", "0.58262885", "0.58067703", "0.5803254", "0.57745475", "0.5774168", "0.57514584", "0.5704505", "0.5700603", "0.5693078", "0.5652558", "0.56517303", "0.5650566", "0.5643128", "0.56402546", "0.56365436", "0.56352496", "0.56278056", "0.562442", "0.562376", "0.5618224", "0.5615385", "0.56007", "0.5599648", "0.5588481", "0.5558653", "0.5544523", "0.55427504", "0.55403906", "0.5539311", "0.5512834", "0.550815", "0.5504436", "0.54862106", "0.5479062", "0.5475934", "0.54731476", "0.5465907", "0.5465783", "0.5457624", "0.5451471", "0.54503125", "0.5443729", "0.5441925", "0.54407585", "0.54348904", "0.5425329", "0.54249865", "0.54245305", "0.5423331", "0.542093", "0.54207623", "0.54195625", "0.5411961", "0.5407852", "0.53989893", "0.53988767", "0.539806", "0.53965735", "0.5396401", "0.53954244", "0.5395154", "0.5386821", "0.5382551", "0.5380843", "0.5379364", "0.53752905", "0.536914", "0.53660816", "0.5364716", "0.5364346", "0.5363949", "0.5354994", "0.5353202", "0.535319", "0.5350873", "0.5347", "0.5346832", "0.5338407", "0.53377986", "0.5334163", "0.5333795", "0.53289306", "0.5318871", "0.53127897", "0.53091097", "0.53061885", "0.5303061" ]
0.6563471
0
Test ComBat feature harmonization.
def test_combat_fastr():
    # Check if example data directory exists
    example_data_dir = th.find_exampledatadir()

    # Check if required example data exists
    features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))
    if len(features) < 6:
        message = 'Too few example features found for ComBat testing! ' +\
                  'Run the create_example_data script from the WORC exampledata ' +\
                  'directory!'
        raise WORCValueError(message)
    elif len(features) > 6:
        message = 'Too many example features found for ComBat testing! ' +\
                  'Run the create_example_data script from the WORC exampledata ' +\
                  'directory!'
        raise WORCValueError(message)

    objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')

    # Python config
    config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')

    # Create the fastr network
    experiment = fastr.create_network('test_ComBat')

    source_features = experiment.create_source('HDF5', id='features_in', node_group='features')
    source_labels = experiment.create_source('PatientInfoFile', id='labels', node_group='pctrain')
    source_config = experiment.create_source('ParameterFile', id='config', node_group='conf')
    sink_features = experiment.create_sink('HDF5', id='features_out')

    node_combat = experiment.create_node('combat/ComBat:1.0',
                                         tool_version='1.0',
                                         id='ComBat')

    link_combat_1 = experiment.create_link(source_config.output, node_combat.inputs['config'])
    link_combat_2 = experiment.create_link(source_labels.output, node_combat.inputs['patientclass_train'])
    link_combat_1.collapse = 'conf'
    link_combat_2.collapse = 'pctrain'

    # Mimic using two feature toolboxes
    links_Combat1_train = node_combat.inputs['features_train']['MR_0'] << source_features.output
    links_Combat1_train.collapse = 'features'
    links_Combat2_train = node_combat.inputs['features_train']['MR_1'] << source_features.output
    links_Combat2_train.collapse = 'features'

    links_Combat_out_train = sink_features.input << node_combat.outputs['features_train_out']
    links_Combat_out_train.collapse = 'ComBat'

    # Provide source and sink data
    source_data = dict()
    source_data['features_in'] = features
    source_data['labels'] = objectlabels
    source_data['config'] = config

    sink_data = dict()
    sink_data['features_out'] = "vfs://output/test_ComBat/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}"

    # Execute
    experiment.execute(source_data, sink_data, execution_plugin='LinearExecution')

    # Remove the feature files
    for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):
        os.remove(i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_combat():\n # Check if example data directory exists\n example_data_dir = th.find_exampledatadir()\n\n # Check if example data required exists\n features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))\n if len(features) < 7:\n message = 'Too few example features for ComBat testing not found! ' +\\\n 'Run the create_example_data script from the WORC exampledata ' +\\\n 'directory!'\n raise WORCValueError(message)\n elif len(features) > 7:\n message = 'Too many example features for ComBat testing not found! ' +\\\n 'Run the create_example_data script from the WORC exampledata ' +\\\n 'directory!'\n raise WORCValueError(message)\n\n objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')\n\n # Python\n config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')\n features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_python_') for f in features]\n\n # First run synthetic test\n # Synthetictest()\n\n # # Run the Combat function: only for training\n # ComBat(features_train_in=features,\n # labels_train=objectlabels,\n # config=config,\n # features_train_out=features_train_out)\n\n # # Run the Combat function: now for train + testing\n ComBat(features_train_in=features[0:4],\n labels_train=objectlabels,\n config=config,\n features_train_out=features_train_out[0:4],\n features_test_in=features[4:],\n labels_test=objectlabels,\n features_test_out=features_train_out[4:])\n\n # # Matlab\n # config = os.path.join(example_data_dir, 'ComBatConfig_matlab.ini')\n # features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_matlab_') for f in features]\n #\n # # # Run the Combat function: only for training\n # ComBat(features_train_in=features,\n # labels_train=objectlabels,\n # config=config,\n # features_train_out=features_train_out)\n #\n # # Run the Combat function: now for train + testing\n # ComBat(features_train_in=features[0:4],\n # labels_train=objectlabels,\n # config=config,\n # features_train_out=features_train_out[0:4],\n # features_test_in=features[4:],\n # labels_test=objectlabels,\n # features_test_out=features_train_out[4:])\n\n # Remove the feature files\n # for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):\n # os.remove(i)", "def test_feature(feature, value, good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n 
self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)", "def feature():\n pass", "def test_theft_and_stealing(self):", "def test_predictor():", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_overflow\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_datetime\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_errors\", \"feature\"), flags=TE)", "def test_machine_learning():", "def feat():\n pass", "def test_features(self):\n assert list(parser.generate_commands(yaml.load(\n '- my_command: {name: my_name}'))) == [('my_command', {'name': 'my_name'})]", "def test_text_classifier_vaporise(self):\n pass", "def test_01_lighting(self):", "def testBeliefs1sk(self):", "def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)", "def test_active_inference_SPM_1b(self):", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def test_training(self):\n\t\tpass", "def feature(self, node=\"clickhouse1\"):\n self.context.node = self.context.cluster.node(node)\n\n for scenario in loads(current_module(), Scenario):\n scenario()", "def test_series_in_features(self):\n assert parse_command({'test{{A,B}}': {'depends_on': 'name{{A,B}}'}}) == [\n ('testA', {'depends_on': 'nameA'}), ('testB', {'depends_on': 'nameB'})]", "def cli(argv):\r\n argv.append(\"--exhaust-materials\")\r\n cltestbench.cli(argv)", "def main():\n parser = argparse.ArgumentParser(\n description=\"making feature file argsurations.\")\n\n parser.add_argument(\n \"--waveforms\", default=None,\n help=\"directory or list of filename of input wavfile\")\n parser.add_argument(\n \"--hdf5dir\", default=None,\n help=\"directory to save hdf5\")\n parser.add_argument(\n \"--wavdir\", default=None,\n help=\"directory to save of preprocessed wav file\")\n parser.add_argument(\n \"--fs\", default=16000,\n type=int, help=\"Sampling frequency\")\n 
parser.add_argument(\n \"--shiftms\", default=5,\n type=float, help=\"Frame shift in msec\")\n parser.add_argument(\n \"--feature_type\", default=\"world\", choices=[\"world\", \"melspc\", \"mcep\"],\n type=str, help=\"feature type\")\n parser.add_argument(\n \"--mspc_dim\", default=80,\n type=int, help=\"Dimension of mel spectrogram\")\n parser.add_argument(\n \"--minf0\", default=40,\n type=int, help=\"minimum f0 for world analysis\")\n parser.add_argument(\n \"--maxf0\", default=400,\n type=int, help=\"maximum f0 for world analysis\")\n parser.add_argument(\n \"--fmin\", default=None, nargs=\"?\",\n type=int, help=\"minimum frequency for melspc\")\n parser.add_argument(\n \"--fmax\", default=None, nargs=\"?\",\n type=int, help=\"maximum frequency for melspc\")\n parser.add_argument(\n \"--mcep_dim\", default=24,\n type=int, help=\"Dimension of mel cepstrum\")\n parser.add_argument(\n \"--mcep_alpha\", default=0.41,\n type=float, help=\"Alpha of mel cepstrum\")\n parser.add_argument(\n \"--fftl\", default=1024,\n type=int, help=\"FFT length\")\n parser.add_argument(\n \"--highpass_cutoff\", default=70,\n type=int, help=\"Cut off frequency in lowpass filter\")\n parser.add_argument(\n \"--save_wav\", default=True,\n type=strtobool, help=\"Whether to save filtered wav file\")\n parser.add_argument(\n \"--n_jobs\", default=10,\n type=int, help=\"number of parallel jobs\")\n parser.add_argument(\n \"--verbose\", default=1,\n type=int, help=\"log message level\")\n\n args = parser.parse_args()\n\n # set log level\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n else:\n logging.basicConfig(level=logging.WARNING,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n logging.warning(\"logging is disabled.\")\n\n # show arguments\n for key, value in vars(args).items():\n logging.info(\"%s = %s\" % (key, str(value)))\n\n # read list\n if os.path.isdir(args.waveforms):\n file_list = sorted(find_files(args.waveforms, \"*.wav\"))\n else:\n file_list = read_txt(args.waveforms)\n logging.info(\"number of utterances = %d\" % len(file_list))\n\n # check directory existence\n if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:\n os.makedirs(args.wavdir)\n if not os.path.exists(args.hdf5dir):\n os.makedirs(args.hdf5dir)\n\n # divide list\n file_lists = np.array_split(file_list, args.n_jobs)\n file_lists = [f_list.tolist() for f_list in file_lists]\n\n # multi processing\n processes = []\n if args.feature_type == \"world\":\n target_fn = world_feature_extract\n elif args.feature_type == \"melspc\":\n target_fn = melspectrogram_extract\n else:\n target_fn = melcepstrum_extract\n for f in file_lists:\n p = mp.Process(target=target_fn, args=(f, args,))\n p.start()\n processes.append(p)\n\n # wait for all process\n for p in processes:\n p.join()", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n 
self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_classify_cuisine(self):\n pass", "def test_T01():", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def do_it(args):\n\n #force = args.force\n #testing = args.testing\n #verbose = args.verbose\n #regions = args.regions\n\n # XXX WORKING HERE", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def test_manlext(self):\n self.chck_triple('manlext')", "def test_intent_classifier_vaporise(self):\n pass", "def test_add_feature():\n mock = 
MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:test\",\n \"/NoRestart\",\n ]\n )", "def test_make_macrobes(self):\n basic_test_runner(self, 'macrobes')", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def testBeliefs2sk(self):", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)", "def test_text_classifier_add_testing_samples(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def test_all_components(self):\n model_name = 'BCZModel'\n pose_components = [\n ('xyz', 3, True, 100.),\n ('quaternion', 4, False, 10.),\n ('axis_angle', 3, True, 10.),\n ('arm_joints', 7, True, 1.),\n ('target_close', 1, False, 1.),\n ]\n gin.bind_parameter(\n 'BCZModel.action_components', pose_components)\n gin.parse_config('BCZPreprocessor.mock_subtask = True')\n gin.parse_config(\n 'resnet_film_network.film_generator_fn = @linear_film_generator')\n self._fixture.random_train(model, model_name)", "def test_text_classifier_test(self):\n pass", "def test_text_classifier_curate(self):\n pass", "def spec_tests():\n pass", "def test_get_scenario(self):\n pass", "def main():\n feature_fns = [token_features, token_pair_features, lexicon_features]\n # Download and read data.\n download_data()\n docs, labels = read_data(os.path.join('data', 'train'))\n # Evaluate accuracy of many combinations\n # of tokenization/featurization.\n results = eval_all_combinations(docs, labels,\n [True, False],\n feature_fns,\n [2,5,10])\n # Print information about these results.\n best_result = results[0]\n worst_result = results[-1]\n print('best cross-validation result:\\n%s' % str(best_result))\n print('worst cross-validation result:\\n%s' % str(worst_result))\n plot_sorted_accuracies(results)\n print('\\nMean Accuracies per Setting:')\n print('\\n'.join(['%s: %.5f' % (s,v) for v,s in mean_accuracy_per_setting(results)]))\n\n # Fit best classifier.\n clf, vocab = fit_best_classifier(docs, labels, results[0])\n\n # Print top coefficients per class.\n print('\\nTOP COEFFICIENTS PER CLASS:')\n print('negative words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 0, 5, vocab)]))\n print('\\npositive words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 1, 5, vocab)]))\n\n # Parse test data\n test_docs, test_labels, X_test = parse_test_data(best_result, vocab)\n\n # Evaluate on test set.\n predictions = clf.predict(X_test)\n print('testing accuracy=%f' %\n accuracy_score(test_labels, predictions))\n\n print('\\nTOP MISCLASSIFIED TEST DOCUMENTS:')\n print_top_misclassified(test_docs, test_labels, X_test, clf, 5)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def setup_features():\n\n core_features = {\"web\": [\"content_directory\", \"controllers\", \"templates\"]}\n\n imported_features = []\n for feature_type, feature_list in core_features.items():\n 
features_list_names = \", \".join(feature_list)\n print(\n \"** Setting up {0} features {1}\".format(\n info(feature_type), info(features_list_names)\n )\n )\n for feature_name in feature_list:\n script_dir = dirname(abspath(__file__))\n module_fname = join(\n script_dir, \"features\", feature_type, feature_name + \".py\"\n )\n\n feature_dict = {}\n with open(module_fname) as source_file:\n exec(compile(source_file.read(), module_fname, \"exec\"), feature_dict)\n try:\n feature = feature_dict[\"Feature\"]()\n except KeyError:\n print_error(\n \"Feature module '%s' does not provide a Feature class!\"\n % feature_name\n )\n sys.exit(1)\n try:\n feature.setup()\n except: # NOQA: E722\n print_error(\"Failed setting up feature '%s' !\" % feature_name)\n raise\n imported_features.append(feature)\n\n for feature in imported_features:\n if hasattr(feature, \"activate\"):\n feature.activate()", "def test_creating_simple_feature():\n # given & when\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n\n # then\n assert feature.id == 1\n assert feature.keyword == \"Feature\"\n assert feature.sentence == \"I am a feature\"\n assert feature.path == \"foo.feature\"\n assert feature.line == 1\n assert feature.tags == []", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def main():\r\n\r\n # Command-line arguments\r\n training_data = argv[1]\r\n hypothesis_out = argv[2]\r\n learning_type = argv[3]\r\n test = argv[4]\r\n labels = None\r\n if len(argv) > 5:\r\n labels = argv[5]\r\n\r\n # Parse data and determine features\r\n feat_obj = FeatureParser(training_data)\r\n data = FeatureData(feat_obj.features)\r\n\r\n # Train model using DT or DT + adaboost\r\n train(data, hypothesis_out, learning_type)\r\n\r\n # Predict on test set with trained model\r\n predictions = predict(hypothesis_out, test, learning_type)\r\n\r\n # Evaluate accuracy of test data if provided lables\r\n if labels:\r\n accuracy = evaluate(predictions, labels)\r\n print('Model accuracy on test data:',str(accuracy) + '%')", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()\n\n self.nice.parseOptions(self.niceArgV)", "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def tests():", "def test(self):\n pass", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n 
new4.classify(source1, source2)", "def test_man9ext(self):\n self.chck_triple('man9ext')", "def test_get_scenarios(self):\n pass", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def test_adaptability():\n assert chap2.adaptability()", "def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"", "def main(args):\n if args.train_test_split < 0.2 or args.train_test_split > 0.8:\n print(\"Bad value for train_test_split, range is 0.2 - 0.8\")\n sys.exit()\n\n dataset = pd.read_csv(args.train_file)\n\n x_data = dataset.loc[:, (dataset.columns != args.classification_column) \\\n & (dataset.columns != \"Survey_id\")]\n y_data = dataset[args.classification_column].to_numpy()\n dataset_headers = list(x_data.columns)\n x_data = x_data.fillna(0).to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, \\\n test_size=args.train_test_split)\n\n\n dtc = DecisionTreeClassifier(max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n dtc = dtc.fit(x_train, y_train)\n dtc_score = dtc.score(x_test, y_test)\n\n\n export_graphviz(dtc, out_file=\"decision_tree.dot\", feature_names=dataset_headers, \\\n rounded=True, precision=1, filled=True)\n os.system(\"dot -Tpng decision_tree.dot -o decision_tree.png\")\n\n\n rfc = RandomForestClassifier(n_estimators=args.estimators, max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n rfc.fit(x_train, y_train)\n rfc_score = rfc.score(x_test, y_test)\n\n file = open('result.txt', 'w')\n file.write(f'Decisions tree score = {dtc_score}\\n')\n file.write(f'Random forest score = {rfc_score}\\n')\n file.close()", "def generate_features_test(stances, dataset, name, feature_list, features_dir):\n h, b, bodyId, headId = [], [], [], []\n\n feature_dict = {'overlap': word_overlap_features,\n 'refuting': refuting_features,\n 'polarity': polarity_features,\n 'hand': hand_features,\n 'word_unigrams_5000_concat_tf_l2_holdout_unlbled_test': word_unigrams_5000_concat_tf_l2_holdout_unlbled_test,\n 'NMF_cos_300_holdout_unlbled_test': NMF_cos_300_holdout_unlbled_test,\n 'NMF_concat_300_holdout_unlbled_test': NMF_concat_300_holdout_unlbled_test,\n 'latent_dirichlet_allocation_25_holdout_unlbled_test': latent_dirichlet_allocation_25_holdout_unlbled_test,\n 'latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test': latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test,\n 'NMF_fit_all_incl_holdout_and_test': NMF_fit_all_incl_holdout_and_test,\n 
'latent_dirichlet_allocation_incl_holdout_and_test': latent_dirichlet_allocation_incl_holdout_and_test,\n 'latent_semantic_indexing_gensim_holdout_and_test': latent_semantic_indexing_gensim_holdout_and_test,\n 'NMF_fit_all_concat_300_and_test': NMF_fit_all_concat_300_and_test,\n 'word_ngrams_concat_tf5000_l2_w_holdout_and_test': word_ngrams_concat_tf5000_l2_w_holdout_and_test,\n 'NMF_fit_all': NMF_fit_all,\n 'word_ngrams_concat_tf5000_l2_w_holdout': word_ngrams_concat_tf5000_l2_w_holdout,\n 'latent_dirichlet_allocation': latent_dirichlet_allocation,\n 'latent_semantic_indexing_gensim_test': latent_semantic_indexing_gensim_test,\n 'NMF_fit_all_concat_300': NMF_fit_all_concat_300,\n 'NMF_cos_50': NMF_cos_50,\n 'latent_dirichlet_allocation_25': latent_dirichlet_allocation_25,\n 'latent_semantic_indexing_gensim_300_concat_holdout': latent_semantic_indexing_gensim_300_concat_holdout,\n 'NMF_concat_300_holdout': NMF_concat_300_holdout,\n 'word_unigrams_5000_concat_tf_l2_holdout': word_unigrams_5000_concat_tf_l2_holdout,\n 'ppdb': ppdb,\n 'stanford_ppdb': stanford_ppdb_score,\n 'stanford_ppdb_1sent': stanford_ppdb_score_1sent,\n 'stanford_ppdb_2sent': stanford_ppdb_score_2sent,\n 'stanford_ppdb_3sent': stanford_ppdb_score_3sent,\n 'stanford_sentiment': stanford_sentiment,\n 'stanford_sentiment_1sent': stanford_sentiment_1sent,\n 'stanford_sentiment_2sent': stanford_sentiment_2sent,\n 'stanford_sentiment_3sent': stanford_sentiment_3sent,\n 'stanford_wordsim': stanford_based_verb_noun_sim,\n 'stanford_wordsim_1sent': stanford_based_verb_noun_sim_1sent,\n 'stanford_wordsim_2sent': stanford_based_verb_noun_sim_2sent,\n 'stanford_wordsim_3sent': stanford_based_verb_noun_sim_3sent,\n 'stanford_negation': stanford_negation_features,\n 'stanford_negation_1sent': stanford_negation_features_1sent,\n 'stanford_negation_2sent': stanford_negation_features_2sent,\n 'stanford_negation_3sent': stanford_negation_features_3sent,\n 'stanford_avg_words_per_sent': stanford_avg_words_per_sent,\n 'stanford_avg_words_per_sent_1sent': stanford_avg_words_per_sent_1sent,\n 'stanford_avg_words_per_sent_2sent': stanford_avg_words_per_sent_2sent,\n 'stanford_avg_words_per_sent_3sent': stanford_avg_words_per_sent_3sent,\n 'hedging': hedging_features,\n 'sen2sen': sen2sen_similarity_max,\n 'wmdsenSen': word_mover_distance_similarity_sentence_min,\n 'wmdsenDoc': word_mover_distance_wholebody,\n 'sdm_sim': sdm_sim,\n 'discuss': discuss_features,\n 'single_flat_LSTM_50d_100': single_flat_LSTM_50d_100,\n 'char_3grams_5000_concat_all_data': char_3grams_5000_concat_all_data,\n 'lexical_features': lexical_features,\n 'max_diff_twitter_uni_bigrams': max_diff_twitter_uni_bigrams,\n 'mpqa_unigrams': mpqa_unigrams,\n 'negated_context_word_12grams_concat_tf5000_l2_all_data': negated_context_word_12grams_concat_tf5000_l2_all_data,\n 'nrc_emo_lex': nrc_emo_lex,\n 'nrc_hashtag_sentiment_unigram': nrc_hashtag_sentiment_unigram,\n 'nrc_hashtag_sentiment_unigram_POS': nrc_hashtag_sentiment_unigram_POS,\n #'POS_features': POS_features,\n 'readability_features': readability_features,\n 'sentiment140_unigrams': sentiment140_unigrams,\n 'structural_features': structural_features,\n 'latent_dirichlet_allocation_300': latent_dirichlet_allocation_300,\n 'NMF_cos_300': NMF_cos_300\n }\n\n stanceCounter = 0\n for stance in stances:\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n bodyId.append(stance['Body ID'])\n headId.append(name+str(stanceCounter))\n stanceCounter += 1\n\n X_feat = []\n for feature in feature_list:\n 
print(\"calculate feature: \" + str(feature))\n feat = gen_or_load_feats(feature_dict[feature], h, b, features_dir+\"/\"+feature+\"_test.\"+name+'.npy', bodyId, feature, headId, fold=name)\n X_feat.append(feat)\n print(len(feat))\n X = np.concatenate(X_feat, axis=1)\n return X", "def test_with_bunch(filename) :\n\n\tif not os.path.exists(filename) :\n\t\tprint('File not exists: ' + filename)\n\t\tsys.exit(-1)\n\n\n\t# Read CSV file\n\tprint('Load CSV file')\n\n\tcsv.field_size_limit(sys.maxsize) # Set CSV limit to sys.maxsize\n\tfiledata = []\n\twith open(filename) as csvfile :\n\t\treader = csv.reader(csvfile, delimiter=',')\n\t\tfor row in reader :\n\t\t\tfiledata.append(row)\n\n\n\tdetector = shaman.Shaman.default()\n\n\tcorrect = 0\n\ttotals = len(filedata)\n\n\tresults = {}\n\tprint('Start testing')\n\n\tfor index, (language, code) in enumerate(filedata) :\n\t\tprint ('Testing %s/%s ' % (index, len(filedata)), end=\"\\r\")\n\n\t\tif language not in shaman.SUPPORTING_LANGUAGES:\n\t\t\ttotals -= 1\n\t\t\tcontinue\n\n\t\ttry :\n\t\t\tglang = detector.detect( code )[0][0]\n\t\texcept IndexError :\n\t\t\tglang = None\n\n\t\tif language not in results :\n\t\t\tresults[ language ] = [0, 0, 0]\n\n\t\tif glang == language :\n\t\t\tcorrect += 1\n\t\t\tresults[ language ][0] += 1\n\n\t\t\n\t\tresults[ language ][1] += 1\n\t\tresults[ language ][2] = results[ language ][0] / results[ language ][1]\n\n\n\t\n\tprint(\"------------------------------------------------\")\n\tprint(\"Accuracy: %.2lf%% (Correct: %d / Valid Data: %d)\" % (correct/totals*100, correct, totals))\n\tprint(\"------------------------------------------------\")\n\t\n\tresults = sorted(results.items(), key=lambda x: x[1][0], reverse=True)\n\tfor lang, l in results :\n\t\tprint(\"%s: %.2lf%% (%s/%s)\" % (lang, l[2] * 100, l[0], l[1]))", "def test_categorical_feature():\n\n feature = Categorical(\"abc\")\n\n for element in \"abc\":\n feature.set(element)\n feature.set(\"ignore this\")\n feature.push()\n\n for element in \"abc\":\n getattr(feature, \"set_\" + element)()\n feature.push()\n\n array = feature.array()\n assert array.shape == (6, 3)\n for i, row in enumerate(array):\n assert sum(row) == 1.0 and row[i % 3] == 1.0", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def workbench_scenarios():\n return [\n (\"HL rubric text XBlock\",\n \"\"\"<hl_rubric_text/>\n \"\"\"),\n\n ]", "def test():\n pass", "def test_add_feature_with_extras():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"sponge\", \"bob\", \"C:\\\\temp\", True, True)\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:sponge\",\n \"/PackageName:bob\",\n \"/Source:C:\\\\temp\",\n \"/LimitAccess\",\n \"/All\",\n \"/NoRestart\",\n ]\n )", "def main(args):\n bad_words_file = codecs.open(args.language + \"/feature_files/bad_words\", \"r\", \"utf-8\").readlines()\n bad_words = read_known_words(bad_words_file)\n \n good_words_file = codecs.open(args.language + \"/feature_files/good_words\", \"r\", \"utf-8\").readlines()\n good_words = read_known_words(good_words_file)\n\n curse_words_file = codecs.open(args.language + \"/feature_files/curse_words\", \"r\", 
\"utf-8\").readlines()\n curse_words = read_known_words(curse_words_file)\n\n prepositions_file = codecs.open(args.language + \"/feature_files/prepositions\", \"r\", \"utf-8\").readlines()\n prepositions = read_known_words(prepositions_file)\n\n determiners_file = codecs.open(args.language + \"/feature_files/determiners\", \"r\", \"utf-8\").readlines()\n determiners = read_known_words(determiners_file)\n\n syllables_file = codecs.open(args.language + \"/feature_files/syllables\", \"r\", \"utf-8\").readlines()\n syllable_structure = read_syllables_file(syllables_file)\n\n other_feature_files = glob.glob(args.language + \"/feature_files/*.txt\")\n other_features = set_features_from_files(other_feature_files)\n \n ermaObj = ConllToErma(args, bad_words, good_words, curse_words, prepositions, \\\n determiners, syllable_structure, other_features)\n\n if not args.just_test:\n # Input training file.\n train_id = open(args.train, \"r\")\n train = train_id.readlines()\n train_id.close()\n sys.stdout.write(\"Reading training file...\\n\")\n (train_features, train_skip_chains) = ermaObj.read_conll_file(train)\n sys.stdout.write(\"Building model...\\n\")\n train_hash = ermaObj.make_nodes(train_features)\n # Freeze the known features based on what's seen in the training data\n ermaObj.cutoff_features()\n else:\n train_hash = {}\n train_skip_chains = {}\n # Input testing file.\n test_id = open(args.test, \"r\")\n test = test_id.readlines()\n test_id.close()\n sys.stdout.write(\"Reading test file...\\n\")\n (test_features, test_skip_chains) = ermaObj.read_conll_file(test)\n sys.stdout.write(\"Building model...\\n\")\n test_hash = ermaObj.make_nodes(test_features, test=True)\n ermaObj.write_out(train_hash, train_skip_chains, test_hash, test_skip_chains)", "def test_Acled():\n ad = pytest.importorskip('acled')\n coords_x, coords_y = [1.183056], [9.553300]\n acled = ad.ACLED(\"../tests\")\n acled.download(\"TGO\", '2017-01-01', '2018-01-01')\n d = {}\n for property in [\"fatalities\", \"n_events\", \"violence_civ\"]:\n for k in [10000, 100000]:\n d[property + \"_\" + str(k)] = acled.featurize(coords_x, coords_y, property=property, function='density', buffer=k)\n\n assert sum(d[item][0] for item in d) > 0", "def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)", "def runtest(self):", "def test_recognize_describe(self):\n pass", "def test_intent_classifier_add_testing_samples(self):\n pass", "def feature(self, node=\"clickhouse1\", mysql_node=\"mysql1\", stress=None, parallel=None):\n self.context.node = self.context.cluster.node(node)\n self.context.mysql_node = self.context.cluster.node(mysql_node)\n\n with allow_experimental_bigint(self.context.node):\n Scenario(run=comp_int_inline)\n Scenario(run=comp_int_table)\n Scenario(run=comp_dec_inline)\n Scenario(run=comp_dec_table)", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def featurewiz(dataname, target, corr_limit=0.7, verbose=0, 
sep=\",\", header=0,\r\n test_data='', feature_engg='', category_encoders='', **kwargs):\r\n ### set all the defaults here ##############################################\r\n dataname = copy.deepcopy(dataname)\r\n max_nums = 30\r\n max_cats = 15\r\n RANDOM_SEED = 42\r\n ############################################################################\r\n cat_encoders_list = list(settings.cat_encoders_names.keys())\r\n ######################################################################################\r\n ##### MAKING FEATURE_TYPE AND FEATURE_GEN SELECTIONS HERE #############\r\n ######################################################################################\r\n feature_generators = ['interactions', 'groupby', 'target']\r\n feature_gen = ''\r\n if feature_engg:\r\n if isinstance(feature_engg, str):\r\n feature_gen = [feature_engg]\r\n elif isinstance(feature_engg, list):\r\n feature_gen = copy.deepcopy(feature_engg)\r\n else:\r\n print('Skipping feature engineering since no feature_engg input...')\r\n feature_type = ''\r\n if category_encoders:\r\n if isinstance(category_encoders, str):\r\n feature_type = [category_encoders]\r\n elif isinstance(category_encoders, list):\r\n feature_type = category_encoders[:2] ### Only two will be allowed at a time\r\n else:\r\n print('Skipping category encoding since no category encoders specified in input...')\r\n ################## L O A D D A T A N A M E ########################\r\n train = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose, nrows=1000)\r\n train = remove_duplicate_cols_in_dataset(train)\r\n train_index = train.index\r\n test = load_file_dataframe(test_data, sep=sep, header=header, verbose=verbose,\r\n nrows=1000)\r\n if test is not None:\r\n test = remove_duplicate_cols_in_dataset(test)\r\n test_index = test.index\r\n ############# C L A S S I F Y F E A T U R E S ####################\r\n features_dict = classify_features(train, target)\r\n if len(features_dict['date_vars']) > 0:\r\n #### If there are date-time variables in datatset, it is best to load them using pandas\r\n date_time_vars = features_dict['date_vars']\r\n train = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose,\r\n nrows='all', parse_dates=date_time_vars)\r\n if not test is None:\r\n test = load_file_dataframe(test_data, sep=sep, header=header, verbose=verbose,\r\n nrows='all', parse_dates=date_time_vars)\r\n else:\r\n train = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose, nrows='all')\r\n train_index = train.index\r\n if test is not None:\r\n test = load_file_dataframe(test_data, sep=sep, header=header, verbose=verbose,\r\n nrows='all')\r\n test_index = test.index\r\n #### If there are more than 30 categorical variables in a data set, it is worth reducing features.\r\n #### Otherwise. 
XGBoost is pretty good at finding the best features whether cat or numeric !\r\n start_time = time.time()\r\n n_splits = 5\r\n max_depth = 8\r\n ###################### I M P O R T A N T D E F A U L T S ##############\r\n subsample = 0.7\r\n col_sub_sample = 0.7\r\n test_size = 0.2\r\n seed = 1\r\n early_stopping = 5\r\n ####### All the default parameters are set up now #########\r\n kf = KFold(n_splits=n_splits)\r\n ######### G P U P R O C E S S I N G B E G I N S ############\r\n ###### This is where we set the CPU and GPU parameters for XGBoost\r\n GPU_exists = check_if_GPU_exists()\r\n ##### Set the Scoring Parameters here based on each model and preferences of user ###\r\n cpu_params = {}\r\n param = {}\r\n cpu_params['nthread'] = -1\r\n cpu_params['tree_method'] = 'hist'\r\n cpu_params['grow_policy'] = 'depthwise'\r\n cpu_params['max_depth'] = max_depth\r\n cpu_params['max_leaves'] = 0\r\n cpu_params['verbosity'] = 0\r\n cpu_params['gpu_id'] = 0\r\n cpu_params['updater'] = 'grow_colmaker'\r\n cpu_params['predictor'] = 'cpu_predictor'\r\n cpu_params['num_parallel_tree'] = 1\r\n if GPU_exists:\r\n param['nthread'] = -1\r\n param['tree_method'] = 'gpu_hist'\r\n param['grow_policy'] = 'depthwise'\r\n param['max_depth'] = max_depth\r\n param['max_leaves'] = 0\r\n param['verbosity'] = 0\r\n param['gpu_id'] = 0\r\n param['updater'] = 'grow_gpu_hist' #'prune'\r\n param['predictor'] = 'gpu_predictor'\r\n param['num_parallel_tree'] = 1\r\n print(' Running XGBoost using GPU parameters')\r\n else:\r\n param = copy.deepcopy(cpu_params)\r\n print(' Running XGBoost using CPU parameters')\r\n #################################################################################\r\n ############# D E T E C T SINGLE OR MULTI-LABEL PROBLEM #################\r\n #################################################################################\r\n if isinstance(target, str):\r\n target = [target]\r\n settings.multi_label = False\r\n else:\r\n if len(target) <= 1:\r\n settings.multi_label = False\r\n else:\r\n settings.multi_label = True\r\n #### You need to make sure only Single Label problems are handled in target encoding!\r\n if settings.multi_label:\r\n print('Turning off Target encoding for multi-label problems like this data set...')\r\n #### Since Feature Engineering module cannot handle Multi Label Targets,\r\n #### we will turnoff creating target_enc_cat_features to False\r\n target_enc_cat_features = False\r\n else:\r\n ## If target is specified in feature_gen then use it to Generate target encoded features\r\n target_enc_cat_features = 'target' in feature_gen\r\n ######################################################################################\r\n ######## C L A S S I F Y V A R I A B L E S ##########################\r\n ###### Now we detect the various types of variables to see how to convert them to numeric\r\n ######################################################################################\r\n features_dict = classify_features(train, target)\r\n if len(features_dict['date_vars']) > 0:\r\n date_time_vars = features_dict['date_vars']\r\n date_cols = copy.deepcopy(date_time_vars)\r\n #### Do this only if date time columns exist in your data set!\r\n for date_col in date_cols:\r\n print('Processing %s column for date time features....' 
%date_col)\r\n train, ts_adds = FE_create_time_series_features(train, date_col)\r\n #date_col_adds_train = left_subtract(date_df_train.columns.tolist(),date_col)\r\n #print(' Adding %d column(s) from date-time column %s in train' %(len(date_col_adds_train),date_col))\r\n #train = train.join(date_df_train, rsuffix='2')\r\n if isinstance(test,str) or test is None:\r\n ### do nothing ####\r\n pass\r\n else:\r\n print(' Adding same time series features to test data...')\r\n test, _ = FE_create_time_series_features(test, date_col, ts_adds)\r\n #date_col_adds_test = left_subtract(date_df_test.columns.tolist(),date_col)\r\n ### Now time to remove the date time column from all further processing ##\r\n #test = test.join(date_df_test, rsuffix='2')\r\n ### Now time to continue with our further processing ##\r\n idcols = features_dict['IDcols']\r\n if isinstance(test,str) or test is None:\r\n pass\r\n else:\r\n test_ids = test[idcols]\r\n train_ids = train[idcols] ### this saves the ID columns of train\r\n cols_to_remove = features_dict['cols_delete'] + idcols + features_dict['discrete_string_vars']\r\n preds = [x for x in list(train) if x not in target+cols_to_remove]\r\n numvars = train[preds].select_dtypes(include = 'number').columns.tolist()\r\n if len(numvars) > max_nums:\r\n if feature_gen:\r\n print('Warning: Too many extra features will be generated by featurewiz. This may take time...')\r\n catvars = left_subtract(preds, numvars)\r\n if len(catvars) > max_cats:\r\n if feature_type:\r\n print('Warning: Too many extra features will be generated by category encoding. This may take time...')\r\n rem_vars = copy.deepcopy(catvars)\r\n ########## Now we need to select the right model to run repeatedly ####\r\n if target is None or len(target) == 0:\r\n cols_list = list(train)\r\n settings.modeltype = 'Clustering'\r\n else:\r\n settings.modeltype = analyze_problem_type(train, target)\r\n cols_list = left_subtract(list(train),target)\r\n ######################################################################################\r\n ###### B E F O R E U S I N G D A T A B U N C H C H E C K ###################\r\n ######################################################################################\r\n ## Before using DataBunch check if certain encoders work with certain kind of data!\r\n if feature_type:\r\n final_cat_encoders = feature_type\r\n else:\r\n final_cat_encoders = []\r\n if settings.modeltype == 'Multi_Classification':\r\n ### you must put a Polynomial Wrapper on the cat_encoder in case the model is multi-class\r\n if final_cat_encoders:\r\n final_cat_encoders = [PolynomialWrapper(x) for x in final_cat_encoders if x in settings.target_encoders_names]\r\n elif settings.modeltype == 'Regression':\r\n if final_cat_encoders:\r\n if 'WOEEncoder' in final_cat_encoders:\r\n print('Removing WOEEncoder from list of encoders since it cannot be used for this Regression problem.')\r\n final_cat_encoders = [x for x in final_cat_encoders if x != 'WOEEncoder' ]\r\n ######################################################################################\r\n ###### F E A T U R E E N G G U S I N G D A T A B U N C H ###################\r\n ######################################################################################\r\n if feature_gen or feature_type:\r\n print('Starting feature engineering...this will take time...')\r\n if isinstance(test, str) or test is None:\r\n if settings.multi_label:\r\n ### if it is a multi_label problem, leave target as it is - a list!\r\n X_train, X_test, y_train, y_test = 
train_test_split(train[preds],\r\n train[target],\r\n test_size=0.2,\r\n random_state=RANDOM_SEED)\r\n else:\r\n ### if it not a multi_label problem, make target as target[0]\r\n X_train, X_test, y_train, y_test = train_test_split(train[preds],\r\n train[target[0]],\r\n test_size=0.2,\r\n random_state=RANDOM_SEED)\r\n else:\r\n X_train = train[preds]\r\n if settings.multi_label:\r\n y_train = train[target]\r\n else:\r\n y_train = train[target[0]]\r\n X_test = test[preds]\r\n try:\r\n y_test = test[target]\r\n except:\r\n y_test = None\r\n X_train_index = X_train.index\r\n X_test_index = X_test.index\r\n data_tuple = DataBunch(X_train=X_train,\r\n y_train=y_train,\r\n X_test=X_test, # be sure to specify X_test, because the encoder needs all dataset to work.\r\n cat_features = catvars,\r\n clean_and_encod_data=True,\r\n cat_encoder_names=final_cat_encoders, # final list of Encoders selected\r\n clean_nan=True, # fillnan\r\n num_generator_features=np.where('interactions' in feature_gen,True, False).tolist(), # Generate interaction Num Features\r\n group_generator_features=np.where('groupby' in feature_gen,True, False).tolist(), # Generate groupby Features\r\n target_enc_cat_features=target_enc_cat_features,# Generate target encoded features\r\n normalization=False,\r\n random_state=RANDOM_SEED)\r\n #### Now you can process the tuple this way #########\r\n data1 = data_tuple.X_train.join(y_train) ### data_tuple does not have a y_train, remember!\r\n if isinstance(test, str) or test is None:\r\n ### Since you have done a train_test_split using randomized split, you need to put it back again.\r\n data2 = data_tuple.X_test.join(y_test)\r\n train = data1.append(data2)\r\n train = train.reindex(train_index)\r\n else:\r\n try:\r\n test = data_tuple.X_test.join(y_test)\r\n except:\r\n test = copy.deepcopy(data_tuple.X_test)\r\n test = test.reindex(test_index)\r\n train = copy.deepcopy(data1)\r\n print(' Completed feature engineering. Shape of Train (with target) = %s' %(train.shape,))\r\n preds = [x for x in list(train) if x not in target]\r\n numvars = train[preds].select_dtypes(include = 'number').columns.tolist()\r\n catvars = left_subtract(preds, numvars)\r\n ###################### I M P O R T A N T ##############################################\r\n ###### This top_num decides how many top_n features XGB selects in each iteration.\r\n #### There a total of 5 iterations. Hence 5x10 means maximum 50 features will be selected.\r\n ##### If there are more than 50 variables, then maximum 25% of its variables will be selected\r\n if len(preds) <= 50:\r\n top_num = 10\r\n else:\r\n ### the maximum number of variables will 25% of preds which means we divide by 5 and get 5% here\r\n ### The five iterations result in 10% being chosen in each iteration. 
Hence max 50% of variables!\r\n top_num = int(len(preds)*0.10)\r\n ###################### I M P O R T A N T ##############################################\r\n important_cats = copy.deepcopy(catvars)\r\n if len(numvars) > 1:\r\n final_list = FE_remove_variables_using_SULOV_method(train,numvars,settings.modeltype,target,\r\n corr_limit,verbose)\r\n else:\r\n final_list = copy.deepcopy(numvars)\r\n ####### This is where you draw how featurewiz works when the verbose = 2 ###########\r\n print(' Adding %s categorical variables to reduced numeric variables of %d' %(\r\n len(important_cats),len(final_list)))\r\n if isinstance(final_list,np.ndarray):\r\n final_list = final_list.tolist()\r\n preds = final_list+important_cats\r\n #######You must convert category variables into integers ###############\r\n if len(important_cats) > 0:\r\n train, test = FE_convert_all_object_columns_to_numeric(train, test)\r\n ######## Dont move this train and y definition anywhere else ########\r\n ######## Fill Missing values since XGB for some reason #########\r\n ######## can't handle missing values in early stopping rounds #######\r\n train = train.fillna(0)\r\n y = train[target]\r\n print('############## F E A T U R E S E L E C T I O N ####################')\r\n important_features = []\r\n ########## This is for Single_Label problems ######################\r\n if settings.modeltype == 'Regression':\r\n objective = 'reg:squarederror'\r\n model_xgb = XGBRegressor( n_estimators=100,booster='gbtree',subsample=subsample,objective=objective,\r\n colsample_bytree=col_sub_sample,reg_alpha=0.5, reg_lambda=0.5,\r\n seed=1,n_jobs=-1,random_state=1)\r\n eval_metric = 'rmse'\r\n else:\r\n #### This is for Classifiers only\r\n classes = np.unique(train[target].values)\r\n if len(classes) == 2:\r\n model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,\r\n colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,\r\n max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,\r\n n_jobs=-1, nthread=None, objective='binary:logistic',\r\n random_state=1, reg_alpha=0.5, reg_lambda=0.5,\r\n seed=1)\r\n eval_metric = 'logloss'\r\n else:\r\n model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,\r\n colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,\r\n max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,\r\n n_jobs=-1, nthread=None, objective='multi:softmax',\r\n random_state=1, reg_alpha=0.5, reg_lambda=0.5,\r\n seed=1)\r\n eval_metric = 'mlogloss'\r\n #### Now set the parameters for XGBoost ###################\r\n model_xgb.set_params(**param)\r\n #print('Model parameters: %s' %model_xgb)\r\n if settings.multi_label:\r\n ########## This is for settings.multi_label problems ###############################\r\n if settings.modeltype == 'Regression':\r\n model_xgb = MultiOutputRegressor(model_xgb)\r\n #model_xgb = RegressorChain(model_xgb)\r\n else:\r\n ## just do randomized search CV - no need to do one vs rest unless multi-class\r\n model_xgb = MultiOutputClassifier(model_xgb)\r\n #model_xgb = ClassifierChain(model_xgb)\r\n #### This is where you start to Iterate on Finding Important Features ################\r\n save_xgb = copy.deepcopy(model_xgb)\r\n train_p = train[preds]\r\n if train_p.shape[1] < 10:\r\n iter_limit = 2\r\n else:\r\n iter_limit = int(train_p.shape[1]/5+0.5)\r\n print('Current number of predictors = %d ' %(train_p.shape[1],))\r\n print(' Finding Important Features using Boosted Trees 
algorithm...')\r\n ######## This is where we start training the XGBoost model to find top features ####\r\n try:\r\n for i in range(0,train_p.shape[1],iter_limit):\r\n new_xgb = copy.deepcopy(save_xgb)\r\n print(' using %d variables...' %(train_p.shape[1]-i))\r\n imp_feats = []\r\n if train_p.shape[1]-i < iter_limit:\r\n X = train_p.iloc[:,i:]\r\n cols_sel = X.columns.tolist()\r\n if settings.modeltype == 'Regression':\r\n train_part = int((1-test_size)*X.shape[0])\r\n X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]\r\n else:\r\n X_train, X_cv, y_train, y_cv = train_test_split(X, y,\r\n test_size=test_size, random_state=seed, stratify=y)\r\n try:\r\n if settings.multi_label:\r\n eval_set = [(X_train.values,y_train.values),(X_cv.values,y_cv.values)]\r\n else:\r\n eval_set = [(X_train,y_train),(X_cv,y_cv)]\r\n if settings.multi_label:\r\n model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,\r\n eval_metric=eval_metric,verbose=False)\r\n except:\r\n #### On Colab, even though GPU exists, many people don't turn it on.\r\n #### In that case, XGBoost blows up when gpu_predictor is used.\r\n #### This is to turn it back to cpu_predictor in case GPU errors!\r\n if GPU_exists:\r\n print('Warning: GPU exists but it is not turned on. Using CPU for predictions...')\r\n if settings.multi_label:\r\n model_xgb.estimator.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,\r\n eval_metric=eval_metric,verbose=False)\r\n #### This is where you collect the feature importances from each run ############\r\n if settings.multi_label:\r\n ### doing this for multi-label is a little different for single label #########\r\n imp_feats = [model_xgb.estimators_[i].feature_importances_ for i in range(len(target))]\r\n imp_feats_df = pd.DataFrame(imp_feats).T\r\n imp_feats_df.columns = target\r\n imp_feats_df.index = cols_sel\r\n imp_feats_df['sum'] = imp_feats_df.sum(axis=1).values\r\n important_features += imp_feats_df.sort_values(by='sum',ascending=False)[:top_num].index.tolist()\r\n else:\r\n ### doing this for single-label is a little different from settings.multi_label #########\r\n imp_feats = model_xgb.get_booster().get_score(importance_type='gain')\r\n #print('%d iteration: imp_feats = %s' %(i+1,imp_feats))\r\n important_features += pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()\r\n ####### order this in the same order in which they were collected ######\r\n important_features = list(OrderedDict.fromkeys(important_features))\r\n else:\r\n X = train_p[list(train_p.columns.values)[i:train_p.shape[1]]]\r\n cols_sel = X.columns.tolist()\r\n #### Split here into train and test #####\r\n if settings.modeltype == 'Regression':\r\n train_part = int((1-test_size)*X.shape[0])\r\n X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]\r\n else:\r\n X_train, X_cv, y_train, y_cv = train_test_split(X, y,\r\n test_size=test_size, random_state=seed, stratify=y)\r\n ### set the validation data as arrays in multi-label case #####\r\n if settings.multi_label:\r\n eval_set = [(X_train.values,y_train.values),(X_cv.values,y_cv.values)]\r\n else:\r\n eval_set = [(X_train,y_train),(X_cv,y_cv)]\r\n ########## Try training the model now #####################\r\n try:\r\n if settings.multi_label:\r\n 
model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,\r\n eval_set=eval_set,eval_metric=eval_metric,verbose=False)\r\n except:\r\n #### On Colab, even though GPU exists, many people don't turn it on.\r\n #### In that case, XGBoost blows up when gpu_predictor is used.\r\n #### This is to turn it back to cpu_predictor in case GPU errors!\r\n if GPU_exists:\r\n print('Warning: GPU exists but it is not turned on. Using CPU for predictions...')\r\n if settings.multi_label:\r\n model_xgb.estimator.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train)\r\n else:\r\n model_xgb.set_params(**cpu_params)\r\n model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,\r\n eval_set=eval_set,eval_metric=eval_metric,verbose=False)\r\n ### doing this for multi-label is a little different for single label #########\r\n if settings.multi_label:\r\n imp_feats = [model_xgb.estimators_[i].feature_importances_ for i in range(len(target))]\r\n imp_feats_df = pd.DataFrame(imp_feats).T\r\n imp_feats_df.columns = target\r\n imp_feats_df.index = cols_sel\r\n imp_feats_df['sum'] = imp_feats_df.sum(axis=1).values\r\n important_features += imp_feats_df.sort_values(by='sum',ascending=False)[:top_num].index.tolist()\r\n else:\r\n imp_feats = model_xgb.get_booster().get_score(importance_type='gain')\r\n #print('%d iteration: imp_feats = %s' %(i+1,imp_feats))\r\n important_features += pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()\r\n important_features = list(OrderedDict.fromkeys(important_features))\r\n except:\r\n print('Finding top features using XGB is crashing. Continuing with all predictors...')\r\n important_features = copy.deepcopy(preds)\r\n return important_features, train[important_features+target]\r\n important_features = list(OrderedDict.fromkeys(important_features))\r\n print('Selected %d important features from your dataset' %len(important_features))\r\n numvars = [x for x in numvars if x in important_features]\r\n important_cats = [x for x in important_cats if x in important_features]\r\n print(' Time taken (in seconds) = %0.0f' %(time.time()-start_time))\r\n if isinstance(test, str) or test is None:\r\n print(f'Returning list of {len(important_features)} important features and dataframe.')\r\n if len(np.intersect1d(train_ids.columns.tolist(),train.columns.tolist())) > 0:\r\n return important_features, train[important_features+target]\r\n else:\r\n train = train_ids.join(train)\r\n return important_features, train[idcols+important_features+target]\r\n else:\r\n print('Returning 2 dataframes: train and test with %d important features.' %len(important_features))\r\n if feature_gen or feature_type:\r\n ### if feature engg is performed, id columns are dropped. 
Hence they must rejoin here.\r\n train = train_ids.join(train)\r\n test = test_ids.join(test)\r\n if target in list(test): ### see if target exists in this test data\r\n return train[idcols+important_features+target], test[idcols+important_features+target]\r\n else:\r\n return train[idcols+important_features+target], test[idcols+important_features]", "def main():\n raw_data = pd.read_csv('data/raw_hospital_data.csv')\n\n fe_data = new_features(raw_data)\n fe_data = compressing_admission_type(data)\n fe_data = age_to_cat(fe_data)\n fe_data = compressing_careunit(fe_data)\n fe_data = compressing_curr_serv(fe_data)\n fe_data = compressing_ethnicity(fe_data)\n fe_data = compressing_marital_status(fe_data)\n fe_data = compressing_religion(fe_data)\n fe_data = compressing_admit_location(fe_data)\n fe_data = compress_icd9_codes(fe_data)\n\n fe_data.to_csv('data/feature_engineering_data.csv')", "def main():\n feature_fns = [token_features, token_pair_features, lexicon_features]\n # Download and read data.\n download_data()\n docs, labels = read_data(os.path.join('data', 'train'))\n # Evaluate accuracy of many combinations\n # of tokenization/featurization.\n results = eval_all_combinations(docs, labels,\n [True, False],\n feature_fns,\n [2,5,10])\n # Print information about these results.\n best_result = results[0]\n worst_result = results[-1]\n print('best cross-validation result:\\n%s' % str(best_result))\n print('worst cross-validation result:\\n%s' % str(worst_result)) \n plot_sorted_accuracies(results)\n print('\\nMean Accuracies per Setting:')\n print('\\n'.join(['%s: %.5f' % (s,v) for v,s in mean_accuracy_per_setting(results)]))\n \n \n # Fit best classifier.\n clf, vocab = fit_best_classifier(docs, labels, results[0])\n\n # Print top coefficients per class.\n print('\\nTOP COEFFICIENTS PER CLASS:')\n print('negative words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 0, 5, vocab)]))\n print('\\npositive words:')\n print('\\n'.join(['%s: %.5f' % (t,v) for t,v in top_coefs(clf, 1, 5, vocab)]))\n # Parse test data\n test_docs, test_labels, X_test = parse_test_data(best_result, vocab)\n\n # Evaluate on test set.\n predictions = clf.predict(X_test)\n #print('CSR Test ->')\n #print(X_test.toarray())\n #print('predictions = ',predictions)\n #print('test_labels = ',test_labels)\n\n print('testing accuracy=%f' %\n accuracy_score(test_labels, predictions))\n \n \n print('\\nTOP MISCLASSIFIED TEST DOCUMENTS:')\n print_top_misclassified(test_docs, test_labels, X_test, clf, 5)\n \n \n print('testing accuracy=%f' %\n accuracy_score(test_labels, predictions))\n\n print('\\nTOP MISCLASSIFIED TEST DOCUMENTS:')\n print_top_misclassified(test_docs, test_labels, X_test, clf, 5)", "def test_intent_classifier_test(self):\n pass", "def main(args):\n\n # load dataset\n with open(args.infile, 'rb') as fin:\n x_train, y_train, x_test, y_test = pickle.load(fin)\n\n y_train = y_train.astype('int64')\n y_test = y_test.astype('int64')\n\n random_index = list(range(len(x_train)))\n random.shuffle(random_index)\n x_train = np.array(x_train[random_index])\n y_train = np.array(y_train[random_index])\n\n # y_train = y_train.astype(bool).astype(int)\n # y_test = y_test.astype(bool).astype(int)\n\n # combined different features\n feature_extractors = [\n # ('general', MyScaler(False)),\n # ('wordcount', MyCountVectorizer(ngram_range=(1, 1), stop_words='english')),\n ('tfidf', MyTfidfVectorizer(stop_words='english')),\n ]\n combined_feature = FeatureUnion(feature_extractors)\n\n estimators = [('feature', 
combined_feature),\n ('clf', svm.LinearSVC(C=0.3))]\n pipeline = Pipeline(estimators)\n\n # pipeline.fit(x_train, y_train)\n # print(pipeline.score(x_test, y_test))\n\n # parameters to search\n param_grid = [\n {\n 'clf': [MultinomialNB()],\n 'clf__alpha': [10, 1.0, 0.1, 0.01],\n },\n {\n 'clf': [svm.LinearSVC()],\n 'clf__C': [3, 1, 0.3, 0.1],\n },\n ]\n\n # start training\n t0 = time.time()\n grid = GridSearchCV(pipeline, param_grid=param_grid, verbose=4, n_jobs=4)\n grid.fit(x_train, y_train)\n\n print()\n print('done in %.2f seconds' % (time.time() - t0))\n print()\n print('train accuracy: %.2f%%' % (100 * grid.score(x_train, y_train)))\n print('test accuracy: %.2f%%' % (100 * grid.score(x_test, y_test)))\n print()\n print('the best parameters are:', grid.best_params_)\n print()\n print('confusion matrix:')\n print(metrics.confusion_matrix(y_test, grid.predict(x_test)))", "def test_apply_endorsements(self):", "def test_extract_categories():\n pass", "def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")", "def testbed_name(self): \n return \"C-Lab\"", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def findFeatures(self):\n\t\tpass", "def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')", "def test_intent_classifier_curate(self):\n pass", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def testHandbrakeCLI(self):\n self.assertEqual(\n self.handBrake,\n self.config.handBrake\n )", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and 
trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)" ]
[ "0.6563471", "0.652699", "0.64427763", "0.6373243", "0.6333925", "0.6331873", "0.6215436", "0.6211196", "0.59107846", "0.5880464", "0.5870288", "0.58623254", "0.58284056", "0.58262885", "0.58067703", "0.5803254", "0.57745475", "0.5774168", "0.57514584", "0.5704505", "0.5700603", "0.5693078", "0.5652558", "0.56517303", "0.5650566", "0.5643128", "0.56402546", "0.56365436", "0.56352496", "0.56278056", "0.562442", "0.562376", "0.5618224", "0.5615385", "0.56007", "0.5599648", "0.5588481", "0.5558653", "0.5544523", "0.55427504", "0.55403906", "0.5539311", "0.5512834", "0.550815", "0.5504436", "0.54862106", "0.5479062", "0.5475934", "0.54731476", "0.5465907", "0.5465783", "0.5457624", "0.5451471", "0.54503125", "0.5443729", "0.5441925", "0.54407585", "0.54348904", "0.5425329", "0.54249865", "0.54245305", "0.5423331", "0.542093", "0.54207623", "0.54195625", "0.5411961", "0.5407852", "0.53989893", "0.53988767", "0.539806", "0.53965735", "0.5396401", "0.53954244", "0.5395154", "0.5386821", "0.5382551", "0.5380843", "0.5379364", "0.53752905", "0.536914", "0.53660816", "0.5364716", "0.5364346", "0.5363949", "0.5354994", "0.5353202", "0.535319", "0.5350873", "0.5347", "0.5346832", "0.5338407", "0.53377986", "0.5334163", "0.5333795", "0.53289306", "0.5318871", "0.53127897", "0.53091097", "0.53061885", "0.5303061" ]
0.59231627
8
Returns true for all hostclasses which aren't tagged as nonZDD hostclasses
def is_deployable(self, hostclass):
    return ((hostclass in self._hostclasses and
             is_truthy(self._hostclasses[hostclass].get("deployable"))) or
            hostclass not in self._hostclasses)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsNoHost(self):\n if self.no_host:\n return True\n return any([node.no_host for node in self.GetAncestorGroups()])", "def is_opaque(self, classobj):\n try:\n return self.instance_vars[classobj] == []\n except KeyError:\n return False", "def has_ghosts(self):\n return not np.all(self.mesh.discretization.ghosts == 0)", "def include_up_hosts(nmap_host):\n if nmap_host.status == 'up':\n return True\n return False", "def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' + module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def test_no_unlisted_classes_derived_from_Target(self):\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n self.logger.debug(module)\n # Skip over smirnoff_hack because it is not intended to contain any Target implementations.\n if module in [\"_dcdlib\", \"smirnoff_hack\"]: continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' 
+ module))\n self.logger.debug(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if inspect.isclass(obj) and issubclass(obj, forcebalance.target.Target):\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Hessian',\n 'Thermo',\n 'Hydration',\n 'Moments', \n 'OptGeoTarget',\n 'TorsionProfileTarget']\n self.logger.debug(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n pytest.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def any_public_tests(self):\n return any([not t.hidden for t in self.tests])", "def include_hostnames(nmap_host):\n if nmap_host.hostnames:\n return True\n return False", "def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])", "def filter_dont_care(gt: NDArrayObject, class_name: str) -> bool:\n if gt == \"ignore\":\n return True\n\n if gt == class_name:\n return True\n\n else:\n return False", "def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result", "def no_classes(mask):\n extrema = ImageStat.Stat(mask).extrema\n r = extrema[0][1]\n g = extrema[1][1]\n b = extrema[2][1]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def no_ext_grid(net):\n\n if net.ext_grid.in_service.sum() + (net.gen.slack & net.gen.in_service).sum() == 0:\n return True", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def in_host():\n return not in_docker()", "def is_ssh_up_on_all_instances(self, stack_id):\n instances = self.get_instance_public_ips(\n self.cfn.get_stack_instance_ids(stack_id))\n if not instances:\n return False\n if all([ssh.is_ssh_up(i) for i in instances]):\n return True\n return False", "def is_virtual_network_host():\n return False", "def is_builtin_dataclass(_cls: Type[Any]) -> bool:\n import dataclasses\n\n return not hasattr(_cls, '__processed__') and dataclasses.is_dataclass(_cls)", "def is_process_class(node):\n if isinstance(node, ClassDef):\n for b in node.bases:\n if isinstance(b, Name) and b.id == KW_PROCESS_DEF:\n return True\n return False", "def class_is_interesting(name: str):\n if name.startswith('org.chromium.'):\n return True\n return False", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def is_ghost(self):\n\t\treturn False", "def _has_all_host_addresses(self, addresses):\n for s_id, s_size in enumerate(self.subnets[1:]):\n for m in range(s_size):\n # +1 to s_id since first subnet is 1\n if str((s_id + 1, m)) not in addresses:\n return False\n return True", "def all_nss(classifications):\n\n return (classifications['warmup'] == 0 and classifications['slowdown'] == 0 and\n classifications['flat'] == 0)", "def __ne__(self, other):\n if not isinstance(other, 
NestedDiscoveredHostDimms):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_doesnt_report_disabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()", "def all_is_stopped(self):\r\n return all(not p.running for p in self._platforms.values())", "def any_nss(classifications):\n\n return classifications['no steady state'] > 0", "def is_all_in_one(config):\n return len(filtered_hosts(config, exclude=False)) == 1", "def is_standalone():\n\n return not any(pkg in list(main.__dict__.keys()) for pkg in Dccs.packages)", "def all_hosts(*args, **kwargs):\n return True", "def is_noncopyable(class_):\n logger = utils.loggers.cxx_parser\n class_ = class_traits.get_declaration(class_)\n\n true_header = \"is_noncopyable(TRUE) - %s - \" % class_.decl_string\n # false_header = \"is_noncopyable(false) - %s - \" % class_.decl_string\n\n if class_.class_type == class_declaration.CLASS_TYPES.UNION:\n return False\n\n if class_.is_abstract:\n logger.debug(true_header + \"abstract client\")\n return True\n\n # if class has public, user defined copy constructor, than this class is\n # copyable\n copy_ = class_.find_copy_constructor()\n if copy_ and copy_.access_type == 'public' and not copy_.is_artificial:\n return False\n\n for base_desc in class_.recursive_bases:\n assert isinstance(base_desc, class_declaration.hierarchy_info_t)\n\n if base_desc.related_class.decl_string in \\\n ('::boost::noncopyable', '::boost::noncopyable_::noncopyable'):\n logger.debug(true_header + \"derives from boost::noncopyable\")\n return True\n\n if not has_copy_constructor(base_desc.related_class):\n\n base_copy_ = base_desc.related_class.find_copy_constructor()\n\n if base_copy_:\n\n if base_copy_.access_type == 'private':\n logger.debug(\n true_header +\n \"there is private copy constructor\")\n return True\n else:\n if __is_noncopyable_single(base_desc.related_class):\n logger.debug(\n true_header +\n \"__is_noncopyable_single returned True\")\n return True\n\n if __is_noncopyable_single(base_desc.related_class):\n logger.debug(\n true_header +\n \"__is_noncopyable_single returned True\")\n return True\n\n if not has_copy_constructor(class_):\n logger.debug(true_header + \"does not have trivial copy constructor\")\n return True\n elif not has_public_constructor(class_):\n logger.debug(true_header + \"does not have a public constructor\")\n return True\n elif has_destructor(class_) and not has_public_destructor(class_):\n logger.debug(true_header + \"has private destructor\")\n return True\n else:\n return __is_noncopyable_single(class_)", "def class_is(cls: Class) -> bool:\n pass", "def __ne__(self, other):\n if not isinstance(other, RuntimeHostProfile):\n return True\n\n return self.to_dict() != other.to_dict()", "def check_if_host_disabled(self, host, binary='nova-compute'):\n _service_id = 
self._find_service_id_by_host(host, binary)\n if _service_id:\n _ser = self._client.services.find(id=_service_id)\n if _ser.status == u'enabled':\n return False\n elif _ser.status == u'disabled':\n return True\n else:\n return False", "def is_virtualized (self):\n return len([i for i in self.infras if\n i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,\n self.TYPE_INFRA_STATIC_EE)]) > 0", "def _host_blocked(self, host: str) -> bool:\n bad_masks = self.config.core.host_blocks\n for bad_mask in bad_masks:\n bad_mask = bad_mask.strip()\n if not bad_mask:\n continue\n if (re.match(bad_mask + '$', host, re.IGNORECASE) or\n bad_mask == host):\n return True\n return False", "def is_not_subclass(self, cls, seconds=60):\n st = '('+') & ('.join(cls.axioms)+')'\n m = prover9(self.axioms, [st], seconds, 1, options=self.options)\n if type(m)==list:\n return True, m[0]\n else:\n return False, m", "def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True", "def is_host_accessible(self):\n return self._host_array is not None", "def test_reports_enabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def has_apt(klass):\n return False", "def host_valid_lenient(self, host: str) -> bool:\n return WebCrawler.resolve_domain(host) in self.root_domains", "def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)", "def isSingletonClass(cls, tmpcls):\n if tmpcls in [defined_configuration, effective_configuration]:\n if cls.ignoreClass(tmpcls):\n return False\n return True\n return False", "def test_all_no_class(self):", "def test_all_no_class(self):", "def check_plugins(classes):\n class_names = get_module_class_names(classes)\n check_duplicate_class_names(class_names)\n for _class in classes:\n check_implemented_functions(_class)", "def check_hypernet(seq_iter):\n\treturn not any(filter(has_abba, seq_iter))", "def check_all_neighbors_present(duthosts, nbrhosts, all_cfg_facts, nbr_macs, check_nbr_state=True):\n for per_host in duthosts.frontend_nodes:\n for asic in per_host.asics:\n logger.info(\"Checking local neighbors on host: %s, asic: %s\", per_host.hostname, asic.asic_index)\n cfg_facts = all_cfg_facts[per_host.hostname][asic.asic_index]['ansible_facts']\n if 'BGP_NEIGHBOR' in cfg_facts:\n neighs = cfg_facts['BGP_NEIGHBOR']\n else:\n logger.info(\"No local neighbors for host: %s/%s, skipping\", per_host.hostname, asic.asic_index)\n continue\n\n dump_and_verify_neighbors_on_asic(duthosts, per_host, asic, list(neighs.keys()),\n nbrhosts, all_cfg_facts, nbr_macs, check_nbr_state=check_nbr_state)", "def isInternal(self):\n\n\t\t# TODO optimization do we really need to look at the host attributes?\n\t\t# maybe we can just use the global attribute (faster)\n\t\tfe = self.newdb.getFrontendName()\n\t\tnetwork = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetwork')\n\t\tnetmask = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetmask')\n\n\t\t# Test based on our client's IP address.\n\t\twork = 
string.split(network, '.')\n\t\tmask = string.split(netmask, '.')\n\t\tip = string.split(self.clientList[-1], '.')\n\n\t\tfor i in range(0, len(ip)):\n\t\t\ta = int(ip[i]) & int(mask[i])\n\t\t\tb = int(work[i]) & int(mask[i])\n\n\t\t\tif a != b:\n\t\t\t\treturn 0\n\n\t\treturn 1", "def daemon(self) -> bool:\n return any(thread.daemon for thread in self.threads)", "def is_class_discoverable(_class, default_discoverability=False):\n return bool(getattr(_class, _get_discoverable_attribute(_class),\n default_discoverability))", "def sees_home_tag(self):\n detections = self.swarmie.get_latest_targets().detections\n\n for detection in detections:\n if detection.id == 256:\n return True\n\n return False", "def test_implemented_targets_derived_from_target(self):\n for key in forcebalance.objective.Implemented_Targets.keys():\n self.logger.debug(\"Assert %s is subclass of target\\n\" % str(forcebalance.objective.Implemented_Targets[key]))\n self.assertTrue(issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target))", "def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()", "def is_end_host(num):\n\treturn not num in routers", "def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []", "def host_okay(self, host: str) -> bool:\n host = host.lower()\n if host in self.root_domains:\n return True\n\n if re.match(r'\\A[\\d\\.]*\\Z', host):\n return False\n\n if self.strict:\n return self.host_valid_strict(host)\n\n return self.host_valid_lenient(host)", "def has_z(self): # -> bool:\n ...", "def is_ghost(self):\n return self._is_ghost", "def __ne__(self, other):\n if not isinstance(other, HyperflexClusterAllOf):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def has_ssh_devices(self) -> bool:\n return self._has_ssh_devices", "def _has_compute_uncompute_tag(cmd):\n for tag in cmd.tags:\n if tag in [UncomputeTag(), ComputeTag()]:\n return True\n return False", "def is_enabled(env, cls):\n try:\n return env.is_enabled(cls)\n except AttributeError:\n if cls not in env.enabled:\n env.enabled[cls] = env.is_component_enabled(cls)\n return env.enabled[cls]", "def are_all_snp_disabled(self):\r\n for snp in self.snp:\r\n if snp.attributes.active:\r\n return False\r\n self.primers_are_useless()\r\n return True", "def cs_classes(post):\n return bool(re.search(r\"\\s+\\[cs|CS]\\d+[a-cA-C]\\s+\", post))", "def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' 
+ host\n return host in self.root_domains", "def check_eclass(eclass):\n eclass = str(eclass)\n # Should be a valid eclass\n return eclass not in LIST_ECLASS", "def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)", "def ignore_all(self):\n # type: () -> bool\n return False", "def __ne__(self, other: 'GatewayTemplateGatewayTypeDedicatedTemplate') -> bool:\n return not self == other", "def __is_all_connected(self) -> bool:\n\n return self._nb_modules == len(self._modules)", "def checkIgnored(self, hostmask):\n if self.lobotomized:\n return True\n if world.testing:\n return False\n assert ircutils.isUserHostmask(hostmask), 'got %s' % hostmask\n if self.checkBan(hostmask):\n return True\n now = time.time()\n for (pattern, expiration) in self.ignores.items():\n if now < expiration or not expiration:\n if ircutils.hostmaskPatternEqual(pattern, hostmask):\n return True\n else:\n del self.ignores[pattern]\n # Later we may wish to keep expiredIgnores, but not now.\n return False", "def check_bgp_neighbors(duthosts, excluded_ips=[]):\n down_nbrs = 0\n for node in duthosts.frontend_nodes:\n for asic in node.asics:\n bgp_facts = asic.bgp_facts()['ansible_facts']\n\n for address in bgp_facts['bgp_neighbors']:\n if address.lower() not in excluded_ips and \\\n bgp_facts['bgp_neighbors'][address]['state'] != \"established\":\n logger.info(\"BGP neighbor: %s is down: %s.\" % (\n address, bgp_facts['bgp_neighbors'][address]['state']))\n down_nbrs += 1\n if down_nbrs != 0:\n logger.warning(\"Neighbors are still down: %d\", down_nbrs)\n return False\n else:\n logger.info(\"All BGP neighbors are restored.\")\n return True", "def has_nodes(self) -> bool:\n\n return len(self.dirs) > 0", "def all_same_class(instances):\n # assumption: instances is not empty and class label is at index -1\n first_label = instances[0][-1]\n for instance in instances:\n if instance[-1] != first_label:\n return False \n return True # if we get here, all instance labels matched the first label", "def has_registered_subclasses(cls: type) -> bool:\n has_subclasses = False\n if issubclass(cls, RegistersSubclasses):\n if cls.registered_subclasses():\n has_subclasses = True\n return has_subclasses", "def filter_out_localhost(packet):\n if not scapy.layers.inet.IP in packet: # no IP layer\n return True\n hosts = [packet[scapy.layers.inet.IP].dst,\n packet[scapy.layers.inet.IP].src]\n localhosts = [\"127.0.0.1\", \"0.0.0.0\", \"localhost\",\n \"localhost.localdomain\", socket.gethostname(), inet.get_ip()]\n return len(set(hosts + localhosts)) == len(localhosts) + 2 # repeats?", "def valid_host(host):\n if host in ACCEPTED_HOSTS:\n return True\n return False", "def ignore_dnt(self):\n # type: () -> bool\n return self._ignore_dnt", "def verify_stack_has_no_loop(self):\n for ping_host, tcpdump_host in (\n (self.hosts_name_ordered()[0], self.hosts_name_ordered()[-1]),\n (self.hosts_name_ordered()[-1], self.hosts_name_ordered()[0])):\n self.verify_no_arp_storm(ping_host, tcpdump_host)", "def _is_filter_class(cls):\n return type(cls) is types.TypeType and issubclass(cls, BaseHostFilter)", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def __hasHubs(self):\n for c in self.__comps:\n if 
c.isHub():\n return True\n return False", "def enabled_on_host() -> Callable[[Loader.ModInfo], bool]:\n def filter_fn(mod_info: Loader.ModInfo) -> bool:\n if len(mod_info.mod_def.hostnames) == 0:\n return True\n\n if HOSTNAME not in mod_info.mod_def.hostnames:\n _LOG.info(\n f\"{mod_info.mod_def.name} is not enabled for host {HOSTNAME}\"\n )\n return False\n return True\n\n return filter_fn", "def class_name_arg_required(args):\n no_class_name_flags = ['list_courses', 'version']\n return not any(\n getattr(args, flag)\n for flag in no_class_name_flags\n )", "def has_sclass(self, w: Wrapper, prop: Any) -> bool:\n if not prop:\n return None\n props = self.sclasses(w)\n if isinstance(prop, str):\n ans = [prop in props]\n else:\n ans = [i in props for i in prop]\n return all(ans)", "def detect(cls):\n return False", "def isSane(self):\n\n if self.host == 'localhost':\n return True\n\n host_parts = self.host.split('.')\n if self.wildcard:\n assert host_parts[0] == '', host_parts\n del host_parts[0]\n\n # If it's an absolute domain name, remove the empty string\n # from the end.\n if host_parts and not host_parts[-1]:\n del host_parts[-1]\n\n if not host_parts:\n return False\n\n # Do not allow adjacent dots\n if '' in host_parts:\n return False\n\n tld = host_parts[-1]\n if tld not in _top_level_domains:\n return False\n\n if len(tld) == 2:\n if len(host_parts) == 1:\n # entire host part is 2-letter tld\n return False\n\n if len(host_parts[-2]) <= 3:\n # It's a 2-letter tld with a short second to last segment\n # so there needs to be more than two segments specified \n # (e.g. *.co.uk is insane)\n return len(host_parts) > 2\n else:\n # A long second to last segment is specified.\n return len(host_parts) > 1\n else:\n # It's a regular tld, so it needs at least one more segment\n return len(host_parts) > 1\n\n # Fell through, so not sane\n return False", "def EmptyTarget(self):\n return not self.objects", "def is_subclass(self, cls, seconds=60):\n proofs = []\n for ax in cls.axioms:\n p = pr9(self.axioms, [ax], seconds, self.options)\n if type(p)==list:\n print ax, \"proved\"\n else:\n print ax, p\n return False, 'No conclusions'\n proofs.append(p)\n return True, proofs", "def isUnConditional(self) -> bool:\n ...", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def test_reports_enabled_hosts_as_up(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def with_necks(self):\n return hasattr(self, 'necks')", "def is_trained(self) -> bool:\r\n return not getattr(self._qda, \"classes_\", None) is None", "def has_name(self, name):\n\t\treturn name in self.classes" ]
[ "0.6732368", "0.61149806", "0.60162634", "0.598662", "0.5910872", "0.57612085", "0.5761027", "0.5724565", "0.57139426", "0.56720996", "0.56686395", "0.5650766", "0.5641392", "0.56388974", "0.5596937", "0.55439645", "0.55356926", "0.5530277", "0.55288005", "0.5514988", "0.54905534", "0.5474688", "0.54731774", "0.5432726", "0.5423214", "0.5399945", "0.53964704", "0.539353", "0.5364832", "0.5362249", "0.5358262", "0.53432655", "0.53196573", "0.53145695", "0.5314326", "0.53134185", "0.5307122", "0.5299262", "0.52979505", "0.52967715", "0.5292635", "0.52848345", "0.5265103", "0.5256573", "0.5224211", "0.5222108", "0.5222108", "0.52204883", "0.5195453", "0.51896596", "0.51562566", "0.51528484", "0.5151541", "0.5138653", "0.51345825", "0.513247", "0.51324075", "0.5131656", "0.5124319", "0.512153", "0.51167333", "0.51126343", "0.5111048", "0.5111048", "0.51077527", "0.5103546", "0.50975776", "0.5081709", "0.5077921", "0.5041214", "0.5038251", "0.50371075", "0.503633", "0.50360656", "0.5029345", "0.5025976", "0.50226825", "0.5021362", "0.50200564", "0.50122064", "0.50083107", "0.5005814", "0.4999462", "0.4991597", "0.49913442", "0.49866515", "0.4984389", "0.497932", "0.49753258", "0.49714682", "0.49705207", "0.49693182", "0.49684298", "0.49629962", "0.4959976", "0.4959069", "0.49585676", "0.49582657", "0.49535874", "0.495203" ]
0.66115427
1
Returns the integration test for this hostclass, or None if none exists
def get_integration_test(self, hostclass):
    return (hostclass in self._hostclasses and
            self._hostclasses[hostclass].get("integration_test")) or None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_test(self,test_id):\n for test in self.suite.get_tests():\n if test.id == test_id:\n return test\n return None", "def GetHWTestSuite(self):\n hw_tests = self._run.config['hw_tests']\n if not hw_tests:\n # TODO(milleral): Add HWTests back to lumpy-chrome-perf.\n raise unittest.SkipTest('Missing HWTest for %s' % (self._bot_id,))\n\n return hw_tests[0]", "def test_runner_class(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"test_runner_class\")", "def get_test(self, t_id: int) -> Optional[Tests]:\n try:\n test = self.session.query(Tests).get(t_id)\n return test\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get test: {excpt}')\n return None", "def get_test_type(self):\n return self.test_type", "def GetTestExtension(self):\n\n return self.test_extension", "def test_runner_class(self) -> str:\n return pulumi.get(self, \"test_runner_class\")", "def get_implementation(self):\n return self.__capabilities[\"IMPLEMENTATION\"]", "def test_get_host(self):\n pass", "def get_test_server():\n\n #TODO: make this lazy initialization thread-safe\n if '__instance' not in globals():\n server_thread = TestServerThread(settings.SELENIUM_TESTSERVER_HOST, settings.SELENIUM_TESTSERVER_PORT)\n server_thread.start()\n server_thread._start_event.wait()\n if server_thread._error:\n raise server_thread._error\n globals()['__instance'] = server_thread\n\n return globals()['__instance']", "def test_suite_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"test_suite_name\")", "def _get_test(self, obj, name, module, globs, source_lines):\n if getattr(obj, '__name__', '') in self.skipped:\n return None\n return doctest.DocTestFinder._get_test(self, obj, name, module,\n globs, source_lines)", "def test_api(self) -> 'test_descriptor.TestApi':\n if not self.running_phase_state:\n raise ValueError('test_api only available when phase is running.')\n if not self._running_test_api:\n self._running_test_api = openhtf.TestApi(\n measurements=measurements.Collection(\n self.running_phase_state.measurements),\n running_phase_state=self.running_phase_state,\n running_test_state=self,\n )\n return self._running_test_api", "def get_id(self):\n return \"unittest_required_plugin\"", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def get_instance():\n if not TestConfiguration._instance:\n TestConfiguration._instance = TestConfiguration()\n return TestConfiguration._instance", "def host(self) -> \"IStageHost\":\n return self._values.get(\"host\")", "def getNodeTest(nodeTestId: int):\n\n nodeTestQuery = NodeTest.query.get(nodeTestId)\n\n if nodeTestQuery:\n return nodeTestQueryToObject(nodeTestQuery)\n else:\n return None", "def test_case(self) -> bool:\n return pulumi.get(self, \"test_case\")", "def GetTestWrapper(self):\n return ''", "def health_check_host(self) -> Optional[str]:\n return pulumi.get(self, \"health_check_host\")", "def get_host(self):\r\n return self.host", "def get(self, host):\n return self.__locusts__[host]", "def get_host(self):\n return self.host", "def service(self):\n return self.__stackdriver", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def get(self) -> 
Optional[es.ExpectationSuite]:\n _client = client.get_instance()\n path_params = [\n \"project\",\n _client._project_id,\n \"featurestores\",\n self._feature_store_id,\n \"featuregroups\",\n self._feature_group_id,\n \"expectationsuite\",\n ]\n\n return es.ExpectationSuite.from_response_json(\n _client._send_request(\"GET\", path_params)\n )", "def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin", "def getControllingHost(self):\r\n if len(self.controllingClient) > 0:\r\n return self.controllingClient.values()[0]\r\n else:\r\n return None", "def testCase(self):\r\n from adrest.tests import AdrestTestCase\r\n\r\n return type('TestCase', (AdrestTestCase, ), dict(api=self))", "def get_suite(arn=None):\n pass", "def test(self):\n\t\treturn describeInterface(self)", "def get_test_index(self):\n return self.test_index", "def get_scanner(hass, config):\n scanner = BTSmartHubScanner(config[DOMAIN])\n\n return scanner if scanner.success_init else None", "def get_test_frame(self):\n\n # get function from end of unittest id()\n target = self.id().split('.')[-1]\n\n # traverse frames until function name is found\n for frame in inspect.stack():\n if frame[3] == target:\n return frame\n return None", "def get_issue_tracker_for_testcase(testcase):\n issue_tracker_project_name = _get_issue_tracker_project_name(testcase)\n if not issue_tracker_project_name or issue_tracker_project_name == 'disabled':\n return None\n\n return get_issue_tracker(issue_tracker_project_name)", "def _get_test_method(self):\n return getattr(self, self._testMethodName)", "def get_test(arn=None):\n pass", "def available(self, app):\n return self.xlist(\"testfor\", app)[0]", "def findHostFromInstrument(self, instrument):\n return instrument.host", "def get_host(name):\n raise NotImplementedError('derived class should overload me')", "def get_run_host(self):\n comp = self.get_run_from()\n host = (comp.host_ref\n if isinstance(comp.host_ref, basestring)\n else comp.host_ref.value())\n if isinstance(host, IPAddressable):\n host.fix_arguments()\n host = host.get_ip()\n return host", "def get_run_host(self):\n comp = self.get_run_from()\n host = (comp.host_ref\n if isinstance(comp.host_ref, basestring)\n else comp.host_ref.value())\n if isinstance(host, IPAddressable):\n host.fix_arguments()\n host = host.get_ip()\n return host", "def host(self) :\n\t\ttry :\n\t\t\treturn self._host\n\t\texcept Exception as e:\n\t\t\traise e", "def get_driver(self):\n return self.driver", "def TestConnection(self):\n return self._analyzer.TestConnection()", "def host_instance_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_instance_type\")", "def test_type(self):\n return self._test_type", "def test(cls, hostname):\n pass", "def get_driver(self):\n\t\treturn self.driver", "def GetTest(self, test_id):\n\n path = self.GetTestPath(test_id)\n if not self._IsTestFile(path):\n raise NoSuchTestError, test_id\n\n return self._GetTestFromPath(test_id, os.path.normpath(path))", "def get_test_method(self):\n return getattr(self, self.get_test_method_name())", "def _get_driver():\n return etcd_driver.get_driver()", "def get_hypervisor(self, graph_db):\n node = neo_resource.get_node_by_property(graph_db,\n self.label,\n property_key='hostname',\n property_value=self.hostname)\n return node", "def get_from_host(cls, host, silent=False):\n if cls.search([], count=True) == 1:\n return cls.search([])[0]\n try:\n website, = cls.search([('name', '=', host)])\n except ValueError:\n if not silent:\n raise 
WebsiteNotFound()\n else:\n return website", "def get_url(self):\n if self.url is None: # pragma: no cover\n raise ImproperlyConfigured(\n \"{0} is missing a url to test. Define {0}.url \"\n \"or override {0}.get_url().\".format(self.__class__.__name__)\n )\n return self.url", "def testing(self):\n return self.settings['tangled.app.testing']", "def integration_provider(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"integration_provider\")", "def integration_provider(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"integration_provider\")", "def getDisplay(self):\n for display in self.listDisplays():\n if self.testDisplay(display):\n if self.verbose:\n print \"Got an existing working display on %s\" % display\n return display\n if self.verbose:\n print \"Not found any existing working display\"\n\n return self.startServer()", "def test_get_current_request_hostname(self):\r\n assert_is_none(get_current_request_hostname())", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def test_suite_name(self) -> str:\n return pulumi.get(self, \"test_suite_name\")", "def get_task_host(self):\n comp = self.get_task_role()\n host = (comp.host_ref\n if isinstance(comp.host_ref, basestring)\n else comp.host_ref.value())\n if isinstance(host, IPAddressable):\n host.fix_arguments()\n host = host.get_ip()\n return host", "def get_task_host(self):\n comp = self.get_task_role()\n host = (comp.host_ref\n if isinstance(comp.host_ref, basestring)\n else comp.host_ref.value())\n if isinstance(host, IPAddressable):\n host.fix_arguments()\n host = host.get_ip()\n return host", "def test_getHostByName(self):\n d = client.getHostByName(self.ghbntest)\n d.addCallback(self.checkGetHostByName)\n return d", "def get_object(self, *args, **kwargs):\n\t\n #Setting the test_id\n\ttest_id = self.kwargs['test_id']\n try:\n return api.nova.server_get(self.request, test_id)\n except Exception:\n redirect = reverse(\"horizon:rally_dashboard:events:index\")\n msg = _('Unable to retrieve instance details.')\n exceptions.handle(self.request, msg, redirect=redirect)", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def get_active(cls):\n\n\t\tif cls._interface:\n\t\t\twindow = cls._interface.get_active()\n\t\t\tif window:\n\t\t\t\treturn cls(window)\n\t\t\telse:\n\t\t\t\treturn None\n\t\telse:\n\t\t\traise NotImplementedError('Unsupported platform')", "def test_get_driver_test_class(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n self.assertIsInstance(locator.get_driver(), TestDriver,\n 'get_driver did not return a test_driver when it was expected to.')\n self.assertNotIsInstance(locator.get_driver(), SimDriver,\n 'get_driver returned a sim_driver when it was expect to return a test_driver')", "def host():\n return platform.node()", "def type(self) -> Optional[pulumi.Input['TestIssueType']]:\n return pulumi.get(self, \"type\")", "def setup_search(self):\n if self.testing:\n return MockExternalSearchIndex()\n else:\n search = ExternalSearchIndex(self._db)\n if not search:\n self.log.warning(\"No external search server configured.\")\n return None\n return search", "def getHost(self):\n\n\t\treturn HOST", "def GetSuiteExtension(self):\n\n return self.suite_extension", "def TestTrafficType(self):\n\t\treturn 
self._get_attribute('testTrafficType')", "def get_TestEntry_instance(string, config):\n paren_i = string.find(\"(\")\n if paren_i > 0:\n args = string[paren_i+1:-1]\n string = string[:paren_i]\n args, kwargs = core.parse_args(args)\n else:\n args = ()\n kwargs = {}\n try:\n cls = module.get_object(string)\n except (module.ModuleImportError, module.ObjectImportError), err:\n logging.warn(err)\n return None\n testinstance = cls(config)\n return core.TestEntry(testinstance, args, kwargs, False)", "def backend_object(self, id):\n return self.model.Suite.everything.get(id=id)", "def driver(self):\n \n return self.__driver", "def get_questions_test(self, question_id: int) -> Optional[QuestionsTests]:\n try:\n questions_tests = self.session.query(QuestionsTests).get(question_id)\n return questions_tests\n except Exception as excpt:\n self.session.rollback()\n print(f'Couldn\\'t get question test link: {excpt}')\n return None", "def test1(self):\n\t\treturn describeInterface(self)", "def getTestClass(self):\n\t\treturn AbstraccionPrueba", "def ios_test(self) -> Optional[pulumi.Input['IosTestArgs']]:\n return pulumi.get(self, \"ios_test\")", "def get_tech(self, tech_opt: Optional[hammer_tech.HammerTechnology]) -> hammer_tech.HammerTechnology:\n self.assertTrue(tech_opt is not None, \"Technology must be loaded\")\n assert tech_opt is not None # type checking\n return tech_opt", "def get_hub_if_exists():\n return _threadlocal.hub", "def get_backend():\n global _ACTIVE_BACKEND\n if not _ACTIVE_BACKEND:\n _ACTIVE_BACKEND = locate(settings.SITE_BACKEND)()\n return _ACTIVE_BACKEND", "def get(cls):\n if not cls._jss:\n cls.setup()\n return cls._jss", "def get_tgis_backend():\n global tgis_backend\n return tgis_backend", "def get_hosted_zone(session):\n account = get_account_id_from_session(session)\n if account == hosts.PROD_ACCOUNT:\n return hosts.PROD_DOMAIN\n elif account == hosts.DEV_ACCOUNT:\n return hosts.DEV_DOMAIN\n else:\n return None", "def backend_getInterface(self):\n\t\treturn describeInterface(self)", "def get_active_test_session():\n result=None\n sql=\"SELECT * FROM sessions WHERE is_over=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n result=c.fetchone()\n conn.close()\n return result", "def GetUtilWrapper(self):\n return self.GetTestWrapper()", "def getHost(self):\n return self._host", "def Experiment(self, default=None):\n return self.data.get('experiment', default)", "def _get_infrastructure_engine():\n\n LOG.debug(\"Infrastructure engine {engine} is loading\".format(\n engine=CONF.infrastructure_engine))\n\n return _load_driver('sahara.infrastructure.engine',\n CONF.infrastructure_engine)", "def getHost(self):\n return self._host", "def test_get_current_request_hostname(self):\n assert get_current_request_hostname() is None" ]
[ "0.6346583", "0.6162359", "0.5884836", "0.5876703", "0.55224764", "0.5468428", "0.5372038", "0.529812", "0.52784747", "0.52683514", "0.5250328", "0.52476835", "0.5240323", "0.5180137", "0.51473355", "0.51330215", "0.51204187", "0.5118088", "0.5116481", "0.50992554", "0.5088099", "0.50844663", "0.5072845", "0.5072533", "0.5064881", "0.5057838", "0.5057838", "0.5057838", "0.50508636", "0.5039868", "0.50369394", "0.50179213", "0.500894", "0.4996498", "0.49911866", "0.4964381", "0.4957863", "0.49526536", "0.49425292", "0.49315277", "0.49183628", "0.4917726", "0.4914399", "0.49132928", "0.49132928", "0.491294", "0.4910819", "0.4899838", "0.48871368", "0.4867629", "0.48669106", "0.4855476", "0.48518458", "0.48459682", "0.4842156", "0.48398325", "0.4833393", "0.48266906", "0.48266834", "0.48244348", "0.48244348", "0.48144662", "0.4809479", "0.4791846", "0.47888684", "0.4783476", "0.4783476", "0.47797877", "0.4763207", "0.47579676", "0.47579676", "0.4755414", "0.47527766", "0.47505152", "0.4743197", "0.47359675", "0.47310206", "0.47209218", "0.47178778", "0.47147104", "0.47112265", "0.47094414", "0.4708201", "0.47081944", "0.47018594", "0.47009993", "0.4699888", "0.46989012", "0.46892765", "0.4688766", "0.4682471", "0.46786934", "0.46761292", "0.4675785", "0.46576664", "0.4657512", "0.4650981", "0.46464878", "0.46449375", "0.46440738" ]
0.8695572
0
Promote AMI to specified stage. And, conditionally, make executable by production account if ami is staged as tested.
def _promote_ami(self, ami, stage):
    prod_baker = self._disco_bake.option("prod_baker")
    promote_conditions = [
        stage == "tested",
        prod_baker,
        ami.tags.get("baker") == prod_baker,
    ]
    try:
        self._disco_bake.promote_ami(ami, stage)
        if all(promote_conditions):
            self._disco_bake.promote_ami_to_production(ami)
    except:
        logging.exception("promotion failed")
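The `_disco_bake` helpers called above are not included in this record. As a minimal sketch only — assuming plain boto3, a tag-based stage marker, and a placeholder production account id, none of which are confirmed by the record — the two promotion steps could look roughly like:

import boto3

ec2 = boto3.client("ec2")

# Hypothetical value; the real production account id is not given here.
PROD_ACCOUNT_ID = "123456789012"


def promote_ami(ami_id, stage):
    # Record the new stage (e.g. "tested") as a tag on the AMI.
    ec2.create_tags(Resources=[ami_id], Tags=[{"Key": "stage", "Value": stage}])


def promote_ami_to_production(ami_id):
    # Grant the production account permission to launch ("execute") the AMI.
    ec2.modify_image_attribute(
        ImageId=ami_id,
        Attribute="launchPermission",
        LaunchPermission={"Add": [{"UserId": PROD_ACCOUNT_ID}]},
    )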
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stage(self, stage: osbuild.Stage):", "def deploy():\n stage(branch='live', role='live')", "def setup(self, stage: Optional[str] = None) -> None:", "def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()", "def Stage(self, descriptor, app_dir, runtime, environment):\n command = self.registry.get((runtime, environment))\n\n if not command:\n # Many runtimes do not require a staging step; this isn't a problem.\n log.debug(('No staging command found for runtime [%s] and environment '\n '[%s].'), runtime, environment.name)\n return\n\n command.EnsureInstalled()\n return command.Run(self.staging_area, descriptor, app_dir)", "def _stage(self):\n\n local_source_path = join(BespokeGlobals.ABS_LOCAL_TOOLS,\n self._tool.install_properties['source_file'])\n\n self._remote_target_path = join(self._sut.bespoke_root,\n BespokeGlobals.TOOLS,\n self._tool.install_properties['source_file'])\n\n if isfile(local_source_path):\n self._staf_file_copy(local_source_path, self._remote_target_path)\n else:\n raise CoreError('Failed to stage tool \"{0}\" on remote machine! The file/directory '\n '\"{1}\" does not exist!'.format(self._tool.name, local_source_path))", "def stage():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Ask user for a new version\n _config['version'] = git.prompt_tag('Enter a new version number',\n unique=True)\n\n # Build version\n # use execute to allow for other implementations of 'build'\n execute('build')\n\n # Commit/push/tag\n with lcd(env.project_path):\n with settings(warn_only=True):\n local('git add build')\n # support builds where there's no change; sometimes comes up when\n # reusing a tag because of an unexpected problem\n with settings(warn_only=True):\n msg = local('git commit -m \"Release %(version)s\"' % _config,capture=True)\n if 'nothing to commit' in msg:\n warn(msg)\n warn('continuing anyway')\n elif not msg.startswith('[master'):\n abort(\"Unexpected result: %s\" % msg)\n local('git push')\n\n git.push_tag(_config['version'])\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, _config['version'])\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))", "def process_deploybuild ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n s3_infra_conn,\n vpc,\n base_name,\n app_name,\n app_type,\n region_name,\n aws_account_type,\n params ) :\n APP_NAME = app_name.upper( )\n master_tomcat_ami_name = 'Master-Tomcat.v8'\n master_nodejs_ami_name = 'Master-NodeJS.v6'\n master_python_ami_name = 'Master-Python.v2'\n deployment_ami_name = params.get( 'source-ami' )\n deployment_secgrp = get_deployment_secgrp_name( )\n deployment_keypair = get_keypair_name( aws_account_type, vpc.region.name, 'deployment' )\n instance_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n deployment_keypair_file = os.environ[ 'HOME' ] + '/.ssh/' + deployment_keypair + '.pem'\n wait_on_launch = params.get( 'wait-on-launch', 'YES' ) == 'YES'\n\n ##\n ## Find the correct AMI to use for deployment\n ##\n if not deployment_ami_name or len( deployment_ami_name ) < 1 :\n deployment_ami_name = get_current_ami( s3_infra_conn, region_name, get_env_type( base_name ), app_name )\n if not deployment_ami_name :\n print 
\"No AMI found, defaulting to master AMI!\"\n if app_type == 'jee' :\n deployment_ami_name = master_tomcat_ami_name\n elif app_type == 'nodejs' :\n deployment_ami_name = master_nodejs_ami_name\n elif app_type == 'python' :\n deployment_ami_name = master_python_ami_name\n\n print \"Deployment ami to be used: \" + deployment_ami_name\n deployment_ami = get_ami_by_name( ec2_conn, deployment_ami_name )\n if not deployment_ami :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n ##\n ## Launch the deployment server to deploy the new warfile to\n ##\n print \"Launching deployment instance.\"\n deploy_i = launch_instance_nonvpc( ec2_conn = ec2_conn,\n ami = deployment_ami,\n base_name = base_name,\n instance_type = APP_NAME + '-DEPLOYMENT',\n keypair = deployment_keypair,\n machine_type = 't1.micro',\n security_group = deployment_secgrp )\n deployment_servername = deploy_i.public_dns_name\n print \"Waiting for SSH to be available on deployment server\"\n sshd_started = wait_on_server_sshd( deployment_servername, deployment_keypair_file )\n if not sshd_started :\n print \"SSH is not available after a long time! \" + deployment_servername\n sys.exit( 3 )\n\n ##\n ## Update the instance software before deploying the new code.\n ##\n update_os = params.get( 'update-os', 'YES' ) == 'YES'\n if update_os :\n if app_type == 'jee' :\n os_update_cmd = 'sudo yum -y update'\n elif app_type == 'nodejs' :\n os_update_cmd = 'sudo yum update -y --enablerepo=epel'\n elif app_type == 'python' :\n os_update_cmd = 'sudo yum -y update'\n\n ssh_call( deployment_servername, deployment_keypair_file, os_update_cmd )\n\n # Deploy the code to the server based on app_type\n if app_type == 'jee' :\n print \"Waiting for HTTP to be available on deployment server\"\n tomcat_started = wait_on_server_httpd( deployment_servername )\n if not tomcat_started :\n print \"Tomcat is not available after a long time! 
\" + deployment_servername\n sys.exit( 4 )\n\n ##\n ## Deploy the warfile to the deployment server\n ##\n print \"Deploying warfile to deployment server\"\n deploy_warfile ( deployment_servername,\n deployment_keypair_file,\n app_name,\n params[ 'warfile-path' ] )\n\n elif app_type == 'nodejs' :\n print \"Stopping existing node instance.\"\n ssh_call( deployment_servername, deployment_keypair_file, 'sudo /etc/init.d/nodejs stop' )\n print \"Deploying source files to deployment server\"\n src_dir = params[ 'src-dir' ]\n tgt_dir = '/usr/share/node'\n status = bulk_upload( deployment_servername, deployment_keypair_file, src_dir, tgt_dir )\n if status != 0 :\n print \"Unable to upload source files to the deployment server!\"\n sys.exit( 5 )\n print \"Updating project dependencies on deployment server\"\n status = ssh_call( deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + ' && sudo npm install' )\n if status != 0 :\n print \"Unable to update project dependencies on deployment server!\"\n sys.exit( 5 )\n print \"If found bower.json file, install bower and download bower resource.\"\n status = ssh_call(deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + ' && [ -f bower.json ] && (yes | sudo bower install --allow-root) || echo \"Not found bower.json file\"')\n if status != 0 :\n print \"Action of node js plugin bower failed!\"\n sys.exit( 5 )\n elif app_type == 'python':\n src_dir = params[ 'src-dir' ]\n tgt_dir = '/usr/share/scripts/s3-delivery-agent/'\n print \"Creating app folder\"\n ssh_call( deployment_servername, deployment_keypair_file, 'sudo mkdir -p ' + tgt_dir )\n print \"Deploying source files to deployment server\"\n status = bulk_upload( deployment_servername, deployment_keypair_file, src_dir, tgt_dir )\n if status != 0 :\n print \"Unable to upload source files to the deployment server!\"\n sys.exit( 5 )\n print \"Stopping python app.\"\n ssh_call( deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + '; python fatcontroller/command/stop.py' )\n print \"Updating project dependencies on deployment server\"\n status = ssh_call( deployment_servername, deployment_keypair_file, 'cd ' + tgt_dir + ' && sudo pip install -r config/requirements.txt' )\n if status != 0 :\n print \"Unable to update project dependencies on deployment server!\"\n sys.exit( 5 )\n print \"Start python app\"\n print params\n print \"Current environment type: \" + get_env_type( base_name )\n status = ssh_call(deployment_servername, deployment_keypair_file, 'export SERVER_ENV=' + get_env_type( base_name ) + '; cd ' + tgt_dir + '; sudo python fatcontroller/command/startup.py ' + get_env_type( base_name ))\n if status != 0 :\n print \"Starting python app failed.\"\n sys.exit( 5 )\n\n\n ##\n ## Create AMI\n ##\n print \"Creating AMI from deployment server.\"\n timestamp = get_current_datetime_string( )\n new_ami_name = base_name + '-' + APP_NAME + '-' + timestamp\n new_ami = create_ami_from_instance( aws_account_type, ec2_conn, deploy_i, new_ami_name )\n if not new_ami :\n print \"Could not create new AMI!\"\n sys.exit( 5 )\n\n ##\n ## Remove the deployment instance, since we no longer need it.\n ##\n print \"Terminating deployment instance.\"\n deploy_i.terminate( )\n\n ##\n ## Launch new instance\n ##\n print \"Find secgrp for \" + base_name + \" and \" + APP_NAME + \" result: \"\n secgrp = find_group( ec2_conn, base_name, APP_NAME )\n\n instance_subnet_cidr = params[ 'subnet-cidr' ]\n subnets = vpc_conn.get_all_subnets( filters = [ ( \"vpcId\", [ vpc.id ] ),\n ( \"cidrBlock\", 
[ instance_subnet_cidr ] ) ] )\n userdata = get_userdata( app_type, base_name, app_name )\n \n old_instance = find_instance_by_type(ec2_conn,base_name,app_name)\n \n print \"Launching new instance.\"\n instance = launch_instance_vpc( ec2_conn,\n new_ami,\n base_name = base_name,\n instance_type = APP_NAME,\n keypair = instance_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = secgrp.id,\n subnet_id = subnets[ 0 ].id,\n user_data = userdata,\n public_ip = True,\n wait_for_running = wait_on_launch )\n\n print \"Storing new AMI as the current.\"\n save_current_ami( s3_infra_conn, region_name, get_env_type( base_name ), app_name, new_ami.name )\n\n ## if there is a internal or public load balancer, re-register the new instance. Otherwise, update route53 dns record.\n ## added by yliu, 2015/06/16\n load_balancer_internal = get_elb_name( base_name, app_name )\n elb_internal = find_elb( elb_conn, load_balancer_internal )\n\n load_balancer_public = get_elb_name( base_name, app_name + '-PB' )\n elb_public = find_elb( elb_conn, load_balancer_public )\n \n new_instance_ids = [ instance.id ]\n\n if elb_internal is not None or elb_public is not None:\n if elb_internal is not None:\n print \"Adding the new app instances into the internal load balancer.\"\n\n is_terminator_now = True\n if elb_public is not None:\n is_terminator_now = False\n\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb_internal,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = is_terminator_now,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n\n if elb_public is not None:\n print \"Adding the new app instances into the public load balancer.\"\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb_public,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n print \"Added the new app instances into the public load balancer.\"\n\n else :\n public_dns_alias = create_dns_name( base_name, app_name )\n internal_dns_alias = create_internal_dns_name( base_name, app_name )\n \n if old_instance :\n print \"Terminating old instance.\"\n old_instance.terminate( )\n\n print \"Configuring DNS entry for new instance.\"\n if elb_public is not None:\n set_dns_cname( r53_conn, public_dns_alias, instance.public_dns_name )\n \n if elb_internal is not None:\n set_dns_atype( r53_conn, internal_dns_alias, instance.private_ip_address )\n\n print \"New instance is now available at: \" + public_dns_alias\n print \"New instance internal DNS name: \" + internal_dns_alias", "def provision(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh provision ' + str(vm) + ' ' + str(env) )\n #result = local( main_dir + '/vagrant/bin/vm.sh provision ' + str(vm) + ' ' + str(env) )\n #if result != '0'\n # abort( \"Failed test - Aborting\")", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print 
\"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance", "def install_sm_local_dependencies(framework, job_type, image, ec2_conn, ec2_instance_ami):\n python_invoker = get_python_invoker(ec2_instance_ami)\n # Install custom packages which need to be latest version\"\n # using virtualenv to avoid package conflicts with the current packages\n ec2_conn.run(f\"sudo apt-get install virtualenv -y \")\n ec2_conn.run(f\"virtualenv env --python {python_invoker}\")\n ec2_conn.run(f\"source ./env/bin/activate\")\n if framework == \"pytorch\":\n # The following distutils package conflict with test dependencies\n ec2_conn.run(\"sudo apt-get remove python3-scipy python3-yaml -y\")\n ec2_conn.run(f\"sudo {python_invoker} -m pip install -r requirements.txt \", warn=True)", "def stage_dev():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Build version\n build()\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, 'dev')\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))", "def create_ami_from_instance ( aws_account_type,\n ec2_conn,\n instance,\n ami_name,\n ami_description = None,\n wait_for_available = True ) :\n ami_id = instance.create_image( ami_name, ami_description )\n ami = aws_wait( ec2_conn.get_all_images, ami_id, [ ami_id ] )\n if not ami :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n if wait_for_available :\n ami_available = wait_on_object_state( ami, 'available' ,max_wait=3600)\n if not ami_available :\n print \"AMI is not available after a long time! 
\" + ami.name\n return None\n\n # Allow other AWS accounts the ability to see this AMI.\n if aws_account_type == 'esp-nonprod' :\n priv_account_id = esp_prod[ 'accountid' ]\n else :\n priv_account_id = esp_nonprod[ 'accountid' ]\n\n ami.set_launch_permissions( user_ids = [ priv_account_id ] )\n\n return ami", "def _stage_pkg(self):\n context = self._config.context\n context.package.file = os.path.basename(context.package.arg)\n root_path = self._distro.root_mountspec.mountpoint\n stage_path = os.path.join(root_path, context.package.dir.lstrip('/'))\n context.package.full_path = os.path.join(stage_path, context.package.file)\n try:\n if any(protocol in context.package.arg for protocol in ['http://', 'https://']):\n self._download_pkg(context)\n else:\n self._move_pkg(context)\n except Exception:\n errstr = 'Exception encountered while staging package'\n log.critical(errstr)\n log.debug(errstr, exc_info=True)\n return False\n # reset to chrooted file path\n context.package.arg = os.path.join(context.package.dir, context.package.file)\n return True", "def deploy(\n context, instance, user=get_local_user(), initial=False, stack=None, branch=BRANCH,\n):\n remote = True\n\n if initial:\n clone(context, instance, user, branch)\n else:\n backup(context, user, remote, instance, stack)\n\n update(context, user, remote, instance, branch)\n up(context, user, remote, instance, stack)", "def prepare_image_for_deploy(runtime: \"mlrun.runtimes.BaseRuntime\"):\n pass", "def on(stage):\n localhosts = ['localhost', '127.0.0.1']\n env.stage = stage\n env.context = get_context()\n hosts = env.context['hosts']\n if stage == 'dev' and len(hosts) == 1 and hosts[0] in localhosts:\n env.hosts = []\n else:\n env.hosts = env.context['hosts']", "def main(cmd_line=None):\n release_config = 'CentOS-8/master.yaml'\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger('dlrnapi_promoter')\n log.setLevel(logging.DEBUG)\n\n log.info(\"Checking for log directory\")\n log_file = os.path.expanduser(get_log_file('staging',\n release_config))\n log_dir = \"/\".join(log_file.split(\"/\")[:-1])\n if not os.path.exists(log_dir):\n log.info(\"Creating log directory : {}\".format(log_dir))\n os.makedirs(log_dir)\n config_builder = PromoterConfigFactory(config_class=StageConfig)\n\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger(\"promoter-staging\")\n log.setLevel(logging.DEBUG)\n\n args = parse_args(config_builder.global_defaults, cmd_line=cmd_line)\n\n if hasattr(args, \"release_config\"):\n release_config = args.release_config\n config_builder = PromoterConfigFactory(config_class=StageConfig,\n **{'log_file': log_file})\n\n config = config_builder(\"staging\", release_config,\n validate=None)\n # Export dlrn password\n os.environ['DLRNAPI_PASSWORD'] = config.dlrn['server']['password']\n staged_env = StageOrchestrator(config)\n args.handler(staged_env)\n\n if cmd_line is not None:\n return config", "def deploy(self, image_name, ip, flavor='m1.small'):\n body_value = {\n \"port\": {\n \"admin_state_up\": True,\n \"name\": self.name + '_provision',\n \"network_id\": os_utils.get_network_id(self.nova_api, 'provision_bob'),\n 'fixed_ips': [{'ip_address': ip}]}}\n response = self.neutron.create_port(body=body_value)\n self._provision_port_id = response['port']['id']\n self.mac = response['port']['mac_address']\n\n image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)\n flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)\n # TODO(Gonéri): We don't need keypair for the BM nodes\n 
keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)\n # Ensure with get DHCP lease on the provision network first\n nics = [{'port-id': self._provision_port_id}]\n\n self._os_instance = os_provisioner.build_openstack_instance(\n self.nova_api,\n self.name,\n image_id_to_boot_from,\n flavor_id,\n keypair_id,\n nics)\n\n if not self._os_instance:\n LOG.error(\"deployment has failed\")\n raise Exception()\n\n os_provisioner.add_provision_security_group(self.nova_api)\n os_utils.add_security_groups(self._os_instance, ['provision'])\n os_utils.add_security_groups(self._os_instance, self._security_groups)\n LOG.info(\"add security groups '%s'\" % self._security_groups)\n LOG.info(\"instance '%s' ready to use\" % self.name)\n\n # the instance should be off for Ironic\n self._os_instance.stop()", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def test_stage_pre_boot(self, mock_stage_pre_boot):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_pre_boot.called)", "def process_deployapp ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n s3_infra_conn,\n r53_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n app_type,\n region_name,\n aws_account_type,\n params,\n monitor_params = None ) :\n target_env = base_name\n APP_NAME = app_name.upper( )\n deployment_ami_name = params.get( 'source-ami' )\n source_env = params[ 'source-env' ]\n TARGET_ENV = target_env.upper( )\n SOURCE_ENV = source_env.upper( )\n load_balancer = get_elb_name( target_env, app_name )\n instance_name = get_instance_name( target_env, app_name )\n wait_on_launch = params.get( 'wait-on-launch', 'YES' ) == 'YES'\n if not monitor_params :\n monitor_params = params.get( 'monitors' )\n\n instance_secgrp_name = get_secgrp_name( target_env, app_name )\n instance_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n\n ##\n ## Find the correct AMI to use for deployment\n ##\n if not deployment_ami_name or len( deployment_ami_name ) < 1 :\n deployment_ami_name = get_current_ami( s3_infra_conn, region_name, get_env_type( SOURCE_ENV ), app_name )\n if not deployment_ami_name :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n deployment_ami = get_ami_by_name( ec2_conn, deployment_ami_name )\n if not deployment_ami :\n print \"Could not find AMI to use for deployment! 
\" + deployment_ami_name\n sys.exit( 2 )\n\n subnets = get_vpc_subnets( vpc_conn, vpc, params.get( 'subnet-type', 'PRIVATE' ) )\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ instance_secgrp_name ] } )\n \n userdata = get_userdata( app_type, TARGET_ENV, app_name )\n \n new_instances = []\n num_instances = int( params.get( 'num-instances', len( subnets ) ) )\n if num_instances > len( subnets ) :\n num_instances = len( subnets )\n\n while num_instances > 0 :\n instance = launch_instance_vpc( ec2_conn,\n deployment_ami,\n base_name = base_name,\n instance_type = app_name,\n keypair = instance_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = secgrps[ 0 ].id ,\n subnet_id = subnets[ num_instances - 1 ].id,\n user_data = userdata,\n public_ip = False,\n wait_for_running = wait_on_launch )\n new_instances.append( instance )\n\n if monitor_params :\n print \"Setting alarms on the instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, APP_NAME, base_topicarn, monitor_params )\n\n num_instances -= 1\n\n new_instance_ids = [ i.id for i in new_instances ]\n\n if ( wait_on_launch ) :\n print \"Waiting for instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, new_instance_ids )\n\n print \"Creating AMI from instance server.\"\n timestamp = get_current_datetime_string( )\n new_ami_name = target_env + '-' + APP_NAME + '-' + timestamp\n ami_instance = new_instances[ 0 ]\n if not wait_on_launch :\n # We must wait for at least the ami instance to be available so we can create a new AMI from it.\n wait_on_object_state( ami_instance, 'running' )\n new_ami = create_ami_from_instance( aws_account_type, ec2_conn, new_instances[ 0 ], new_ami_name )\n if not new_ami :\n print \"Could not create new AMI!\"\n sys.exit( 5 )\n\n print \"Storing new AMI as the current.\"\n save_current_ami( s3_infra_conn, region_name, get_env_type( TARGET_ENV ), app_name, new_ami.name )\n\n print \"Adding the new app instances into the load balancer.\"\n elb = find_elb( elb_conn, load_balancer )\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n if not status :\n print \"WARNING: Not all new app instances came up in the load balancer! Check the load balancer.\"\n\n print \"Deployment complete.\"", "def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')", "def provision(project, node, img, network, nic):\n data = {constants.PROJECT_PARAMETER: project,\n constants.NODE_NAME_PARAMETER: node,\n constants.IMAGE_NAME_PARAMETER: img,\n constants.NETWORK_PARAMETER: network,\n constants.NIC_PARAMETER: nic}\n res = requests.put(_url + \"provision/\", data=data,\n auth=(_username, _password))\n click.echo(res.content)", "def set_stage(stage):\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"w\")\n f.write(\"%s\\n\" % stage)\n f.close()\n logger.debug(\"set stage: %s\" % (stage))\n except:\n raise AssertionError(\"Unable to save setup/teardown stage! 
%s\" % (sys.exc_info()[1]))\n return stage", "def deploy_stack():\n build = \"sam build --use-container --manifest src/images/requirements.txt\"\n local(build)\n\n #package = f\"sam package --template-file template.yaml --output-template-file \\\n # packaged.yaml --s3-bucket {env.bucket_name} --region {env.aws_region}\"\n #local(package)\n\n deploy = f\"sam deploy --stack-name storge-machine-service \\\n --s3-bucket {env.bucket_name}\\\n --parameter-overrides env=dev --capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND --region {env.aws_region}\"\n #deploy = \"sam deploy\"\n local(deploy)", "def _pre_provisioning_steps(self, context, res_id, data, res_inventory):\n LOG.info(\"[%s] Executing pre provisioning steps\" % res_id)\n expected_state = [eon_const.EON_RESOURCE_STATE_IMPORTED]\n state_in_db = res_inventory.get(eon_const.EON_RESOURCE_STATE)\n # if state not imported raise error\n self.validator.validate_state(expected_state, state_in_db)\n # if resource not baremetal, raise error\n type_in_db = res_inventory.get(eon_const.EON_RESOURCE_TYPE)\n self.validator.validate_type(type_in_db,\n eon_const.EON_RESOURCE_TYPE_BAREMETAL)\n next_state = eon_const.RESOURCE_STATE_PROVISON_INITIATED\n self.virt_utils.update_prop(context, res_id,\n eon_const.EON_RESOURCE_STATE,\n next_state)\n res_inventory[\"state\"] = next_state\n\n # update the type from baremetal to given resource type\n type_ = data[eon_const.EON_RESOURCE_TYPE]\n self.virt_utils.update_prop(context, res_id, 'type',\n type_)\n res_inventory[\"type\"] = data[\"type\"]\n LOG.debug(\"[%s] pre provisioning comple\" % res_id)", "def then_app_running_stage(context):\n result = context.result\n result | should.equal('Success').desc(\"Application is reachable in the Stage stage.\")", "def execute_sagemaker_remote_tests(image):\n pytest_command, path, tag, job_type = generate_sagemaker_pytest_cmd(image, SAGEMAKER_REMOTE_TEST_TYPE)\n context = Context()\n with context.cd(path):\n context.run(f\"virtualenv {tag}\")\n with context.prefix(f\"source {tag}/bin/activate\"):\n context.run(\"pip install -r requirements.txt\", warn=True)\n res = context.run(pytest_command, warn=True)\n metrics_utils.send_test_result_metrics(res.return_code)\n if res.failed:\n raise DLCSageMakerRemoteTestFailure(\n f\"{pytest_command} failed with error code: {res.return_code}\\n\"\n f\"Traceback:\\n{res.stdout}\"\n )", "def transition_model_version_stage(self, stage):\n try:\n for model in self.client.search_model_versions(f\"name='{self.model_name}'\"):\n if model.current_stage in ['Staging', 'Production']:\n self.client.transition_model_version_stage(\n name=model.name,\n version=model.version,\n stage=\"Archived\"\n )\n logging.info(f'Transitioning {model.name}/{model.version} to Archived')\n\n self.client.transition_model_version_stage(\n name=self.model_name,\n version=self.model_version,\n stage=stage\n )\n logging.info(f'Model transitioned to {stage}')\n\n except Exception as e:\n logging.error(e)", "def create_instance_by_image(self):\n print '# Start a new instance based on an existing AMI'\n ami = raw_input('Enter AMI (empty to cancel): ')\n\n # Cancel\n if not ami:\n print 'Operation cancelled'\n return\n\n # Start the instance\n if self.compute.create_instance_by_image(ami):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given AMI'", "def _stage_test_step(self):\n\n local_source_path = join(BespokeGlobals.ABS_LOCAL_TESTS, self._test_directory)\n\n if isdir(local_source_path):\n 
self._staf_dir_copy(local_source_path, self._remote_target_path)\n else:\n raise CoreError('Failed to stage test step \"{0}\" on remote machine! The test directory '\n '\"{1}\" does not exist!'.format(self._description, local_source_path))", "def prepareInstance(image, instancetype, accesskey, secretkey, pkname,\n softwareList, pipelineUrl):\n # Start up the AMI\n dnsName = startami(image, instancetype, accesskey, secretkey, pkname)\n\n # SSH onto the machine and run the webserver\n\n # SSH onto the machine and run the chef-solo\n installSoftware(dnsName, softwareList)\n\n return ((get_image_username(image), dnsName))", "def _deploy_salt_minion(name, session, vm_):\n # Get bootstrap values\n vm_[\"ssh_host\"] = get_vm_ip(name, session)\n vm_[\"user\"] = vm_.get(\"user\", \"root\")\n vm_[\"password\"] = vm_.get(\"password\", \"p@ssw0rd!\")\n vm_[\"provider\"] = vm_.get(\"provider\", \"xen\")\n log.debug(\"%s has IP of %s\", name, vm_[\"ssh_host\"])\n # Bootstrap Salt minion!\n if vm_[\"ssh_host\"] is not None:\n log.info(\"Installing Salt minion on %s\", name)\n boot_ret = __utils__[\"cloud.bootstrap\"](vm_, __opts__)\n log.debug(\"boot return: %s\", boot_ret)", "def boot_from_image(dest_project_id, bootable_image_id, flavor, name,\n objects_created):\n command = 'nova --os-project-id %s boot --image %s --flavor %s' \\\n ' --poll %s' % (dest_project_id, bootable_image_id, flavor, name)\n output = Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0].split('\\n\\n')[0]\n return parse_output(output)", "def RunStage(self):\n\n # Stage construction is usually done as late as possible because the tests\n # set up the build configuration and options used in constructing the stage.\n results_lib.Results.Clear()\n stage = self.ConstructStage()\n stage.Run()\n self.assertTrue(results_lib.Results.BuildSucceededSoFar())", "def launch_instance(tag, key_name, group_name, inst_type, ami_name, user_data,\n wait=True, returninfo=None):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n failures = 0\n max_failures = 10\n while True:\n try:\n reservation = ec2.run_instances(ami_name,\n key_name=key_name,\n security_groups=[group_name],\n instance_type=inst_type,\n user_data=None)\n break\n except Exception, err:\n # Failed to get instance; wait 15 seconds and then try again (up to\n # 10 total times)\n errortext = str(err)\n if errortext.find(\"Not authorized for images\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that the AMI code in\"\n print \"* CloudSetup.py is deprecated. Please go to\"\n print \"* https://aws.amazon.com/marketplace/ and search for\"\n print \"* \\\"Ubuntu server lts hvm\\\", selecting the most recent\"\n print \"* version. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and then copy the AMI ID for the US East region.\"\n print \"* Copy that to the AMI_NAME value in CloudSetup.py\"\n print \"* and re-run.\"\n print \"***************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"***************************************\"\n return None\n elif errortext.find(\"accept terms and subscribe\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that you have never used this\"\n print \"* AMI before and need to accept its terms and\"\n print \"* subscribe to it. Please follow the link in the below\"\n print \"* error text. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and \\\"Accept Terms\\\". 
After receiving email\"\n print \"* confirmation, you can re-run the code.\"\n print \"**************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n failures += 1\n if failures == max_failures:\n print \"**************************************\"\n print \"* Maximum number of instance launch failures reached.\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n print \" ** ec2.run_instances failed for tag\", tag, \"; waiting 15\"\n print \" ** seconds and then trying again...\"\n time.sleep(15)\n\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance = reservation.instances[0]\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance.add_tag(\"tag\", tag)\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n\n if wait:\n print \" Instance requested, waiting for 'running' for tag\", tag\n while instance.state != \"running\":\n print \" %s ...\" % tag\n time.sleep(5)\n try:\n instance.update()\n except boto.exception.EC2ResponseError as e:\n print \"******************\"\n print \"Error caught in instance.update():\"\n print e.strerror\n print \"******************\"\n print \" %s done!\" % tag\n if returninfo:\n returninfo.put(tag)\n return instance", "def first_deployment_mode():\n env.initial_deploy = True", "def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def should_run_stage(stage):\n sequence = ['', # initial condition\n 'precheck_system', 'install_openafs', 'create_test_cell',\n 'shutdown_openafs', 'remove_openafs', 'purge_files']\n last = get_stage()\n if last == sequence[-1]:\n last = sequence[0] # next cycle\n if not stage in sequence[1:]:\n raise AssertionError(\"Internal error: invalid stage name '%s'\" % stage)\n if not last in sequence:\n filename = os.path.join(get_var('SITE'), \".stage\")\n raise AssertionError(\"Invalid stage name '%s' in file '%s'\" % (last, filename))\n if sequence.index(stage) <= sequence.index(last):\n logger.info(\"Skipping %s; already done\" % (stage))\n return False\n if sequence.index(stage) != sequence.index(last) + 1:\n logger.info(\"Skipping %s; out of sequence! 
last stage was '%s'\" % (stage, last))\n return False\n return True", "def _staging():\n env.environment = 'staging'\n env.server_name = 'project-staging.dimagi.com'\n env.hosts = [settings.STAGING_HOST]", "def _pre_deploy_exec(self):\n app.env['JUJU_PROVIDERTYPE'] = model_info(\n juju.get_current_model())['provider-type']\n\n pre_deploy_sh = os.path.join(app.config['spell-dir'],\n 'conjure/steps/00_pre-deploy')\n if os.path.isfile(pre_deploy_sh) \\\n and os.access(pre_deploy_sh, os.X_OK):\n utils.pollinate(app.session_id, 'J001')\n msg = \"Running pre-deployment tasks.\"\n app.log.debug(msg)\n app.ui.set_footer(msg)\n return run(pre_deploy_sh,\n shell=True,\n stdout=PIPE,\n stderr=PIPE,\n env=app.env)\n return json.dumps({'message': 'No pre deploy necessary',\n 'returnCode': 0,\n 'isComplete': True})", "def test_launch_deployment(self):\n pass", "def run_sagemaker_pytest_cmd(image):\n pytest_command, path, tag = generate_sagemaker_pytest_cmd(image)\n\n context = Context()\n with context.cd(path):\n context.run(f\"virtualenv {tag}\")\n with context.prefix(f\"source {tag}/bin/activate\"):\n context.run(\"pip install -r requirements.txt\", warn=True)\n context.run(pytest_command)", "def test_stage_post_boot(self, mock_stage_post_boot):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_post_boot.called)", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def power_on(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.start()\n self.instance_waiter.wait(instance, self.instance_waiter.RUNNING)\n return True", "def execute(helper, config, args):\n env_config = parse_env_config(config, args.environment)\n cname_prefix = env_config.get('cname_prefix', None)\n env_name = args.environment\n\n # change version\n if args.version_label:\n helper.deploy_version(env_name, args.version_label)\n if not args.dont_wait:\n helper.wait_for_environments(env_name, status='Ready', version_label=args.version_label)\n\n # update it\n env = parse_env_config(config, env_name)\n option_settings = parse_option_settings(env.get('option_settings', {}))\n helper.update_environment(env_name,\n description=env.get('description', None),\n option_settings=option_settings,\n tier_type=env.get('tier_type'),\n tier_name=env.get('tier_name'),\n tier_version=env.get('tier_version'))\n\n # wait\n if not args.dont_wait:\n helper.wait_for_environments(env_name, health='Green', status='Ready', version_label=args.version_label)\n\n # delete unused\n helper.delete_unused_versions(versions_to_keep=int( get(config, 'app.versions_to_keep', 10) ))", "def _execute_stage(self, index, stage, stop):\n if stop.is_set():\n _LOGGER.info(\"Stopped pipeline on group %s\", self._group)\n return\n _LOGGER.info(\" -> Running stage '%s' on group %s\", stage, self._group)\n if stage.name == 'on':\n self._group.on = True\n elif stage.name == 'off':\n self._group.on = False\n elif stage.name == 'hue':\n self._group.hue = stage.args[0]\n elif stage.name == 'saturation':\n self._group.saturation = stage.args[0]\n elif stage.name == 'color':\n self._group.color = Color(*stage.args)\n elif stage.name == 'brightness':\n self._group.brightness = stage.args[0]\n elif stage.name == 'temperature':\n self._group.temperature = stage.args[0]\n elif stage.name == 'transition':\n self._group.transition(*stage.args, **stage.kwargs)\n elif stage.name == 'white':\n self._group.white()\n elif stage.name == 'white_up':\n self._group.white_up()\n elif 
stage.name == 'white_down':\n self._group.white_down()\n elif stage.name == 'red_up':\n self._group.red_up()\n elif stage.name == 'red_down':\n self._group.red_down()\n elif stage.name == 'green_up':\n self._group.green_up()\n elif stage.name == 'green_down':\n self._group.green_down()\n elif stage.name == 'blue_up':\n self._group.blue_up()\n elif stage.name == 'blue_down':\n self._group.blue_down()\n elif stage.name == 'night_light':\n self._group.night_light()\n elif stage.name == 'link':\n self._group.link()\n elif stage.name == 'unlink':\n self._group.unlink()\n elif stage.name == 'flash':\n self._group.flash(**stage.kwargs)\n elif stage.name == 'repeat':\n self._repeat(index, stage, stop)\n elif stage.name == 'wait':\n time.sleep(*stage.args)\n elif stage.name == 'callback':\n stage.args[0](*stage.args[1:], **stage.kwargs)", "def launch_sagemaker_local_ec2_instance(image, ami_id, ec2_key_name, region):\n instance_type = assign_sagemaker_local_job_instance_type(image)\n instance_name = image.split(\"/\")[-1]\n instance = ec2_utils.launch_instance(\n ami_id,\n region=region,\n ec2_key_name=ec2_key_name,\n instance_type=instance_type,\n # EIA does not have SM Local test\n ei_accelerator_type=None,\n user_data=None,\n iam_instance_profile_name=ec2_utils.EC2_INSTANCE_ROLE_NAME,\n instance_name=f\"sm-local-{instance_name}\",\n )\n instance_id = instance[\"InstanceId\"]\n public_ip_address = ec2_utils.get_public_ip(instance_id, region=region)\n ec2_utils.check_instance_state(instance_id, state=\"running\", region=region)\n ec2_utils.check_system_state(instance_id, system_status=\"ok\", instance_status=\"ok\", region=region)\n return instance_id, public_ip_address", "def stage(branch=None, role='dev'):\n update_function = get_update_function()\n branch = branch or get_git_branch()\n\n project_path = fb_env.role(role, 'project_path')\n\n with cd(project_path):\n previous_head = update_function(branch)\n puts('Previous remote HEAD: {0}'.format(previous_head))\n run('./fbmvc dbdump')\n run('./fbmvc migrate latest')", "def test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def proceed(self):\n if (self.clock_cycles == 0):\n 
STAGE_FLAG['IBUS'] = AVAILABLE\n \n if (self.clock_cycles <= 0):\n if (REGISTERS['CLEAN']):\n STAGE_FLAG[self.name] = AVAILABLE\n STAGE_FLAG['IBUS'] = AVAILABLE\n REGISTERS['CLEAN'] = False\n return None\n if (STAGE_FLAG['ID'] == AVAILABLE):\n STAGE_FLAG[self.name] = AVAILABLE\n return Decode_Stage(self.instruction)\n\n return self", "def postdeploy_plan(plan_name):\n pass", "def test_baseimage_first(self):\n s = Stage()\n s += shell(commands=['abc'])\n s.name = 'bar'\n s.baseimage('foo')\n self.assertEqual(str(s), 'FROM foo AS bar\\n\\nRUN abc')", "def _provision(self, context, id_, data, resource_inventory):\n resource_type = data.get(eon_const.EON_RESOURCE_TYPE)\n LOG.info(\"[%s] Provision started.\" % id_)\n try:\n resource_id = resource_inventory.get(\n eon_const.EON_RESOURCE_ID)\n resource_password = resource_inventory.get(\n eon_const.EON_RESOURCE_PASSWORD)\n # Update the state to Provisioning post cobbler deploy call\n next_state = eon_const.EON_RESOURCE_STATE_PROVISIONING\n self.virt_utils.update_prop(context, id_,\n eon_const.EON_RESOURCE_STATE,\n next_state)\n hux_obj = HLMFacadeWrapper(context)\n model = self.virt_utils.create_servers_payload(data,\n resource_inventory)\n # Remove once cobbler deploy role check if resolved\n fake_role_key = eon_const.HLM_PAYLOAD_MAP[eon_const.SERVER_ROLE]\n fake_group_key = eon_const.HLM_PAYLOAD_MAP[eon_const.SERVER_GROUP]\n server_roles = vir_utils.get_hypervisor_roles(hux_obj,\n resource_type)\n server_groups = vir_utils.get_server_groups_with_no_child(hux_obj)\n model[fake_role_key] = server_roles[0]\n model[fake_group_key] = server_groups[0].get(\"name\")\n try:\n hux_obj.get_server_by_id(resource_id)\n except facade_excep.NotFound:\n LOG.info(\"[%s] Resource not found in input model. \"\n \"Creating full spec.\" % id_)\n hux_obj.create_server(model)\n hux_obj.commit_changes(\n resource_id, \"Provision KVM compute resource\")\n hux_obj.config_processor_run()\n hux_obj.cobbler_deploy(resource_id, resource_password)\n hux_obj.cobbler_deploy_status(resource_id)\n LOG.info(\"[%s] Provision complete\" % id_)\n # Update the state to Provisioned\n next_state = eon_const.EON_RESOURCE_STATE_PROVISIONED\n self.virt_utils.update_prop(context, id_,\n eon_const.EON_RESOURCE_STATE,\n next_state)\n except Exception as e:\n LOG.error(\"[%s] Provisioning failed. 
%s \" % (id_, e.message))\n self.virt_utils.update_prop(context, id_, 'state',\n eon_const.EON_RESOURCE_STATE_IMPORTED)\n self.virt_utils.update_prop(context, id_, 'type',\n eon_const.EON_RESOURCE_TYPE_BAREMETAL)\n hux_obj.revert_changes()\n hux_obj.delete_server(resource_id)\n hux_obj.commit_changes(resource_id, \"Delete KVM compute resource\")\n hux_obj.config_processor_run()\n extra_args = {\"extraVars\": {\n \"nodename\": id_\n }}\n LOG.info(\"Deleting node %s from cobbler db\" % str(id_))\n hux_obj.run_playbook('hlm_remove_cobbler_node',\n extra_args=extra_args)\n LOG.exception(e)", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def prepare_deploy():\n from fabdeploy.django import test as django_test\n django_test()\n git.add_commit_pull()\n git.push()", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. 
Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def execute(self, instruction):\n STAGE_FLAG[self.name] = OCCUPIED\n\n if (not self.isHit):\n if ((self.clock_cycles > 0) and\n (STAGE_FLAG['DBUS'] == AVAILABLE)):\n STAGE_FLAG['IBUS'] = OCCUPIED\n\n if (Memory_Stage.bus_access_flag):\n STAGE_FLAG['IBUS'] = OCCUPIED\n STAGE_FLAG['DBUS'] = AVAILABLE\n Memory_Stage.bus_access_flag = False\n\n if ((STAGE_FLAG['IBUS'] == OCCUPIED) or\n (self.isHit)):\n self.clock_cycles -= 1", "def test_update(self, client, stage, agent_token):\n stage_url = stage_url_for(stage)\n response = client.put(\n stage_url,\n headers={'x_dockci_api_key': agent_token},\n data={'success': 'false'},\n )\n\n assert response.status_code == 200\n\n response_data = json.loads(response.data.decode())\n assert 
response_data.pop('success') == False\n\n response = client.get(stage_url)\n response_data = json.loads(response.data.decode())\n assert response_data.pop('success') == False", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def deploy_plan(plan_name):\n pass", "def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'", "def _stage(self):\n\n pass", "def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def prepare_deployment(function, *args, **kwargs):\n if (os.environ.get(\"TRAVIS_PULL_REQUEST\", None) == \"false\" and\n os.environ.get(\"TRAVIS_BRANCH\", None) == \"master\"):\n function(*args, **kwargs)", "def assimilate(ip_addr, config, instance_data, deploypass):\n env.host_string = ip_addr\n env.user = 'root'\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n\n # Sanity check\n run(\"date\")\n\n distro = config.get('distro')\n # Set our hostname\n hostname = \"{hostname}\".format(**instance_data)\n run(\"hostname %s\" % hostname)\n if distro in ('ubuntu', 'debian'):\n run(\"echo %s > /etc/hostname\" % hostname)\n\n # Resize the file systems\n # We do this because the AMI image usually has a smaller filesystem than\n # the instance has.\n if 'device_map' in config:\n for mapping in config['device_map'].values():\n run('resize2fs {dev}'.format(dev=mapping['instance_dev']))\n\n # Set up /etc/hosts to talk to 'puppet'\n hosts = ['127.0.0.1 %s localhost' % hostname,\n '::1 localhost6.localdomain6 localhost6']\n hosts = StringIO.StringIO(\"\\n\".join(hosts) + \"\\n\")\n put(hosts, '/etc/hosts')\n\n if distro in ('ubuntu', 'debian'):\n put('releng.list', '/etc/apt/sources.list')\n run(\"apt-get update\")\n run(\"apt-get install -y --allow-unauthenticated puppet\")\n run(\"apt-get clean\")\n else:\n # Set up yum repos\n run('rm -f /etc/yum.repos.d/*')\n put('releng-public.repo', '/etc/yum.repos.d/releng-public.repo')\n run('yum clean all')\n run('yum install -q -y puppet')\n\n run(\"wget -O /root/puppetize.sh https://hg.mozilla.org/build/puppet/raw-file/default/modules/puppet/files/puppetize.sh\")\n run(\"chmod 755 /root/puppetize.sh\")\n put(StringIO.StringIO(deploypass), \"/root/deploypass\")\n put(StringIO.StringIO(\"exit 0\\n\"), \"/root/post-puppetize-hook.sh\")\n\n puppet_master = random.choice(instance_data[\"puppet_masters\"])\n run(\"PUPPET_SERVER=%s /root/puppetize.sh\" % puppet_master)\n\n if 'home_tarball' in instance_data:\n put(instance_data['home_tarball'], '/tmp/home.tar.gz')\n with cd('~cltbld'):\n sudo('tar xzf /tmp/home.tar.gz', user=\"cltbld\")\n sudo('chmod 700 .ssh', user=\"cltbld\")\n sudo('chmod 600 .ssh/*', user=\"cltbld\")\n run('rm -f /tmp/home.tar.gz')\n\n if \"buildslave_password\" in instance_data:\n # Set up a stub buildbot.tac\n sudo(\"/tools/buildbot/bin/buildslave create-slave /builds/slave \"\n \"{buildbot_master} {name} \"\n \"{buildslave_password}\".format(**instance_data), user=\"cltbld\")\n if instance_data.get(\"hg_shares\"):\n hg = \"/tools/python27-mercurial/bin/hg\"\n for share, bundle in instance_data['hg_shares'].iteritems():\n target_dir = '/builds/hg-shared/%s' % share\n sudo('rm -rf {d} && mkdir 
-p {d}'.format(d=target_dir), user=\"cltbld\")\n sudo('{hg} init {d}'.format(hg=hg, d=target_dir), user=\"cltbld\")\n hgrc = \"[path]\\n\"\n hgrc += \"default = http://hg.mozilla.org/%s\\n\" % share\n put(StringIO.StringIO(hgrc), '%s/.hg/hgrc' % target_dir)\n run(\"chown cltbld: %s/.hg/hgrc\" % target_dir)\n sudo('{hg} -R {d} unbundle {b}'.format(hg=hg, d=target_dir,\n b=bundle), user=\"cltbld\")\n\n run(\"reboot\")", "def main():\n\n if not os.environ.get('TRAVIS_PULL_REQUEST', 'false') == 'false':\n return\n\n git_config_setup()\n populate_source()\n build_and_deploy()", "def _stage1(self):\n self.start_progress()\n tasks = list(self._chain_dict(self._model.adjust_tasks))\n if len(tasks) == 0:\n self._stage2(self._no_adjustments_case())\n else:\n task = lambda : self._run_adjust_tasks(tasks)\n locator.get(\"pool\").submit(task, self._stage2)", "def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(context, exec_cmd=\"git checkout main\", pty=False, error_message=\"Failed to checkout main!\")\n\n run_cmd(context, exec_cmd=\"git pull origin main\", pty=False, error_message=\"Failed to pull from origin/main\")\n\n run_cmd(\n context, exec_cmd=f\"git tag v{IMAGE_VER}\", pty=False, error_message=f\"Failed to create the tag 'v{IMAGE_VER}'!\"\n )\n\n run_cmd(context, exec_cmd=\"git push --tags\", pty=False, error_message=f\"Failed to push the tag 'v{IMAGE_VER}'!\")", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! 
EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def test_stage_create_app(self, mock_stage_create_app):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_create_app.called)", "def run(tag, devmode, img_passwd_file, install_server_hostname,\n custom_cli_subnet, custom_db_subnet, clitests, builder):\n manager = Manager(\n 'run', tag, devmode=devmode, img_passwd_file=img_passwd_file,\n install_server_hostname=install_server_hostname,\n custom_cli_subnet=custom_cli_subnet, custom_db_subnet=custom_db_subnet,\n clitests=clitests, builder_hostname=builder)\n manager.run()", "def deploy(non_interactive, minion_id):\n executor = CephSaltExecutor(not non_interactive, minion_id)\n retcode = executor.run()\n sys.exit(retcode)", "def startami(image, instancetype, accesskey, secretkey, pkname):\n if not is_valid_instance_type(image, instancetype):\n raise ValueError(\"Invalid instance type: '%s'\" % instancetype)\n\n conn = EC2Connection(accesskey, secretkey)\n image = conn.get_image(get_image_id(image))\n reservation = image.run(instance_type=instancetype, key_name=pkname)\n instance = reservation.instances[0]\n\n waitForInstanceToRun(instance)\n\n # [AN] call script instanceStartup.py\n return str(instance.dns_name)", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def deploy(\n self,\n watch=True,\n with_mlrun=True,\n skip_deployed=False,\n is_kfp=False,\n mlrun_version_specifier=None,\n builder_env: dict = None,\n show_on_failure: bool = False,\n ):\n # connect will populate the config from the server config\n mlrun.db.get_run_db()\n if not self.spec.build.base_image:\n self.spec.build.base_image = self._default_image\n return super().deploy(\n watch=watch,\n with_mlrun=with_mlrun,\n skip_deployed=skip_deployed,\n is_kfp=is_kfp,\n mlrun_version_specifier=mlrun_version_specifier,\n builder_env=builder_env,\n show_on_failure=show_on_failure,\n )", "def main():\n if args.lock_bucket_name:\n if check_for_lock(args.lock_bucket_name, args.environment):\n check_error(\"Environment is locked. Unable to proceed.\")\n\n if args.singleasg:\n return handle_single_asg()\n\n if args.instance_count_step > args.instance_count:\n args.instance_count_step = args.instance_count\n\n if (args.instance_count_step % args.instance_count) != 0:\n check_error(\"Step counter %d must be divisable by %d\" % (args.instance_count_step, args.instance_count))\n\n environment_a = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[\"%s-a\" % args.environment], MaxRecords=1)\n environment_b = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[\"%s-b\" % args.environment], MaxRecords=1)\n\n if_verbose(\"I have AutoScaling Groups: %s and %s\" % (\"%s-a\" % args.environment, \"%s-b\" % args.environment))\n\n if (environment_a[\"AutoScalingGroups\"][0][\"DesiredCapacity\"] == 0) and (environment_b[\"AutoScalingGroups\"][0][\"DesiredCapacity\"] == 0):\n if args.lock_bucket_name:\n lock_environment(args.lock_bucket_name, args.environment)\n\n if args.zero:\n if args.lock_bucket_name:\n unlock_environment(args.environment)\n\n check_error(\"Nothinargs.lock_bucket_name, g to zero. 
Both ASGs are empty.\")\n\n logging.info(\"No active ASG; starting with %s-a\" % args.environment)\n\n if not args.dryrun:\n scale_up_application(\"%s-%s\" % (args.environment, \"a\"))\n scale_down_application(\"%s-%s\" % (args.environment, \"b\"))\n\n if args.lock_bucket_name:\n unlock_environment(args.lock_bucket_name, args.environment)\n\n elif len(environment_a[\"AutoScalingGroups\"][0][\"Instances\"]) > 0 and len(environment_b[\"AutoScalingGroups\"][0][\"Instances\"]) > 0:\n check_error(\"Failure. Unable to find an ASG that is empty. Both contain instances.\")\n\n elif environment_a[\"AutoScalingGroups\"][0][\"DesiredCapacity\"] > 0:\n if args.lock_bucket_name:\n lock_environment(args.lock_bucket_name, args.environment)\n\n if not args.zero:\n logging.info(\"Currently active ASG is %s-a; bringing up %s-b\" % (args.environment, args.environment))\n\n if not args.dryrun:\n scale_up_application(\"%s-%s\" % (args.environment, \"b\"))\n scale_down_application(\"%s-%s\" % (args.environment, \"a\"))\n else:\n scale_down_application(\"%s-%s\" % (args.environment, \"a\"))\n\n if args.lock_bucket_name:\n unlock_environment(args.lock_bucket_name, args.environment)\n\n elif environment_b[\"AutoScalingGroups\"][0][\"DesiredCapacity\"] > 0:\n if args.lock_bucket_name:\n lock_environment(args.lock_bucket_name, args.environment)\n\n if not args.zero:\n logging.info(\"Currently active ASG is %s-b; bringing up %s-a\" % (args.environment, args.environment))\n\n if not args.dryrun:\n scale_up_application(\"%s-%s\" % (args.environment, \"a\"))\n scale_down_application(\"%s-%s\" % (args.environment, \"b\"))\n else:\n scale_down_application(\"%s-%s\" % (args.environment, \"b\"))\n \n if args.lock_bucket_name:\n unlock_environment(args.lock_bucket_name, args.environment)\n\n if_verbose(\"Finished.\")\n if_verbose(\"Execution time: %d\" % global_execution_in_minutes())", "def test_release_deployment_run(self):\n pass", "def run_presub(stages):\n if is_presub(stages[0]):\n # Pop the first stage\n stage, stages = stages[0], stages[1:]\n # Execute it only if Python was invoked normally, not in a queue job\n if STAGE_ID is None:\n msg = \"Only the first stage can execute on the login node\"\n assert all([not is_presub(i) for i in stages]), msg\n alog.info(\"Pre-submit stage starting: %s\", stage)\n stage()\n alog.info(\"Pre-submit stage done: %s\", stage)\n return stages", "def main():\n try:\n name = sys.argv[1]\n asset_id = sys.argv[2]\n is_new = int(sys.argv[3]) != 0\n\n work_item = {\n 'Process-Name' : name,\n 'Asset-ID' : asset_id,\n 'Is-New' : is_new,\n }\n\n module = driver.init_module(name)\n processor = driver.init_processor(module)\n\n\n try:\n work_item.update(operations.instantiate_asset(asset_id))\n except models.Asset.DoesNotExist:\n logging.error('Asset no longer exists: %s' % asset_id)\n except S3ResponseError, error:\n if error.status == 404:\n logging.error('Could not find asset in S3: %s' % asset_id)\n else:\n logging.exception('Unexpected error!')\n raise\n else:\n new_items = handle_work_item(module, processor, work_item)\n operations.publish_work_item( *new_items )\n\n except NotReadyException, e:\n logging.info(e)\n sys.exit(1)\n\n except:\n logging.exception('Failed to run processor')\n sys.exit(1)\n\n finally:\n local_path = work_item.get('Local-Path')\n if local_path and os.path.exists(local_path):\n shutil.rmtree(os.path.dirname(local_path))", "def _transaction(context, stage, target_repoids, tasks, plugin_info, xfs_info,\n test=False, cmd_prefix=None, on_aws=False):\n\n # we do not 
want\n if stage not in ['dry-run', 'upgrade']:\n create_config(\n context=context,\n target_repoids=target_repoids,\n debug=config.is_debug(),\n test=test, tasks=tasks,\n on_aws=on_aws\n )\n backup_config(context=context)\n\n # FIXME: rhsm\n with guards.guarded_execution(guards.connection_guard(), guards.space_guard()):\n cmd_prefix = cmd_prefix or []\n common_params = []\n if config.is_verbose():\n common_params.append('-v')\n if rhsm.skip_rhsm():\n common_params += ['--disableplugin', 'subscription-manager']\n if plugin_info:\n for info in plugin_info:\n if stage in info.disable_in:\n common_params += ['--disableplugin', info.name]\n env = {}\n if get_target_major_version() == '9':\n # allow handling new RHEL 9 syscalls by systemd-nspawn\n env = {'SYSTEMD_SECCOMP': '0'}\n\n # We need to reset modules twice, once before we check, and the second time before we actually perform\n # the upgrade. Not more often as the modules will be reset already.\n if stage in ('check', 'upgrade') and tasks.modules_to_reset:\n # We shall only reset modules that are not going to be enabled\n # This will make sure it is so\n modules_to_reset = {(module.name, module.stream) for module in tasks.modules_to_reset}\n modules_to_enable = {(module.name, module.stream) for module in tasks.modules_to_enable}\n module_reset_list = [module[0] for module in modules_to_reset - modules_to_enable]\n # Perform module reset\n cmd = ['/usr/bin/dnf', 'module', 'reset', '--enabled', ] + module_reset_list\n cmd += ['--disablerepo', '*', '-y', '--installroot', '/installroot']\n try:\n context.call(\n cmd=cmd_prefix + cmd + common_params,\n callback_raw=utils.logging_handler,\n env=env\n )\n except (CalledProcessError, OSError):\n api.current_logger().debug('Failed to reset modules via dnf with an error. Ignoring.',\n exc_info=True)\n\n cmd = [\n '/usr/bin/dnf',\n 'rhel-upgrade',\n stage,\n DNF_PLUGIN_DATA_PATH\n ]\n try:\n context.call(\n cmd=cmd_prefix + cmd + common_params,\n callback_raw=utils.logging_handler,\n env=env\n )\n except OSError as e:\n api.current_logger().error('Could not call dnf command: Message: %s', str(e), exc_info=True)\n raise StopActorExecutionError(\n message='Failed to execute dnf. 
Reason: {}'.format(str(e))\n )\n except CalledProcessError as e:\n api.current_logger().error('Cannot calculate, check, test, or perform the upgrade transaction.')\n _handle_transaction_err_msg(stage, xfs_info, e, is_container=False)\n finally:\n if stage == 'check':\n backup_debug_data(context=context)", "def beehive_deploy(self, subsystem, vassal):\n run_data = {\n u'subsystem':subsystem,\n u'vassal':u'%s-%s' % (subsystem, vassal),\n u'tags':[u'deploy']\n } \n self.ansible_playbook(u'beehive', run_data, \n playbook=self.beehive_playbook)", "def switchToAppInstaller(dev):\n print('Switching to app install mode')\n SonyExtCmdCamera(dev).switchToAppInstaller()", "def test_deploy_with_remote_host(self):\n remote_host = CONF.tests.remote_host\n transportfile = self._make_transport_file()\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid,\n self.image_name,\n transportfiles=transportfile,\n remotehost=remote_host)\n self.sdkapi.guest_start(self.userid)\n powered_on = self.test_util.wait_until_guest_in_power_state(\n self.userid, 'on')\n self.assertTrue(powered_on)", "def sandbox(verbose, app, archive):\n return _deploy_in_mode(\n mode=\"sandbox\", verbose=verbose, log=log, app=app, archive=archive\n )", "def YumInstall(vm):\n vm.RobustRemoteCommand('sudo yum {}'.format(REMOVE_MPI_CMD))\n _Install(vm)", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def deploy(verbose, app, archive):\n return _deploy_in_mode(\n mode=\"live\", verbose=verbose, log=log, app=app, archive=archive\n )", "def deploy_vm(context, vm):\n monitor = context.getMonitoringService().getVirtualMachineMonitor()\n print \"Deploying virtual machine %s... This may take some time.\" \\\n % vm.getInternalName()\n vm.deploy()\n monitor.awaitCompletionDeploy(vm)\n return refresh_vm(context, vm)", "def lambda_handler(event, context):\n\n # Get details from the event.\n job = event[\"CodePipeline.job\"]\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params[\"AssumeRoleArn\"]\n image_parameter_name = user_params[\"ImageParameterName\"]\n stack_name = user_params[\"StackName\"]\n template_filename = user_params[\"TemplateFilename\"]\n\n # Create client in the pipeline account.\n pipeline_s3_client = get_artifact_s3_client(job)\n\n # Create clients in the target account.\n target_session = get_session(\n role_arn=assume_role_arn, session_name=\"prepare-ami-deployment\"\n )\n target_cfn_client = target_session.client(\"cloudformation\")\n target_ssm_client = target_session.client(\"ssm\")\n\n # Download the input artifact zip file, read manifest.json from it,\n # and get the AMI it references. 
Also look up the associated image name.\n with download_zip_file(\n s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key\n ) as zip_file:\n image_detail_string = zip_file.read(\"imageDetail.json\").decode(\"utf-8\")\n log(\"IMAGE_DETAIL_STRING\", image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail[\"ImageURI\"]\n log(\"IMAGE\", image)\n\n # Update the SSM parameters with the image,\n # to be used by the CloudFormation deployment stage of the pipeline.\n target_ssm_client.put_parameter(\n Name=image_parameter_name, Value=image, Type=\"String\", Overwrite=True\n )\n\n # Write the CloudFormation stack's template to the output artifact location,\n # to be used by the CloudFormation deployment stage of the pipeline.\n template = get_cloudformation_template(\n cfn_client=target_cfn_client, stack_name=stack_name\n )\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)", "def deploy():\n comp = do_pack()\n\n if (not comp):\n return False\n return do_deploy(comp)", "def run(self, image, flavor, **kwargs):\n self._boot_servers(image, flavor, **kwargs)", "def ChurnBFEBS(self):\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.image = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n self.Churn()", "def assembler(self, assembler: osbuild.Stage):", "def move_stage(stage, dest):\n files = gather_required_files(stage.path)\n files.add(stage.path)\n\n if all(exists(fn) for fn in files):\n for fn in files:\n shutil.move(fn, dest)\n return True\n\n return False", "def test_enterprise_add_new_pi_to_vmi(self):\n validation = 'enterprise'\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites()\n self._test_add_new_pi_to_vmi(\n proj_obj, fabric_obj, pr_obj, validation)", "def test_execute_deployment(self):\n pass" ]
[ "0.600044", "0.58983445", "0.5597183", "0.55506766", "0.55401236", "0.5401714", "0.5389094", "0.53601116", "0.5331389", "0.52924347", "0.526997", "0.52426094", "0.5238283", "0.5174726", "0.5095269", "0.5092394", "0.50896496", "0.5085712", "0.5071455", "0.50698596", "0.50681335", "0.5065932", "0.5060449", "0.50470936", "0.50415754", "0.5012595", "0.5008688", "0.50085163", "0.49992743", "0.49952036", "0.49770635", "0.49754268", "0.4965856", "0.4964872", "0.4963427", "0.49626586", "0.49228397", "0.49111164", "0.4908201", "0.49004397", "0.48952445", "0.4887144", "0.48768142", "0.48609605", "0.48605108", "0.48559693", "0.48263872", "0.4813811", "0.48120168", "0.48116592", "0.48066795", "0.48060095", "0.48016962", "0.48001269", "0.47991198", "0.47978842", "0.4797243", "0.47939733", "0.4781725", "0.47771168", "0.4771939", "0.4771126", "0.47634038", "0.47600105", "0.47596616", "0.4732919", "0.4727124", "0.47268248", "0.4716641", "0.47103205", "0.47101623", "0.47036865", "0.4691585", "0.46886387", "0.4677814", "0.46679136", "0.466013", "0.4641232", "0.4640465", "0.46355155", "0.46335033", "0.46321407", "0.46305576", "0.4622525", "0.46192658", "0.46180215", "0.46130976", "0.45960537", "0.45797604", "0.45716935", "0.456734", "0.45666823", "0.45598385", "0.45589554", "0.45555836", "0.455459", "0.45512828", "0.45485744", "0.45463505", "0.45406875" ]
0.7794168
0
Define data used in test.
def setUp(self): pwd = self.get_script_path() self.test_drug_info_file = pwd+'/../insight_testsuite/tests/my_test/input/test_input_file.txt' self.test_raw_tuple= [('jordanmichael', 'A', 23.00), ('jameslebron', 'C', 23.10), ('bryantkobe', 'B', 8), ('bryantkobe', 'C', 24.9)] self.test_sorted_tuple = sorted(self.test_raw_tuple, key=operator.itemgetter(1)) #print self.test_sorted_tuple self.test_dict = {'C':2, 'A':1, 'B':1} self.test_num_unique_name = [1, 1, 2] self.test_total_cost_each_drug = [23.00,8.00,48.00] self.test_output_file = pwd+'/../insight_testsuite/tests/my_test/output/test_output_file_1.txt'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUp(self):\n\n self.data_list = [\n \"hello\", \"world\", \"funilrys\", \"funceble\", \"PyFunceble\", \"pyfunceble\"\n ]\n self.data = \"Hello, this is Fun Ilrys. I just wanted to know how things goes around the tests.\" # pylint: disable=line-too-long", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def test_process_data(self):\n pass", "def setup_class(self):\n self.data_type = 'pytest'", "def setUp(self):\n self.dataset = get_test_dataset()", "def setUpTestData(cls):\n # volunteer user\n common.initialize_empty_volunteer()", "def getTestData(self):\n raise NotImplementedError", "def test_data(self):\n return self._test_data", "def _load_test_data(self):\n self._save_test_data()", "def setUp(self):\n self.data = DatabaseIntermediary()", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def test_data(self, data):\n print('-'*30)\n print('Starting test: {}'.format(data['name']))\n self.set_resolution(data['resolution']['width'], data['resolution']['height'])\n self.test_actions(data['actions'])\n print('Test finished')\n print('-'*30)", "def setUpClass(cls):\n super(TestPatientStatsHistory, cls).setUpClass()\n cls.stats_data = {\n \"num_patients_visited\": 1,\n \"num_patients_home_quarantine\": 2,\n \"num_patients_isolation\": 3,\n \"num_patient_referred\": 4,\n }", "def setUp(self):\n self.data = {'username': 'seiph',\n 'first_name': 'Jean',\n 'last_name': 'Robert',\n 'email': '[email protected]',\n 'password1': 'kevin1234',\n 'password2': 'kevin1234'}", "def test_set_data_attributes(self):\n\n self.mediator.get_results()", "def setUp(self):\n self.dataset = self.dataset_cls()", "def setUp(self):\n self.TestData = array([0,1,1,4,2,5,2,4,1,2])\n self.NoSingles = array([0,2,2,4,5,0,0,0,0,0])\n self.NoDoubles = array([0,1,1,4,5,0,0,0,0,0])", "def setUpClass(cls):\n super(Module05Tests, cls).setUpClass()\n cls.datasets = {\n 0: DATASETS_ROOT + 'diffusion_synthetic_normal_L8_r2_slices_41_50_gr15_b1200',\n 1: DATASETS_ROOT + 'filtered',\n 2: DATASETS_ROOT + 'noise'\n }\n cls.data = smns.load_object(file_path=cls.datasets[2])", "def test_alien_data(self):", "def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']", "def setUpTestData(cls) -> None:\n\n # Define base url\n cls.url = BASE_URL + '/'\n\n # Make 9 \"normal\" authors.\n cls.authors: typing.List[Author] = [\n create_author() for _ in range(9)\n ]\n\n # Make 1 superuser author.\n cls.super_author: Author = create_author(True)\n\n # Serialize data once so that it's not called in ever test\n cls.serialized_data = AuthorListSerializer(Author.objects.all(), many=True).data", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def test_data(self):\n if self._test_data is None:\n self._load_test_data()\n if self._swapped_test_data is None:\n self._swapped_test_data = {}\n for key, value in self._test_data.items():\n self._swapped_test_data[key] = value\n return self._swapped_test_data", "def prepare_data(self):", "def 
setUp(self):\n self.report = dict(title=\"Report 1\", url=\"https://report1\")\n self.data_model = dict(\n metrics=dict(metric_type=dict(name=\"type\")),\n sources=dict(\n quality_time=dict(\n parameters=dict(\n status=dict(\n api_values={\n \"target met (green)\": \"target_met\",\n \"near target met (yellow)\": \"near_target_met\",\n \"target not met (red)\": \"target_not_met\",\n \"technical debt target met (grey)\": \"debt_target_met\",\n \"unknown (white)\": \"unknown\",\n }\n )\n )\n )\n ),\n )", "def setUpTestData(cls):\n # Set up non-modified objects used by all test methods\n Prohibited.objects.create(credential_type=2, credential='google.com')\n Prohibited.objects.create(credential_type=1, credential='127.0.0.1')", "def setUp(self):\n patientgen = PatientsGenerator(0, 1, 0, 'a')\n self.record = patientgen.data.find('record')\n self.gender_sex = patientgen.gender_sex_list\n self.ethnicities = patientgen.ethnicity_list\n # self.female_names = patientgen.data_generator.first_names_female\n # self.male_names = patientgen.data_generator.first_names_male\n # self.last_names = patientgen.data_generator.last_names", "def setUp(self):\n self.test_data = self.read_data('test_data/clients.txt')", "def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])", "def test_data_object_vaporise(self):\n pass", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def setUpClass(cls):\n dt_index = pd.date_range(start=datetime(2019, 1, 1, 0, 1), periods=15,\n freq='1Min')\n\n # Create a temperature array with an average of 2.\n temp = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]\n\n # Create ghi array with an average of 3.\n ghi = [2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4]\n\n # Create DataFrame.\n cls.weather_data = pd.DataFrame({'temperature': temp, 'ghi': ghi},\n index=dt_index)\n\n # Create expected data.\n dt_index_2 = pd.date_range(start=datetime(2019, 1, 1, 0, 15), periods=1,\n freq='15Min')\n cls.expected_data = pd.DataFrame({'temperature': [2], 'ghi': [3]},\n index=dt_index_2)", "def fixture_example_data():\n import_example_data()", "def test_data():\n return {\"David Andrews\" : [200.50, 400.00, 250.75],\n \"John Goodfellow\" : [25.00, 175.50],\n \"Mary Suzuki\" : [75.00, 125.00, 250.00],\n \"Bonney Lake\" : [500.50, 700.75, 500.25],\n \"DeMarcus Rollins\" : [155.00, 165.00]\n }", "def set_data(self, data):\n\n pass", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 
'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def test_init(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType(**data)\n for key, value in data.items():\n assert getattr(observation_type, key) == value", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def test_data_in_param(self):", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def setUpTestData(cls):\n\t\thierarchy = Hierarchy(name=\"TestHierarchy\", graph_representation=\"{}\")\n\t\thierarchy.save()\n\t\tevent_type = EventType(name=\"asd\", hierarchy=hierarchy)\n\t\tevent_type.save()\n\t\tquery = Query(\n\t\t\thierarchy=hierarchy, query_string=\"INSERT INTO asd SELECT * FROM asd\",\n\t\t\toutput_event_type=event_type,\n\t\t\teqmn_representation=\"{'output': {'name': 'asd', 'select': '*'}, 'input': {'single': 'asd'}}\")\n\t\tquery.save()\n\t\tquery.input_event_types.add(event_type)\n\t\tquery.save()", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def define_testdata():\n wata_dict = {\n # info taken from main_hdr dict\n 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'],\n 'date_obs': ['2022-06-22'],\n 'visit_id': ['V09999001001P0000000002101'],\n 'tafilter': ['F110W'],\n 'detector': ['NRS1'],\n 'readout': ['NRSRAPID'],\n 'subarray': ['FULL'],\n # info taken from ta_hdr dict\n 'ta_status': ['SUCCESSFUL'],\n 'status_reason': ['-999'],\n 'star_name': ['-999'],\n 'star_ra': [-999.0],\n 'star_dec': [-999.0],\n 'star_mag': [-999.0],\n 'star_catalog': [-999],\n 'planned_v2': [-999.0],\n 'planned_v3': [-999.0],\n 'stamp_start_col': [-999],\n 'stamp_start_row': [-999],\n 'star_detector': ['-999'],\n 'max_val_box': [-999.0],\n 'max_val_box_col': [-999.0],\n 'max_val_box_row': [-999.0],\n 'iterations': [-999],\n 'corr_col': [-999.0],\n 'corr_row': [-999.0],\n 'stamp_final_col': [-999.0],\n 'stamp_final_row': [-999.0],\n 'detector_final_col': [-999.0],\n 'detector_final_row': [-999.0],\n 'final_sci_x': [-999.0],\n 'final_sci_y': [-999.0],\n 'measured_v2': [-999.0],\n 'measured_v3': [-999.0],\n 'ref_v2': [-999.0],\n 'ref_v3': [-999.0],\n 'v2_offset': [-999.0],\n 'v3_offset': [-999.0],\n 'sam_x': [-999.0],\n 'sam_y': [-999.0],\n }\n # create the additional arrays\n bool_status, status_colors = [], []\n for tas, do_str in zip(wata_dict['ta_status'], wata_dict['date_obs']):\n if 'unsuccessful' not in tas.lower():\n 
bool_status.append(1)\n status_colors.append('blue')\n else:\n bool_status.append(0)\n status_colors.append('red')\n\n # add these to the bokeh data structure\n wata_dict['ta_status_bool'] = bool_status\n wata_dict['status_colors'] = status_colors\n\n # create the dataframe\n wata_data = pd.DataFrame(wata_dict)\n return wata_data", "def setup(self):\n self.rows = test_helpers.fetch_sample_teradata_rows()\n self.csv_path = 'not/a/real/path'", "def setUp(self):\n self.test_data = MockPyMySqlDataSource().load()", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def putTestData(self):\n # print 'Not Yet implement / sample DB table create'\n tkMessageBox.showinfo(\"Message\", \"Sample DB Table Create\")", "def __fake_data__(self):\n\n # Set directory for configuration files\n self.configFilePath = q.system.fs.joinPaths(q.dirs.varDir, 'tftproot')\n \n # Add some share's\n for i in xrange(3):\n share = NFSShare()\n share.name = 'share-%s' % q.base.idgenerator.generateRandomInt(0, 255)\n self.shares[share.name] = share", "def test_prep_new_data(self):\n pass", "def setUpTestData(cls):\n User.objects.create_user('Claire', '[email protected]', '12345678')\n User.objects.create_user('Georgie', '[email protected]', '12345678')\n User.objects.create_user('Tristan', '[email protected]', '12345678')\n\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 1\",\n category=\"Food\",\n amount=20,\n converted_amount=20,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Georgie\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 2\",\n category=\"Food\",\n amount=10,\n converted_amount=10,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Claire\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 3\",\n category=\"Food\",\n amount=30,\n converted_amount=30,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Tristan\"\n )", "def set_predefined_data(self, data: dict) -> None:\n\n metainfo = {\n self.META_KEYS[k]: v\n for (k, v) in data.items() if k in self.META_KEYS\n }\n self.set_metainfo(metainfo)\n\n data = {\n self.DATA_KEYS[k]: v\n for (k, v) in data.items() if k in self.DATA_KEYS\n }\n self.set_tensor_data(data)", "def get_data():\n pass", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "def setUp(self):\n self.aoSamples = [ModelDataBase(),];", "def setup(self, ds):\n pass", "def setUpTestData(cls):\n cls.user = UserFactory()\n cls.auth = AuthFactory()\n\n cls.device = TOTPDevice.objects.create(user=cls.user)\n cls.relate = TOTPDevice.challenge.objects.create(\n device=cls.device, token=cls.auth\n )\n\n cls.algorithm = TOTPAlgorithm()", "def data_manager_fixture():\n\n class DataManager:\n def __init__(self):\n self.gen = 1000\n self.cfg = get_cfg_defaults()\n mode = \"test_inference\"\n self.dataset = Dataset(None, self.cfg, mode)\n self.auto_anchors = AutoAnchors(self.dataset, self.cfg.model, self.gen)\n self.k_points = torch.ones((12, 2)) * 2.0\n self.wh = torch.ones((1000, 2)) * 2.0\n\n return DataManager()", "def setup(self):\n 
self.testInst = pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'variable_profiles'\n self.test_val_length = 15\n\n return", "def newTestData(self):\n self.newTab( extension = TestData.TYPE, repoDest=UCI.REPO_UNDEFINED )", "def setUp(self):\n\n # Create a data pipe.\n self.interpreter.pipe.create('test', 'mf')\n\n # Create a temporary file name.\n ds.tmpfile = mktemp()", "def setUpTestData(cls):\n cls.post = PostFactory()", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def prepare_data(self, config: TreeConfigParser) -> None:\n self.data = Data(config)\n self.data.prepare_input()\n self.data.prepare_output()", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def setUp(self):\n self.t = True\n self.f = False\n self.value = 25", "def setUpClass(cls):\n # print(\"SWEData.setUpClass\", flush=True)\n super(TestIonSpecificsOptions, cls).setUpClass()\n # print(cls.data.iloc[:, :7])\n # print(cls.data.columns.values)\n cls.data = cls.data.xs(\"\", axis=1, level=\"N\")", "def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3", "def __init__(self, name, data):\n super(PoissonEM, self).__init__(name)\n\n self._register_variable('mock_data', differentiable=True)\n self.data = data\n self.update_var_param_types(mock_data=ArrayParameter)\n self._set_original_variables()", "def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = 
random.choice(movies)\n actor_to_update.movies.append(movie_to_update)", "def mock_rdata(): \n return {\n \"authors\": [{\"full_name\": \"N. Ame\"}],\n \"owners\": [{\"full_name\": \"N. Ame\"}],\n \"submitter\": {\"full_name\": \"N. Ame\"},\n \"paper_id\": \"1234.56789\",\n \"title\": \"some title\",\n \"abstract\": \"An abstract with math $/alpha * /alpha$ for you.\",\n }", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data", "def setUp(self):\n self.params_1 = {\"S\":100,\"K\":100,\"Vol\":0.2,\"R\":0.05,\"T\":1}\n self.params_2 = {\"S\":100,\"K\":100,\"Vol\":0.2,\"R\":0.00,\"T\":1}\n pass", "def setUp(self):\n self.user_data = {\n 'username': 'test_case_user',\n 'first_name': 'Test',\n 'last_name': 'Case',\n 'email': '[email protected]',\n 'password': 'TestCaseUserPassword123',\n 'password_confirm': 'TestCaseUserPassword123'\n }\n\n self.invalid_user_data = {\n 'username': '',\n 'first_name': True,\n 'last_name': 12333,\n 'email': False,\n 'password': 1333,\n 'password_confirm': 'hellllllooooo'\n }", "def setUp(self):\n\n # Load the data\n dataset = tagging.data.DataSet.from_fits(DATA_PATH, extension=1)\n\n # Assign all as field.\n dataset.data[\"FIELD/CLUSTER\"] = \"FIELD\"\n\n # [TODO] Delete benchmarks\n clusters = (\"Cha_I\", \"Br81\", \"M15\", \"NGC2808\", \"NGC6633\", \"IC4665\", \n \"NGC104\", \"gamma2_Vel\", \"GJ880\", \"NGC4815\", \"NGC2547\", \"NGC5927\",\n \"NGC4833\", \"NGC1851\", \"NGC2243\", \"NGC3532\", \"NGC6752\", \"Br25\", \n \"NGC4372\", \"NGC6705\", \"M67\", \"NGC2516\", \"Trumpler20\")\n\n # Assign all as members.\n for cluster in clusters:\n members = dataset.assign_cluster_members(cluster,\n lambda row: row[\"TARGET\"].startswith(cluster))\n\n # Special hack:\n if cluster == \"Trumpler20\":\n members += dataset.assign_cluster_members(cluster,\n lambda row: row[\"TARGET\"].startswith(\"Trumpler_20\"))\n\n logger.info(\"Assigned stars to {} clusters\".format(len(clusters)))\n self.dataset = dataset\n return None", "def test_create_device_data(self):\n pass", "def setUp(self):\n super().setUp()\n self.percent = self.params.get(\"size\", '/run/ior/data_percentage/*')", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert 
data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def setUpTestData(cls):\n countries = [\"MX\", \"CHL\", \"USA\", \"PER\", \"COL\"]\n slack_user_ids = [\"UP0918MAV\", \"UP0918MAV\", \"UP0918MAV\", None, None]\n cls.menu = Menu.objects.create(available_on=date.today())\n for count in range(5):\n user = User.objects.create(username=f\"johny.doe {count}\")\n Employee.objects.create(\n user=user, country=countries[count], slack_user_id=slack_user_ids[count]\n )", "def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:\n return data", "def __init__(self, dat):\n self.data = dat", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def test_init(self):\n xtal_model_data = XtalModelData(self.params)\n\n assert xtal_model_data.pdb == self.params.input.pdb\n\n assert xtal_model_data.mtz == self.params.input.mtz\n\n # TODO Assert utilised method calls of these classes\n # Assert is innstance causses issues if called from somewhere else\n\n self.assertIsInstance(xtal_model_data.xrs, cctbx.xray.structure)\n\n self.assertIsInstance(\n xtal_model_data.inputs, mmtbx.utils.process_command_line_args\n )\n\n self.assertIsInstance(\n xtal_model_data.crystal_gridding, cctbx.maptbx.crystal_gridding\n )\n\n self.assertIsInstance(xtal_model_data.fmodel, mmtbx.f_model.f_model.manager)", "def test_data(self):\n\n # Boolean tests\n is_datas = [True, False]\n for is_data in is_datas:\n self.colorspace.setIsData(is_data)\n self.assertEqual(is_data, self.colorspace.isData())\n\n # Wrong type tests\n wrong_is_datas = [['test'], 'test']\n for wrong_is_data in wrong_is_datas:\n with self.assertRaises(TypeError):\n self.colorspace.setIsData(wrong_is_data)", "def setUpTestData(cls):\n cls.emulate_off_api_manager_categories()\n cls.emulate_off_api_manager_products()\n cls.db_manager = Command()", "def set_data(self, data):\n self.data = data", "def setUpClass(cls):\n cls.celltype_analyse = \"Adipocyte - Breast\"\n cls.data_type = \"promoters\"\n cls.sample_type = \"primary cells\"\n cls.algorithm = \"heuristic\"\n cls.k = 4\n cls.thresholds = (0.5, 0, 0) # act, inact, and sparseness, respectively\n cls.parsed = True\n cls.files_path = \"test\"", "def setUp(self):\n\n self.to_test = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"Py\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def test_init(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(len(s.data),89)", "def setUp(self):\n temperature = np.array([[185.0, 260.65, 273.15, 338.15]], dtype=np.float32)\n self.temperature = set_up_variable_cube(temperature)\n humidity = np.array([[60.0, 70.0, 75.0, 80.0]], dtype=np.float32)\n self.relative_humidity = set_up_variable_cube(\n humidity, name=\"relative_humidity\", units=\"%\"\n )\n pressure = np.array([[1.0e5, 9.9e4, 9.85e4, 9.8e4]], dtype=np.float32)\n self.pressure = set_up_variable_cube(pressure, name=\"air_pressure\", units=\"Pa\")\n self.expected_wbt_data 
= np.array(\n [[185.0, 259.88306, 271.78006, 333.96066]], dtype=np.float32\n )", "def set_data(data: list):\n return {\"data\": data}" ]
[ "0.7128387", "0.7128387", "0.7128387", "0.7128387", "0.6960889", "0.6884261", "0.68240964", "0.68195206", "0.6783465", "0.6741896", "0.6734708", "0.6721453", "0.6721293", "0.67041653", "0.6678538", "0.66765255", "0.6659576", "0.6627872", "0.66132146", "0.66129124", "0.6599503", "0.6585318", "0.65763825", "0.6551341", "0.6544381", "0.6543199", "0.6525274", "0.65081346", "0.64930356", "0.64919895", "0.6490789", "0.6479884", "0.64746106", "0.64643615", "0.6457175", "0.63798684", "0.6367838", "0.6360927", "0.6347542", "0.63465816", "0.63310045", "0.6327175", "0.631875", "0.63132805", "0.6308957", "0.62938005", "0.62860936", "0.62828594", "0.6277726", "0.6277484", "0.626915", "0.6267309", "0.6254843", "0.6248604", "0.62266755", "0.62187123", "0.62177885", "0.6211377", "0.6183837", "0.6183319", "0.6179875", "0.6172943", "0.6171741", "0.6167956", "0.6163564", "0.6154802", "0.6154608", "0.61513114", "0.6150872", "0.6125889", "0.6125281", "0.6116443", "0.61145896", "0.6106092", "0.6093725", "0.60858417", "0.6082145", "0.60728604", "0.6062426", "0.6054276", "0.6052349", "0.60437196", "0.60390973", "0.60374653", "0.60332024", "0.60317975", "0.6030296", "0.60299546", "0.602483", "0.6020908", "0.6014875", "0.6009933", "0.60070837", "0.6000442", "0.59961647", "0.59934133", "0.5992084", "0.5982352", "0.5977045", "0.59626526", "0.59513277" ]
0.0
-1
Line is correctly split and missing/corrupted fields are checked.
def test_read_line(self): expected_data = ['\"lu, jr\"','ming-yuan','\"DRUG,1\"',135.999,True,3] input_string = '001,\"LU, JR\",MING-YUAN,\"DRUG,1\",135.999\n' data = read_line(input_string) self.assertEqual(expected_data[0],data[0]) self.assertEqual(expected_data[1],data[1]) self.assertEqual(expected_data[2],data[2]) self.assertAlmostEqual(expected_data[3],data[3]) self.assertEqual(expected_data[4],data[4]) self.assertAlmostEqual(expected_data[5],data[5]) #Check for odd numers of quotation marks input_string = '001,\"LU\",\"MING-YUAN,DRUG1,135\n' data = read_line(input_string) self.assertFalse(data[4]) #Check for missing fields input_string = '001,,MING-YUAN,DRUG1,135\n' data = read_line(input_string) self.assertFalse(data[4]) input_string = '001,LU,MING-YUAN,DRUG1,\n' data = read_line(input_string) self.assertFalse(data[4]) #Check for corrupted fields input_string = '001x,LU,MING-YUAN,DRUG1,135\n' data = read_line(input_string) self.assertFalse(data[4]) input_string = '001,LU,MING-YUAN,DRUG1,1ag5\n' data = read_line(input_string) self.assertFalse(data[4])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_Lines(self):\n\n pass", "def check_record(idline,nclline,sepline,qualiline):\n return check_idline(idline) and check_sepline(sepline)", "def is_line(self): \n return False", "def check_line(self):\n if not self.hosts and not self.line:\n self.msg(\"There is no line here. You can create one with +line/createline.\")\n return\n return True", "def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )", "def checkLineStandardCompliance(line):\n if len(line) != 5:\n print(line + \" HAS WRONG NUMBER OF COLUMNS: \" + str(len(line)))\n exit(5)", "def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True", "def split_line_robust(line):\n\n line_split0 = [x.rstrip('\\n') for x in line.split(' ') if x]\n line_split1 = [x.split('\\t') for x in line_split0 if x]\n line_split = []\n for l_one in line_split1:\n for l_two in l_one:\n if l_two: line_split.append(l_two)\n return(line_split)", "def validate_line(self, line):\n splitline = line.split('\\t')\n if len(splitline) is not 9:\n return []\n if not \"ID\" in splitline[8]:\n return []\n if not int(splitline[3]) <= int(splitline[4]):\n return []\n # Everything except genes must have parent id\n if not \"Parent\" in splitline[8] and not splitline[2] == \"gene\":\n return []\n return splitline", "def testSplitLine_one_split():\n line = np.array([1, 2, 3, 3, 3, 4, 5])\n split_lines = splitLine(line)\n if np.all(np.concatenate(split_lines) == line):\n print(str(np.concatenate(split_lines)), \" == \", str(line))\n else:\n print(str(np.concatenate(split_lines)), \" != \", str(line))", "def test_parse_no_fields(self):\n received = self._p.parse_line(self._line)\n expected = {}\n msg = 'Line parse with no fields should return None'\n self.assertDictEqual(received, expected, msg)", "def check_line(self, line):\n line = line.rstrip('\\r\\n')\n try:\n line = line.decode('utf8')\n except:\n pass\n return self.rules['all'].validate(line)", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in 
config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # 
Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: 
\"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def testSplitLine_two_splits():\n line = np.array([1, 2, 3, 3, 3, 4, 4, 4, 5])\n split_lines = splitLine(line)\n if np.all(np.concatenate(split_lines) == line):\n print(str(np.concatenate(split_lines)), \" == \", str(line))\n else:\n print(str(np.concatenate(split_lines)), \" != \", str(line))", "def test_line_split():\n for _x in range(100):\n delim = choice((\"=\", \"|\", \",\", \"$\", \".\", \"/\"))\n l_str = delim.join([random_str(5, 10) for x in range(30)])\n line = Line(l_str, random_str(10, 20), randint(1, 10000))\n # Split the string\n l_parts = line.split(delim)\n exp_parts = l_str.split(delim)\n assert len(l_parts) == len(exp_parts)\n for l_part, x_part in zip(l_parts, exp_parts):\n assert isinstance(l_part, Line)\n assert l_part == x_part\n assert l_part.file == line.file\n assert l_part.number == line.number", "def emptyline(self):", "def _raise_if_not_line(self, l: float):\n # todo: check, if line exists -> if not, causes crash (raise exception before!)\n pass", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n 
line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n 
# XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0", "def parse_line(self, line):\n raise NotImplementedError", "def is_line(self):\n return False", "def __input_data_ok(self, line=None):\n # valid pattern: 1407478022|www.facebook.com\n valid_pattern = re.compile(\"\\w{10}\\|\\w+\")\n if (line) and (re.match(valid_pattern, line)):\n return True\n else:\n return False", "def check_line(self, line: str, line_num: int) -> Lints:\n yield from super().check_line(line, line_num)\n\n if self.in_header and line != \"---\\n\":\n for m in self.TAG_QUOTED.finditer(line):\n yield LintError(\"header-tags-quoted\", line_num, m.start())", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def check_line(self, line):\n return int(line) in self.__bus_dict", "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def line_valid(line: str) -> bool:\n\n return line != ' ' and line != ''", "def is_line(self):\n return True", "def is_line(self):\n return True", "def _split_line(line):\n # This method encapsulates the recognition of an unusual Unix format\n # variant (see ticket http://ftputil.sschwarzer.net/trac/ticket/12 ).\n line_parts = line.split()\n FIELD_COUNT_WITHOUT_USERID = 8\n FIELD_COUNT_WITH_USERID = FIELD_COUNT_WITHOUT_USERID + 1\n if len(line_parts) < FIELD_COUNT_WITHOUT_USERID:\n # No known Unix-style format\n raise ftputil.error.ParserError(\"line '{}' can't be parsed\".format(line))\n # If we have a valid format (either with or without user id field), the\n # field with index 5 is either the month abbreviation or a day.\n try:\n int(line_parts[5])\n except ValueError:\n # Month abbreviation, \"invalid literal for int\"\n line_parts = line.split(None, FIELD_COUNT_WITH_USERID - 1)\n else:\n # Day\n line_parts = line.split(None, FIELD_COUNT_WITHOUT_USERID - 1)\n USER_FIELD_INDEX = 2\n line_parts.insert(USER_FIELD_INDEX, None)\n return line_parts", "def test__clean_line():\n LINES = {\n \"One morn before me were three figures seen,\":\n \"One morn before me were three figures seen,\",\n \"And once—more came they by:-alas! wherefore?\":\n \"And once more came they by: alas! 
wherefore?\",\n }\n for line, clean_line in LINES.items():\n assert(LineBuilder(line)._clean_line() == clean_line)", "def _validate_linemerge(self, merged_line):\n\n if not isinstance(merged_line, geometry.LineString):\n merged_line = [ls for ls in merged_line]\n else:\n merged_line = [merged_line]\n return merged_line", "def wizard_input_verification(form):\n # Note,that key must start with line_..., other options are ignored, to allow DNF option to work\n lines = [value for key, value in form.items() if key.startswith(\"line\")]\n # if the entered lines are unique, they will have the same value\n if len(lines) != len(set(lines)):\n return False\n else:\n return True", "def testlines(fid):\n first = fid.readline()\n first = first.strip().split('/')\n first = first[0].split(',')\n if float(first[5]) == 50.0 or float(first[5]) == 60.0:\n return True\n else:\n return False", "def _checkForBlankLines(self, datalines):\n empties = None\n count = 0\n rtlines = []\n for line in datalines:\n if line.strip() == \"\":\n empties = 1\n else:\n if empties == 1: # If data line found after empty line then raise\n raise Exception(\"Empty line found in data section at line: \" + str(count))\n else:\n rtlines.append(line)\n count = count + 1\n return rtlines", "def _test_line(\n self, line, manager_data=None\n ): # pylint: disable=too-many-branches # pragma: no cover\n\n if PyFunceble.CONFIGURATION[\"db_type\"] == \"json\" and manager_data is not None:\n autocontinue = AutoContinue(self.file, parent_process=False)\n inactive_db = InactiveDB(self.file)\n mining = Mining(self.file)\n else:\n # We use the previously initiated autocontinue instance.\n autocontinue = self.autocontinue\n\n # We use the previously initiated inactive database instance.\n inactive_db = self.inactive_db\n\n # We use the previously initiated mining instance.\n mining = self.mining\n\n # We remove cariage from the given line.\n line = line.strip()\n\n if not line or line[0] == \"#\":\n # We line is a comment line.\n\n # We return None, there is nothing to test.\n return None\n\n if Regex(line, self.regex_ignore, escape=False, return_data=False).match():\n # The line match our list of elemenet\n # to ignore.\n\n # We return None, there is nothing to test.\n return None\n\n # We format the line, it's the last\n # rush before starting to filter and test.\n subject = self._format_line(line)\n\n if (\n not PyFunceble.CONFIGURATION[\"local\"]\n and PyFunceble.Check(subject).is_reserved_ipv4()\n ):\n # * We are not testing for local components.\n # and\n # * The subject is a reserved IPv4.\n\n # We return None, there is nothing to test.\n return None\n\n if PyFunceble.CONFIGURATION[\"filter\"]:\n # We have to filter.\n\n if Regex(\n subject, PyFunceble.CONFIGURATION[\"filter\"], return_data=False\n ).match():\n # The line match the given filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n else:\n # The line does not match the given filter.\n\n # We return None.\n return None\n else:\n # We do not have to filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n\n # We add the line into the auto continue database.\n autocontinue.add(subject, status)\n\n if status.lower() in self.list_of_up_statuses:\n # The status is in the list of UP status.\n\n # We mine if necessary.\n mining.mine(subject, self.file_type)\n\n if subject in inactive_db:\n # The subject is in the inactive database.\n\n # We generate the suspicous file.\n Generate(\n subject, \"file_domain\", 
PyFunceble.STATUS[\"official\"][\"up\"]\n ).analytic_file(\"suspicious\")\n\n # And we remove the current subject from\n # the inactive database.\n inactive_db.remove(subject)\n else:\n # The status is not in the list of UP status.\n\n # We add the current subject into the\n # inactive database.\n inactive_db.add(subject, status)\n\n if (\n self.complements_test_started\n and PyFunceble.CONFIGURATION[\"db_type\"] == \"json\"\n ):\n # We started the test of the complements.\n\n if \"complements\" in autocontinue.database:\n # The complement index is present.\n\n while subject in autocontinue.database[\"complements\"]:\n # We loop untill the line is not present into the\n # database.\n\n # We remove the currently tested element.\n autocontinue.database[\"complements\"].remove(subject)\n\n # We save the current state.\n autocontinue.save()\n\n if manager_data is None:\n # We are not in a multiprocess environment.\n\n # We update the counters\n autocontinue.update_counters()\n\n # We process the autosaving if it is necessary.\n self.autosave.process(test_completed=False)\n elif PyFunceble.CONFIGURATION[\"db_type\"] == \"json\":\n # We are in a multiprocess environment.\n\n # We save everything we initiated into the server process\n manager_data.append(\n {\n \"autocontinue\": autocontinue.database,\n \"inactive_db\": inactive_db.database,\n \"mining\": mining.database,\n }\n )\n\n # We return None.\n return None", "def check_data(self):\n\n for i in range(len(self.full_ed_lines)):\n if self.full_ed_lines[i].text() != \"\":\n if self.full_ed_lines[i].hasAcceptableInput():\n continue\n else:\n if i == 1:\n self.msg2Statusbar.emit('Неправильный формат версии! Исправьте и повторите действие!')\n elif i == 5:\n self.msg2Statusbar.emit('Неправильная почта! Исправьте и повторите действие!')\n return False\n else:\n self.msg2Statusbar.emit('Не все поля заполнены! 
Исправьте и повторите действие!')\n return False\n return True", "def filterLines(weatherRDDRecord):\n fieldsList = weatherRDDRecord.split(\",\")\n #return len(fieldsList)\n if any(i.isdigit() for i in fieldsList[0]):\n return True\n else:\n return False", "def has_invalid_lines(self):\n # Convience variables\n sample_id = self._sample_sheet.sample_id_column\n sample_name = self._sample_sheet.sample_name_column\n sample_project = self._sample_sheet.sample_project_column\n # Look at first line to see which items have been provided\n line = self._sample_sheet.data[0]\n has_sample_id = line[sample_id] != ''\n has_sample_name = (sample_name is not None) and \\\n (line[sample_name] != '')\n has_project = line[sample_project] != ''\n # Look for invalid data lines\n invalid_lines = []\n for line in self._sample_sheet.data:\n if self._sample_sheet.has_lanes and line['Lane'] == '':\n invalid_lines.append(line)\n elif has_sample_id and line[sample_id] == '':\n invalid_lines.append(line)\n elif has_sample_name and line[sample_name] == '':\n invalid_lines.append(line)\n elif has_project and line[sample_project] == '':\n invalid_lines.append(line)\n return invalid_lines", "def clean_json(self, line_no, row):\n if len(row) not in [4, 5]:\n return False\n return True", "def set_valid(line, next_line):\n if next_line and line.o_zip_code == next_line.o_zip_code and \\\n line.o_state == next_line.o_state:\n line.valid = 1\n else:\n if app.config.get('ALLOWED_STATES'):\n state_valid = valid_state(line)\n else:\n state_valid = True\n if app.config.get('ZIPCODE_LENGTH'):\n zipcode_valid = valid_zipcode(line)\n else:\n zipcode_valid = True\n if app.config.get('ALOWED_AGE'):\n age_valid = valid_age(line)\n else:\n age_valid = True\n if app.config.get('EMAIL_VALIDATION'):\n email_valid = valid_email(line)\n else:\n email_valid = True\n if app.config.get('ZIPCODE_SUM'):\n zip_sum_valid = valid_zip_sum(line)\n else:\n zip_sum_valid = True\n if app.config.get('ZIPCODE_SUM'):\n domain_valid = valid_domain(line)\n else:\n domain_valid = True\n if state_valid and zipcode_valid and age_valid and email_valid and \\\n zip_sum_valid and domain_valid:\n line.valid = 1\n else:\n line.valid = 0\n return line.valid", "def isDataLine(line):\n if len(line) > 1:\n return line[0] != \"#\"\n return False", "def isDataLine(line):\n if len(line) > 1:\n return line[0] != \"#\"\n return False", "def notparsablelines(self):\n return self._notparsable", "def check_line(self, lineno, line):\n if line.startswith(CODE_BLOCK_DELIMITER) and self.in_code_block:\n if len(line.strip()) == len(CODE_BLOCK_DELIMITER):\n self.register_error(lineno, \"Code block has no formatter\")\n return\n formatter = line[3:].split()[0]\n if formatter not in self.formatters:\n self.register_error(\n lineno,\n f\"Code block has bad formatter '{formatter}'\",\n )", "def isThereApartToIgnore(self,line):\n good_line = ''\n curr_line = line\n # there are 3 options: or the first of the next line is a comment, or a qoute, or a //. 
each time we will check\n # what is first\n global multi_comment_line_mode\n bad_line = line.find(\"//\")\n bad_part_start = line.find(\"/*\")\n if (bad_line == -1 and bad_part_start == -1 and not multi_comment_line_mode):\n # if there is no problem\n return line\n while curr_line != '':\n bad_line = curr_line.find(\"//\")\n curr_lenght_line = len(curr_line)\n bad_part_start = curr_line.find(\"/*\")\n qoutes_start = curr_line.find('\"')\n # handling the case in which bad part is first\n if bad_line==-1 and bad_part_start==-1 and qoutes_start==-1:\n good_line += ' ' + curr_line\n return good_line\n if (bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n bad_part_start == min(bad_part_start,bad_line,qoutes_start) or (bad_part_start!=-1 and bad_line==-1\n and qoutes_start == -1) or (bad_part_start!=-1 and bad_line==-1 and qoutes_start!=-1\n and bad_part_start < qoutes_start )or\n (bad_part_start!=-1 and bad_line!=-1 and qoutes_start==-1 and\n bad_part_start < bad_line )):\n curr_bad = curr_line[bad_part_start:]\n bad_part_end = curr_bad.find(\"*/\")\n good_line += ' ' +curr_line[:bad_part_start]# adding this part to good line\n if bad_part_end != -1:\n # good_line += curr_line[:bad_part_start]\n if bad_part_start + bad_part_end + 2 == curr_lenght_line - 1:\n break\n curr_line = curr_line[bad_part_start + bad_part_end + 2:]\n continue\n else:\n # in this case there are more lines which are bad\n # global multi_comment_line_mode\n multi_comment_line_mode = True\n return good_line\n # hadling the case in which bad line is first\n elif ((bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n bad_line == min(bad_part_start,bad_line,qoutes_start))or\n (qoutes_start == -1 and bad_line !=-1 and bad_part_start == -1) or (qoutes_start!=-1 and bad_line!=-1\n and bad_line<qoutes_start ) or (bad_line!=-1 and bad_part_start!=-1 and qoutes_start ==-1\n and bad_line<bad_part_start)):\n curr_line = curr_line[:bad_line]\n continue\n # handling the case in which quates the first\n if(bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n qoutes_start == min(bad_part_start,bad_line,qoutes_start) or\n (qoutes_start != -1 and bad_line ==-1 and bad_part_start==-1) or\n (qoutes_start != -1 and bad_line !=-1 and bad_part_start==-1 and qoutes_start<bad_line) or\n (qoutes_start != -1 and bad_part_start !=-1 and bad_line==-1 and qoutes_start<bad_part_start)):\n end_qoutes = curr_line[qoutes_start+1:].find('\"')\n good_line+=' '+curr_line[:qoutes_start]+curr_line[qoutes_start:end_qoutes+qoutes_start+2]\n curr_line = curr_line[end_qoutes+qoutes_start+2:]\n continue\n # need???\n elif ((qoutes_start!=-1 and bad_part_start!=-1 and qoutes_start > bad_part_start) or\n (qoutes_start==-1 and bad_part_start!=-1)):\n curr_bad = curr_line[bad_part_start:]\n bad_part_end = curr_bad.find(\"*/\")\n if bad_part_end != -1:\n good_line += ' '+curr_line[:bad_part_start] # adding this part to good line\n if bad_part_start+bad_part_end+2 == curr_lenght_line-1:\n break\n curr_line = curr_line[bad_part_start+bad_part_end+2:]\n else:\n # in this case there are more lines which are bad\n multi_comment_line_mode = True\n return good_line\n else:\n good_line+=' '+ curr_line\n break\n return good_line", "def validate(self, field, row):\n raise NotImplementedError", "def no_blank_line_before_section(): # noqa: D416", "def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 
'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict", "def _check_lines(self, lines: Sequence[str], needed_lines: NeedLines, ignore_spaces: bool = True) -> None:\n if ignore_spaces:\n lines = [vdns.common.compact_spaces(x) for x in lines]\n needed_lines0 = needed_lines\n needed_lines = []\n for line in needed_lines0:\n if isinstance(line, tuple):\n needed_lines.append((vdns.common.compact_spaces(line[0]), line[1]))\n else:\n needed_lines.append(vdns.common.compact_spaces(line))\n\n remaining_lines = lines\n for line in needed_lines:\n if isinstance(line, str):\n needle = line\n subsequent = False\n elif isinstance(line, tuple):\n needle = line[0]\n subsequent = True\n else:\n raise Exception(f'Unsupported needed_line: {line}')\n\n if subsequent:\n # Skip empty lines\n while remaining_lines and not remaining_lines[0]:\n remaining_lines = remaining_lines[1:]\n self.assertEqual(needle, remaining_lines[0])\n remaining_lines = remaining_lines[1:]\n else:\n self.assertIn(needle, remaining_lines)\n # Limit the next search to the subsequent lines\n remaining_lines = remaining_lines[remaining_lines.index(line) + 1:]", "def CheckLabel(Line): \n for i in Line:\n if i == '\\t': #can't detect leading tabs, stops at the first \\ \n raise InputError(Line,\"malformed input\") \n elif i != ' ':\n break", "def _is_blank_line(self):\n pattern = re.compile(r\"^(\\s)*$\")\n return pattern.search(self._line)", "def getLineInformation(line):\n \n pass", "def phase_check(self, num, line):\n\t\tpass", "def test_constructor_without_value(self):\n line = D1Line()\n self.assertEqual((line.gender,\n line.event_swimmer_id,\n line.last_name,\n line.first_name,\n line.nick_name,\n line.middle_initial,\n line.uss_num,\n line.team_swimmer_id,\n line.date_of_birth,\n line.age),\n (None, None, None, None, None,\n None, None, None, None, None))", "def _check_line_is_good(self, string):\r\n # The standard requires we only accept strings ending in \\r\\n or \\n\r\n if (string[-1] != \"\\n\"):\r\n raise ParseError('Line endings were not as expected', string)\r\n \r\n # The standard places a limit on line lengths\r\n if (len(string)) > 512:\r\n raise ProtocolError('Line too long to be valid', string)\r\n \r\n # Trim our trailing whitespace/line endings\r\n return string.rstrip()", "def _validate_fields(self, change_fields):\n pass", "def test_constructor_short_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.short_line))", "def check_column_count(cls, line):\n\n # MAGIC n_cols = n_delim + 1 (no trailing delimiter)\n cols = line.count(cls.DELIMITER) + 1\n expected = 7 # MAGIC USAA convention, not all are populated though\n return cols == expected", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def check(self): # full program\n r = re.compile('(?!(^(((?!;)[A-Z][+-]?\\d+(\\.\\d+)?\\s?)*(\\s*;\\s.*)?)$))')\n for line in self.blocks:\n if r.match(line) and line and line != '\\r' 
and line != '\\n':\n return False\n return True", "def check_all_lines(self):\n self.E_str = \"check_all_lines\"\n variables = []\n lines = self.file_ltxt\n while self.line_num < len(lines):\n line = lines[self.line_num].strip()\n if line == \"}\": self.line_num += 1; continue\n\n # Error check any variables\n if self.line_declarations['variable'](line):\n self.check_variable_line(line)\n name, _ = self.parse_variable_line(line)\n variables.append(name)\n\n # # Error check any splice commands\n # elif self.line_declarations['splice'](line):\n # self.check_splice_command(line)\n\n # Error check any file loading commands\n elif self.line_declarations['load'](line):\n var = self.check_load_command(line)\n variables.append(var)\n\n elif self.line_declarations['plot'](line):\n var = self.check_plot_command(line)\n if var is not None: variables.append(var)\n\n # Error check any file loading commands\n elif self.line_declarations['write'](line):\n self.check_write_command(line)\n\n # Error check any file loading commands\n elif self.line_declarations['math'](line):\n var = self.check_math_line(line)\n variables.append(var)\n\n # Error check any echo commands\n elif self.line_declarations['echo'](line):\n self.check_echo_command(line)\n\n # Error check any calc commands\n elif self.line_declarations['calc'](line):\n var = self.check_calc_command(line)\n if var != \"\": variables.append(var)\n\n # Error check any calc commands\n elif self.line_declarations['set'](line):\n var = self.check_set_command(line)\n # if var != \"\": variables.append(var)\n\n # Error check any for loop commands\n elif self.line_declarations['for'](line):\n self.check_for_command(line)\n\n # Error check any for script commands\n elif self.line_declarations['script'](line):\n _vars = self.check_script_command(line)\n for var in _vars:\n if var not in variables: variables.append(var)\n\n # Error checking the glue command\n elif self.line_declarations['glue'](line):\n var = self.check_glue_command(line)\n variables.append(var)\n\n # Error check any python commands\n elif self.line_declarations['inline_code'](line):\n if self.line_declarations['python'](line):\n _vars = self.check_python_command(line)\n for var in _vars:\n if var not in variables: variables.append(var)\n else:\n # Run the check_{script_type}_command() fnc\n getattr(self, f\"check_{line.split()[0]}_command\")(line)\n\n # Error check any python commands\n elif self.line_declarations['if'](line):\n self.check_if_statement(line)\n\n elif self.line_declarations['exit'](line):\n break\n\n self.line_num += 1\n\n # Reset the inp file variables and line number\n self.line_num = 0\n for var in set(variables):\n delattr(self, var)\n self.variables.remove(var)\n\n self.files_written = []", "def is_ok_line(line):\n card1 = line[0]\n card2 = line[1]\n card3 = line[2]\n\n if not is_coupled(card1.east, card2.west):\n return False\n if not is_coupled(card2.east, card3.west):\n return False\n return True", "def test_basic_dummy_no_match(self):\n self.assertLines([\"a\", \";\", \"examples/dummy.csv\"], [\"a,b,c,a_xfind\", \"1,2,3,\",])", "def test_line_parsing(self):\r\n\r\n params = ParameterSet.read_param_file(exepath('mocks/line_tests.txt'))\r\n \r\n #print params\r\n #for param in params:\r\n # print '%s = %s' % (param, params[param])\r\n \r\n self.assertEqual(len(params.keys()), 8)\r\n \r\n self.assertEqual(params['my param'], 123)\r\n self.assertEqual(params['my_param'], ['A', 'B'])\r\n self.assertEqual(params['MYPARAM'], {'A': 1, 'B':'Two'})\r\n \r\n 
self.assertEqual(params['param1'], (1,2,3))\r\n self.assertEqual(params['param2'], (4,5,6))\r\n self.assertEqual(params['param3'], (7,8,9))\r\n \r\n self.assertEqual(params['ml_param'], [[1,2,3],['a', 'b', 'c'],{'C':1,'D':'Two'}])\r\n \r\n self.assertEqual(params['My param'], 'A')\r\n \r\n \r\n keys = params.keys()\r\n \r\n # Skip over single line comments\r\n self.assertFalse('hidden_param_2' in keys)\r\n \r\n # Skip over multiline comments\r\n self.assertFalse('hidden_param' in keys)", "def dealCommonline(line):\n\n print \"\\t\\t%s\" % (line.strip('\\n'))", "def parse_line(self, line):\n\t\tif line[0] == \"#\":\n\t\t\treturn False\n\t\tparts = [x.strip() for x in line.strip().split(\",\")]\n\t\tself.unix_time = int(parts[0])\n\t\tself.cycles_done = int(parts[1])\n\t\tself.cur_path = int(parts[2])\n\t\tself.paths_total = int(parts[3])\n\t\tself.pending_total = int(parts[4])\n\t\tself.pending_favs = int(parts[5])\n\t\tself.map_size = float(parts[6].replace(\"%\",\"\"))\n\t\tself.unique_crashes = int(parts[7])\n\t\tself.unique_hangs = int(parts[8])\n\t\tself.max_depth = int(parts[9])\n\t\tself.execs_per_sec = float(parts[10])\n\t\treturn True", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def bad_examples(self, input_rows):\n for row in input_rows.split(\"===\"):\n row = row.strip()\n if row == \"\" or row.startswith(\"#\"):\n continue\n\n if \"->\" in row:\n field, expected_error = row.split(\"->\")\n else:\n field = row\n expected_error = \"None\"\n\n field = field.strip()\n expected_error = expected_error.strip() + \"\\n\"\n yield field, expected_error", "def test_get_valid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertTrue(isinstance(ars['TotalVisits'], awstats_reader.AttrDict))", "def _compare_line_for_addition(self, current_line_data, product, supplier, shop, extra):\n if current_line_data.get(\"product_id\") != product.id:\n return False\n if current_line_data.get(\"supplier_id\") != supplier.id:\n return False\n if current_line_data.get(\"shop_id\") != shop.id:\n return False\n\n if isinstance(extra, dict): # If we have extra data, compare it to that in this line\n if not compare_partial_dicts(extra, current_line_data): # Extra data not similar? Okay then. 
:(\n return False\n return True", "def test_checkfields():\n data = StringIO(\"\"\"chrX\\t10\\t100\\n\"\"\")\n a = bedparser.bedfile(data)\n a = list(a)\n i = a[0]\n assert i.chr=='chrX'\n assert i.start==10\n assert i.stop==100\n assert len(a) == 1", "def _parse_field(\n self,\n line: List[str],\n index: int,\n make_invalid_measurement_missing: bool = False,\n ) -> int:\n result = None\n try:\n if line[index]:\n result = int(line[index])\n except (ValueError, IndexError) as ex:\n if not make_invalid_measurement_missing:\n raise ex\n result = None\n return result", "def sanity_check(left_line, right_line):\n\n # check horizontal separation distance\n if abs(right_line.line_base_pos - left_line.line_base_pos) > 4.0:\n #print(\"Line base positions too far from each other\")\n return False\n\n # check lines are roughly parallel\n # if base pos and raduius of both lines are ok, it should be enough\n # to check the X distances of a few points with respect to their y positions\n # so slice the Y points into chunks and check\n chunksize = 200\n length = min(len(left_line.ally), len(right_line.ally))\n\n # TODO: error handling\n if (right_line.allx is not None) and (left_line.allx is not None):\n bias = None\n for i in range(0, length, chunksize):\n\n # take x at car as bias\n if bias is None:\n bias = abs(right_line.allx[i] - left_line.allx[i]) * left_line.xm_per_pix\n else:\n if abs(bias - abs(right_line.allx[i] - left_line.allx[i])*left_line.xm_per_pix) > 1.0:\n #print(\"Lines are not parallel\")\n return False\n else:\n return False\n\n # check curvatures -- the curvatures for left and right should be roughly\n # in the same magitude -- check for error\n if abs(left_line.radius_of_curvature - right_line.radius_of_curvature) > 200:\n #print(\"Line radius of curvature too different\")\n return False\n\n return True", "def test_split_line_on_loop(self):\n tol = 5e-4\n line = gnx.LineString([(10.8332501, 43.6994487),\n (10.8333313, 43.6995065),\n (10.8331066, 43.6996864),\n (10.8327284, 43.6994203),\n (10.8332501, 43.6994487)])\n for distance in [0.000925456010099422, 0.0, 5.0, 9.967085832788407e-05, 0.0008499479239845902]:\n split_result = gnx.split_line(line, distance)\n self.assertIsNot(split_result, None)\n gnx_tu.assert_coordinates_almost_equals(split_result[0].coords[-1],\n split_result[1].coords[0])\n if distance < line.length:\n self.assertAlmostEquals(split_result[0].length, distance, delta=tol)\n self.assertAlmostEquals(split_result[1].length, line.length - distance, delta=tol)\n else:\n self.assertAlmostEquals(split_result[0].length, line.length, delta=tol)\n self.assertAlmostEquals(split_result[1].length, 0.0, delta=tol)", "def emptyline(self):\n return", "def emptyline(self):\n return", "def process_line(self, line):\n ltype = self.line_type(line)\n if ltype == 'gene':\n self.process_gene_line(line)\n return True\n elif ltype == 'mRNA':\n self.process_mrna_line(line)\n return True\n elif ltype == 'CDS':\n self.process_cds_line(line)\n return True\n elif ltype == 'exon':\n self.process_exon_line(line)\n return True\n elif ltype == 'start_codon' or ltype == 'stop_codon':\n self.process_other_feature_line(line)\n return True\n else:\n self.skipped_features += 1\n return False", "def GetLine(line):\r\n pass", "def test_constructor_with_value(self):\n line = D1Line(self.test_line)\n self.assertEqual((line.gender,\n line.event_swimmer_id,\n line.last_name,\n line.first_name,\n line.nick_name,\n line.middle_initial,\n line.uss_num,\n line.team_swimmer_id,\n line.date_of_birth,\n line.age),\n 
(\"F\",\n 14081,\n \"Reed\",\n \"Laramie\",\n \"\",\n \"J\",\n \"021100LARJREED\",\n 1019,\n datetime.date(2000, 2, 11),\n 9))", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def isValid(self):\n return self.file_name != \"\" and self.line_number != 0", "def parse_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False\r\n cluster_line_split = line.rstrip(\"\\n\").split()\r\n return cluster_line_split" ]
[ "0.68945944", "0.65980923", "0.64360386", "0.64280456", "0.640811", "0.6315471", "0.62812597", "0.6233146", "0.61786985", "0.617687", "0.6170383", "0.61697614", "0.6118626", "0.6044029", "0.6039419", "0.6007905", "0.60070646", "0.6002585", "0.5974007", "0.5965619", "0.5962297", "0.5952723", "0.5933941", "0.5921769", "0.5890514", "0.58857", "0.5883305", "0.5880509", "0.5875158", "0.5875158", "0.5855664", "0.5843618", "0.5837556", "0.582431", "0.5781331", "0.5773402", "0.57726127", "0.57667655", "0.5763195", "0.57377553", "0.5695076", "0.5693611", "0.5691093", "0.5691093", "0.5686444", "0.5685396", "0.566229", "0.56612635", "0.566027", "0.5648593", "0.56363904", "0.5631162", "0.56307596", "0.56266135", "0.5626544", "0.5611479", "0.5597885", "0.55953145", "0.5594722", "0.55920386", "0.55910677", "0.55901194", "0.55871767", "0.5581507", "0.55798113", "0.55697894", "0.5568686", "0.55641085", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.555735", "0.5539559", "0.5523575", "0.5519358", "0.5510767", "0.55043393", "0.5501592", "0.5496319", "0.5496041", "0.5496041", "0.5494828", "0.5492243", "0.5491444", "0.54914135", "0.54887694", "0.5485141" ]
0.6924609
0
Input file is correctly read and tuple constructed.
def test_read_input_file(self):
    test_max_digit = 2
    tuple1 = self.test_raw_tuple
    tuple2, max_digit = read_input_file(self.test_drug_info_file)
    self.assertEqual(tuple1, tuple2)
    self.assertAlmostEqual(max_digit, test_max_digit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, infile):\r\n raise NotImplementedError()", "def _read_input_file(self):\n pass", "def _parse(self, infile):\n raise NotImplementedError()", "def _read(self, in_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n self.setname = in_file.read(6).decode().strip()\n self.value = float(in_file.read(12))\n self.numnod = int(in_file.read(12))\n self.text = in_file.read(20).decode().strip()\n self.ictype = int(in_file.read(2))\n self.numstep = int(in_file.read(5))\n self.analys = in_file.read(10).decode().strip()\n self.format = int(in_file.read(2))\n in_file.read(1) # eol\n\n in_file.read(1) # pad byte\n in_file.read(2) # key = -4\n in_file.read(2) # pad bytes\n self.name = in_file.read(8).decode().strip()\n self.ncomps = int(in_file.read(5))\n self.irtype = int(in_file.read(5))\n if self.irtype != 1:\n raise NotImplementedError()\n in_file.read(1) # eol\n\n for i in range(self.ncomps):\n entity = FRDEntity()\n self.entities.append(entity)\n\n in_file.read(1) # pad byte\n entity.key = int(in_file.read(2))\n in_file.read(2) # pad bytes\n entity.name = in_file.read(8).decode().strip()\n entity.menu = int(in_file.read(5))\n entity.ictype = int(in_file.read(5))\n entity.icind1 = int(in_file.read(5))\n if entity.ictype == 4:\n entity.icind2 = int(in_file.read(5))\n elif entity.ictype == 2 and i == 3:\n entity.icind2 = int(in_file.read(5))\n entity.iexist = int(in_file.read(5))\n entity.icname = in_file.read(3).decode().strip()\n self.ncomps -= 1\n else:\n entity.iexist = int(in_file.read(5))\n in_file.read(1) # eol\n\n for i in range(self.numnod):\n result = FRDNodeResult()\n self.results.append(result)\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n result.data = []\n for j in range(num_lines):\n in_file.read(3) # pad byte and key = -1 || -2\n if result.node is None:\n result.node = int(in_file.read(5*(self.format+1)))\n else:\n in_file.read(5*(self.format+1))\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for _ in range(0, k_end):\n result.data.append(float(in_file.read(12)))\n in_file.read(1) # eol\n else:\n result.node = struct.unpack('i', in_file.read(4))[0]\n result.data = struct.unpack(\n 'f'*self.ncomps, in_file.read(self.ncomps*4))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numnod = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.nodes = []\n\n for _ in range(self.numnod):\n node = FRDNode()\n self.nodes.append(node)\n if self.format < 2:\n in_file.read(1)\n node.key = int(in_file.read(2))\n node.number = int(in_file.read(5*(self.format+1)))\n node.pos = [float(in_file.read(12)) for j in range(3)]\n in_file.read(1) # eol\n else:\n node.number = struct.unpack('i', in_file.read(4))[0]\n if self.format == 2:\n node.pos = struct.unpack('fff', in_file.read(12))\n else:\n node.pos = struct.unpack('ddd', in_file.read(24))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def readprimitive(f): \n \n ## read in lines from input file and ignore blank lines and comment lines\n lines = [line.rstrip() for line in f if line.rstrip() if line[0] != '#']\n\n # a1,a2,a3\n A = 
np.array([[float(lines[0].split()[0]),float(lines[0].split()[1]),float(lines[0].split()[2])],\n [float(lines[1].split()[0]),float(lines[1].split()[1]),float(lines[1].split()[2])],\n [float(lines[2].split()[0]),float(lines[2].split()[1]),float(lines[2].split()[2])]]).T\n \n # number of basis atoms\n num_basis = int(lines[3].split()[0]) \n\n # basis atom positions in unit cell\n unitcell_pos = []\n for i in range(num_basis): \n unitcell_pos.append([float(lines[4+i].split()[0]),float(lines[4+i].split()[1]),float(lines[4+i].split()[2])]) \n \n return (A,unitcell_pos)", "def read(self, filename): # real signature unknown; restored from __doc__\n pass", "def ReadInputFile(file:int)->EvenMorePizza:\n self.files = ('a_example.in','b_little_bit_of_everything.in', 'c_many_ingredients.in','d_many_pizzas.in','e_many_teams.in')\n \n EvenMorePizza temporalValue\n \n file = open(files[file])#open the file\n data = doc.Read().strip(' ,\\n').split('\\n')\n file.close()#close the file\n #set the title of the command\n data_head = data[0].strip(' ,\\n').split(' ')\n temporalValue.AvailablePizza = int(data_head[0])\n temporalValue.Team2ppl = int(data_head[1])\n temporalValue.Team3ppl = int(data_head[2])\n temporalValue.Team4ppl = int(data_head[3])\n #set the other ingredients\n count = 0\n for a in data[1:]:\n x = a.strip(' ,\\n').split(' ')\n temporalValue.ingredients[count] = x[1:]\n count += 1", "def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r", "def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC", "def data(self) -> Tuple[List[str], List[List[str]]]:\n format = self.format\n # Check if the file contains header information. 
Initialize the header\n # with the optional names of columns in the format descriptor.\n has_header = format.get('header', True)\n columns = format.get('columns')\n rows = list()\n # Delimiter depends on the file format.\n delim = '\\t' if format['type'] == 'tsv' else ','\n f = codecs.iterdecode(self.load().open(), 'utf-8')\n for row in csv.reader(f, delimiter=delim):\n if has_header:\n # Set the has_header flag to False so that all following records\n # are added to the list of rows.\n has_header = False\n columns = row if columns is None else columns\n else:\n rows.append(row)\n columns = [None] * len(rows[0]) if not columns and rows else columns\n return (columns, rows)", "def ReadBasicInfo():\r\n\r\n EquilibriumStep, ProductionStep,HEPCP,HEPCE,Multiple=10000000,10000000,100,100,2\r\n InputPath,OutputPath,AtomParameterPath,TaskSuffix,MaterialInputFormat='..','..','..','','mol'\r\n GasType,GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,\\\r\n TorqueSetting,MuSiCSetting,Nodes=[],[],[],[],[],[],[],[],['1:ppn=1']\r\n CutOff,GridSpacingP,GridSpacingE=12.8,2.0,2.0\r\n MakeGCMC,UsePmap,UseEmap,UsePost,MakePmap,MakeEmap,MakeTorque,KeyOne,KeyTwo,\\\r\n PDBCharges = False,False,False,False,False,False,False,False,False,False\r\n\r\n with open('GlueParameters', 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList = Line.strip().split()\r\n if len(WordList)>1 or KeyOne==True or KeyTwo==True:\r\n if WordList[0]=='#':\r\n continue\r\n\r\n # Controlled part\r\n elif WordList[0] == 'MakeGCMC:' and WordList[1] == 'open':\r\n MakeGCMC = True\r\n elif WordList[0] == 'UsePmap:' and WordList[1] == 'yes':\r\n UsePmap = True\r\n elif WordList[0] == 'UseEmap:' and WordList[1] == 'yes':\r\n UseEmap = True\r\n elif WordList[0] == 'UsePost:' and WordList[1] == 'yes':\r\n UsePost = True\r\n elif WordList[0] == 'MakePmap:' and WordList[1] == 'open':\r\n MakePmap = True\r\n elif WordList[0] == 'MakeEmap:' and WordList[1] == 'open':\r\n MakeEmap = True\r\n elif WordList[0] == 'MakeTorque:' and WordList[1] == 'open':\r\n MakeTorque = True\r\n elif WordList[0] == 'UseChargesFromPDBFile:' and WordList[1] == 'yes':\r\n PDBCharges = True\r\n\r\n # Basic part\r\n elif WordList[0]=='InputPath:':\r\n InputPath=WordList[1]\r\n elif WordList[0]=='MaterialInputFormat:':\r\n MaterialInputFormat=WordList[1]\r\n elif WordList[0]=='OutputPath:':\r\n OutputPath=WordList[1]\r\n elif WordList[0]=='AtomParameterPath:':\r\n AtomParameterPath=WordList[1]\r\n elif WordList[0] == 'GasType:':\r\n GasType = list(WordList[1:])\r\n elif WordList[0] == 'GasAtomTypeNum:':\r\n\r\n for i in WordList[1:]:\r\n GasAtomTypeNum.append(int(i))\r\n\r\n elif WordList[0] == 'GasAtomType:':\r\n GasAtomType = list(WordList[1:])\r\n elif WordList[0] == 'Multiple:':\r\n Multiple = int(WordList[1])\r\n elif WordList[0] == 'CutOff:':\r\n CutOff = float(WordList[1])\r\n\r\n # GCMC part\r\n\r\n elif WordList[0] == 'GasPartialPressure:':\r\n\r\n for j in WordList[1:]:\r\n GasPartialPressure.append(str(j))\r\n\r\n elif WordList[0] == 'TemperatureList(K):':\r\n\r\n for l in WordList[1:]:\r\n TemperatureList.append(float(l))\r\n\r\n elif WordList[0] == 'PressureList(kPa):':\r\n\r\n for k in WordList[1:]:\r\n PressureList.append(float(k))\r\n\r\n elif WordList[0] == 'EquilibriumStep:':\r\n EquilibriumStep = int(WordList[1])\r\n elif WordList[0] == 'ProductionStep:':\r\n ProductionStep = int(WordList[1])\r\n\r\n # Pmap part\r\n elif WordList[0] == 'GridSpacingP(Ang):':\r\n GridSpacingP = float(WordList[1])\r\n 
elif WordList[0] == 'HighEndPotentialCutoffP(kJ/mol):':\r\n HEPCP = int(WordList[1])\r\n\r\n # Emap part\r\n elif WordList[0] == 'GridSpacingE(Ang):':\r\n GridSpacingE = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffE(kJ/mol):':\r\n HEPCE = int(WordList[1])\r\n\r\n # Torque part\r\n elif WordList[0] == 'Nodes:':\r\n Nodes = WordList[1:]\r\n elif WordList[0] == 'TaskSuffix:':\r\n TaskSuffix = WordList[1]\r\n elif WordList[0] == 'TorqueSetting:':\r\n KeyOne = True\r\n elif WordList[0] == 'MuSiCSetting:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif WordList[0] == 'END':\r\n KeyTwo = False\r\n elif KeyOne == True:\r\n TorqueSetting.append(Line)\r\n elif KeyTwo == True:\r\n MuSiCSetting.append(Line)\r\n\r\n return (InputPath,OutputPath,AtomParameterPath,MakeTorque,GasType,\r\n GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,CutOff,MakeGCMC,UsePmap,\r\n UseEmap,UsePost,MakePmap,MakeEmap,EquilibriumStep,ProductionStep,GridSpacingP,HEPCP,GridSpacingE,HEPCE,\r\n Multiple,TorqueSetting,MuSiCSetting,Nodes,TaskSuffix,PDBCharges,MaterialInputFormat)", "def read_from_file(self, filename: str) -> None:", "def _read_data(self):", "def parse_data(fp):\n pass", "def readfile(path):\n with open(path, 'r', encoding='utf-8') as f:\n param = tuple(f.readlines())\n return param", "def __init__(self, file_path: str):\n self._data: pd.DataFrame = self.read_input_and_split_tuples(file_path)", "def read_input_file(file_name):\n with open(file_name, 'r') as file_handle:\n\n rows, cols, drones, turns, max_payload = map(int, file_handle.readline().strip().split(' '))\n num_prod_types = int(file_handle.readline())\n\n product_weights = map(int, file_handle.readline().strip().split(' '))\n assert num_prod_types == len(product_weights)\n\n num_warehouses = int(file_handle.readline())\n warehouse_locations = [None] * num_warehouses\n warehouse_stock = [None] * num_warehouses\n for w_id in xrange(num_warehouses):\n warehouse_locations[w_id] = map(int, file_handle.readline().strip().split(' '))\n warehouse_stock[w_id] = map(int, file_handle.readline().strip().split(' '))\n\n if DEBUG:\n print 'Warehouse locations', warehouse_locations[:2]\n print 'Warehouse stock', warehouse_stock[:2]\n\n num_orders = int(file_handle.readline())\n order_locations = [None] * num_orders\n order_items = [None] * num_orders\n for o_id in xrange(num_orders):\n order_locations[o_id] = map(int, file_handle.readline().strip().split(' '))\n order_len = int(file_handle.readline())\n order_items[o_id] = map(int, file_handle.readline().strip().split(' '))\n assert order_len == len(order_items[o_id])\n\n if DEBUG:\n print 'Order locations', order_locations[:2]\n print 'Order items', order_items[:2]\n\n file_handle.close()\n\n return (rows, cols), \\\n drones, turns, max_payload, \\\n warehouse_locations, warehouse_stock, \\\n order_locations, order_items, product_weights", "def load(self, input):", "def _read_eeg(eeg_file):\r\n pass", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif 
(new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def read(self):", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numelem = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.elems = []\n\n for _ in range(self.numelem):\n elem = FRDElem()\n self.elems.append(elem)\n if self.format < 2:\n in_file.read(1)\n elem.key = int(in_file.read(2))\n elem.number = int(in_file.read(5*(self.format+1)))\n elem.type = int(in_file.read(5))\n elem.group = int(in_file.read(5))\n elem.material = int(in_file.read(5))\n in_file.read(1) # eol\n elem.nodes = []\n num_nodes = FRDElem.nodesPerType[elem.type]\n num_lines = int(num_nodes/(5*(3-self.format)+1))+1\n for j in range(num_lines):\n in_file.read(3) # pad byte and key = -2\n k_start = j*5*(3-self.format)\n k_end = min(num_nodes - k_start, (j+1)*5*(3-self.format))\n for _ in range(0, k_end):\n elem.nodes.append(\n int(in_file.read(5*(self.format+1))))\n in_file.read(1) # eol\n else:\n elem.number = struct.unpack('i', in_file.read(4))[0]\n elem.type = struct.unpack('i', in_file.read(4))[0]\n num_nodes = FRDElem.nodesPerType[elem.type]\n elem.group = struct.unpack('i', in_file.read(4))[0]\n elem.material = struct.unpack('i', in_file.read(4))[0]\n elem.nodes = struct.unpack(\n 'i'*num_nodes, in_file.read(num_nodes*4))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def test_file_reader(self):\r\n a = list(file_reader(\"student_majors.txt\", 3, sep='|', header=True))\r\n\r\n b = [('123', 'Jin He', 'Computer Science'), ('234', 'Nanda Koka', 'Software Engineering'), \\\r\n ('345', 'Benji Cai', 'Software Engineering')]\r\n self.assertEqual(a, b)\r\n \r\n c = [(\"CWID\", \"Name\", \"Major\"), ('123', 'Jin He', 'Computer Science'), ('234', 'Nanda Koka', 'Software Engineering'), \\\r\n ('345', 'Benji Cai', 'Software Engineering')]\r\n self.assertNotEqual(a,c)", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def read_file(inp_fn):\n lines = [line.strip().split(\",\")\n for line in open(inp_fn)\n if not (line.startswith(\"#\"))]\n return [(int(line[0]), year_record({\"male\": int(line[-3]),\n \"female\": int(line[-2]),\n \"unknown\": int(line[-1])},\n None, None))\n for line in lines[1:]]", "def test_read_data_processed(model_data):\n assert len(model_data) == 6 and type(model_data) is tuple", "def parseRaw(tagDict, inFileName):\r\n\r\n # '%Y/%m/%d %H:%M:%S' RAW Argos.csv format\r\n\r\n\r\n csvName = path.basename(inFileName)\r\n # Trap argos raw files that occurred within these dates\r\n # date formatted dd/mm/yy instead of yyyy/mm/dd\r\n bd = False\r\n if csvName >= util.CSV_schema.bad_dates[0][0]:\r\n if csvName <= util.CSV_schema.bad_dates[0][1]:\r\n bd = True\r\n if csvName >= util.CSV_schema.bad_dates[1][0]:\r\n if csvName <= util.CSV_schema.bad_dates[1][1]:\r\n bd = True\r\n\r\n newPasses = []\r\n d_ptt = {v[0]:k for k,v in tagDict.items()}\r\n pttDict = OrderedDict(sorted(d_ptt.items())) # Sort into {ptt: tag_id, ....}\r\n del d_ptt\r\n with open(inFileName, 'rb') as inFile:\r\n count = 
sum(1 for line in inFile)\r\n inFile.seek(0) # reset file\r\n reader = csv.DictReader(inFile)\r\n while reader.line_num < count:\r\n # Trap for changed fieldname\r\n gt = True if util.CSV_schema.gt_names[1] in reader.fieldnames else False\r\n featID = None\r\n ptt = 0\r\n msgType = 'NEW'\r\n str_timeval = ''\r\n passDur = None\r\n for row in reader:\r\n if row['Platform ID No.'][0] =='#': # What is this even trapping ???\r\n continue\r\n if int(row['Platform ID No.']) not in pttDict.keys(): # Orphan Tag\r\n newOrphan(row, inFileName, gt)\r\n msgType = 'NEW'\r\n updatePttList(ptt, row['Msg Date'],bd)\r\n continue\r\n elif int(row['Platform ID No.']) != ptt: # Start New PTT\r\n if ptt: # Skip ptt = 0\r\n tag_id = pttDict[ptt]\r\n dbutil.updateDeployment(conn, tag_id) # Update ptt that just finished\r\n updatePttList(ptt, last_msg, bd)\r\n updateDevice(tag_id, last_msg, bd)\r\n# HOW to update final (Valid) ptt?????\r\n msgType = 'NEW'\r\n # tag specific vars\r\n ptt = int(row['Platform ID No.']) #=integer\r\n tag_id = pttDict[ptt]\r\n pttStart = tagDict.get(tag_id)[1] #=datetimes\r\n pttStop = tagDict.get(tag_id)[2]\r\n animal_id = tagDict.get(tag_id)[4] #=integer\r\n # loop vars\r\n str_timeval = row['Loc. date'] if row['Loc. date'] else row['Msg Date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n # Trap out of range date\r\n if timevalue < pttStart:\r\n ptt = 0 # Force new ptt Variables for next row\r\n continue\r\n# ********* NOT TRAPPING stoptime ??\r\n elif timevalue > pttStop:\r\n ptt = 0\r\n continue\r\n # start parsing\r\n last_msg = format_date(row['Msg Date'],bd)\r\n if msgType == 'SAME':\r\n if row['Loc. date']:\r\n if row['Loc. date'] == str_timeval:\r\n if row['Pass'] != passDur:\r\n msgType = 'NEW'\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n if row['Sat.'] != sat:\r\n msgType = 'NEW'\r\n sat = row['Sat.']\r\n elif row['Loc. date'] != str_timeval: # Definitely New pass\r\n msgType = 'NEW'\r\n str_timeval = row['Loc. date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n else: # row['Loc. 
date'] empty\r\n if row['Pass'] == '0': # Single pass\r\n msgType = 'NEW'\r\n str_timeval = row['Msg Date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = None # OR '0'\r\n sat = row['Sat.']\r\n elif row['Pass'] != '0': # Multi-Z pass\r\n if row['Pass'] != passDur: # still in same pass\r\n msgType = 'NEW'\r\n str_timeval = getPassTime(inFileName,row['Pass'],\r\n str(ptt),\r\n row['Msg Date'][:10])\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n if msgType == 'SAME': #Append: to Transmit\r\n if featID:\r\n transmitID, last_msg = addTransmit(featID, row, bd)\r\n\r\n if msgType == 'NEW': # Append: to Argos & Transmit\r\n featID = addArgos(row, tag_id, animal_id, timevalue, gt, bd)\r\n msgType = 'SAME'\r\n if featID:\r\n print 'Pass at: [{0}] added for {1}'.format(str_timeval, ptt)\r\n newPasses.append(featID)\r\n\r\n return newPasses", "def _scan_axograph_file(self):\n\n self.info = {}\n\n with open(self.filename, 'rb') as fid:\n f = StructFile(fid)\n\n self.logger.debug('filename: {}'.format(self.filename))\n self.logger.debug('')\n\n # the first 4 bytes are always a 4-character file type identifier\n # - for early versions of AxoGraph, this identifier was 'AxGr'\n # - starting with AxoGraph X, the identifier is 'axgx'\n header_id = f.read(4).decode('utf-8')\n self.info['header_id'] = header_id\n assert header_id in ['AxGr', 'axgx'], \\\n 'not an AxoGraph binary file! \"{}\"'.format(self.filename)\n\n self.logger.debug('header_id: {}'.format(header_id))\n\n # the next two numbers store the format version number and the\n # number of data columns to follow\n # - for 'AxGr' files, these numbers are 2-byte unsigned short ints\n # - for 'axgx' files, these numbers are 4-byte long ints\n # - the 4-character identifier changed from 'AxGr' to 'axgx' with\n # format version 3\n if header_id == 'AxGr':\n format_ver, n_cols = f.read_f('HH')\n assert format_ver == 1 or format_ver == 2, \\\n 'mismatch between header identifier \"{}\" and format ' \\\n 'version \"{}\"!'.format(header_id, format_ver)\n elif header_id == 'axgx':\n format_ver, n_cols = f.read_f('ll')\n assert format_ver >= 3, \\\n 'mismatch between header identifier \"{}\" and format ' \\\n 'version \"{}\"!'.format(header_id, format_ver)\n else:\n raise NotImplementedError(\n 'unimplemented file header identifier \"{}\"!'.format(\n header_id))\n self.info['format_ver'] = format_ver\n self.info['n_cols'] = n_cols\n\n self.logger.debug('format_ver: {}'.format(format_ver))\n self.logger.debug('n_cols: {}'.format(n_cols))\n self.logger.debug('')\n\n ##############################################\n # BEGIN COLUMNS\n\n sig_memmaps = []\n sig_channels = []\n for i in range(n_cols):\n\n self.logger.debug('== COLUMN INDEX {} =='.format(i))\n\n ##############################################\n # NUMBER OF DATA POINTS IN COLUMN\n\n n_points = f.read_f('l')\n\n self.logger.debug('n_points: {}'.format(n_points))\n\n ##############################################\n # COLUMN TYPE\n\n # depending on the format version, data columns may have a type\n # - prior to version 3, column types did not exist and data was\n # stored in a fixed pattern\n # - beginning with version 3, several data types are available\n # as documented in AxoGraph_ReadWrite.h\n if format_ver == 1 or format_ver == 2:\n col_type = None\n elif format_ver >= 3:\n col_type = f.read_f('l')\n else:\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n self.logger.debug('col_type: 
{}'.format(col_type))\n\n ##############################################\n # COLUMN NAME AND UNITS\n\n # depending on the format version, column titles are stored\n # differently\n # - prior to version 3, column titles were stored as\n # fixed-length 80-byte Pascal strings\n # - beginning with version 3, column titles are stored as\n # variable-length strings (see StructFile.read_string for\n # details)\n if format_ver == 1 or format_ver == 2:\n title = f.read_f('80p').decode('utf-8')\n elif format_ver >= 3:\n title = f.read_f('S')\n else:\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n self.logger.debug('title: {}'.format(title))\n\n # units are given in parentheses at the end of a column title,\n # unless units are absent\n if len(title.split()) > 0 and title.split()[-1][0] == '(' and \\\n title.split()[-1][-1] == ')':\n name = ' '.join(title.split()[:-1])\n units = title.split()[-1].strip('()')\n else:\n name = title\n units = ''\n\n self.logger.debug('name: {}'.format(name))\n self.logger.debug('units: {}'.format(units))\n\n ##############################################\n # COLUMN DTYPE, SCALE, OFFSET\n\n if format_ver == 1:\n\n # for format version 1, all columns are arrays of floats\n\n dtype = 'f'\n gain, offset = 1, 0 # data is neither scaled nor off-set\n\n elif format_ver == 2:\n\n # for format version 2, the first column is a \"series\" of\n # regularly spaced values specified merely by a first value\n # and an increment, and all subsequent columns are arrays\n # of shorts with a scaling factor\n\n if i == 0:\n\n # series\n first_value, increment = f.read_f('ff')\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n # assume this is the time column\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip memmap, chan info for time col\n\n else:\n\n # scaled short\n dtype = 'h'\n gain, offset = \\\n f.read_f('f'), 0 # data is scaled without offset\n\n elif format_ver >= 3:\n\n # for format versions 3 and later, the column type\n # determines how the data should be read\n # - column types 1, 2, 3, and 8 are not defined in\n # AxoGraph_ReadWrite.h\n # - column type 9 is different from the others in that it\n # represents regularly spaced values\n # (such as times at a fixed frequency) specified by a\n # first value and an increment, without storing a large\n # data array\n\n if col_type == 9:\n\n # series\n first_value, increment = f.read_f('dd')\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n if i == 0:\n\n # assume this is the time column\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip memmap, chan info for time col\n\n else:\n\n raise NotImplementedError(\n 'series data are supported only for the first '\n 'data column (time)!')\n\n elif col_type == 4:\n\n # short\n dtype = 'h'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 5:\n\n # long\n dtype = 'l'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 6:\n\n # float\n 
dtype = 'f'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 7:\n\n # double\n dtype = 'd'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 10:\n\n # scaled short\n dtype = 'h'\n gain, offset = f.read_f('dd') # data scaled w/ offset\n\n else:\n\n raise NotImplementedError(\n 'unimplemented column type \"{}\"!'.format(col_type))\n\n else:\n\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n ##############################################\n # COLUMN MEMMAP AND CHANNEL INFO\n\n # create a memory map that allows accessing parts of the file\n # without loading it all into memory\n array = np.memmap(\n self.filename,\n mode='r',\n dtype=f.byte_order + dtype,\n offset=f.tell(),\n shape=n_points)\n\n # advance the file position to after the data array\n f.seek(array.nbytes, 1)\n\n if i == 0:\n # assume this is the time column containing n_points values\n\n # verify times are spaced regularly\n diffs = np.diff(array)\n increment = np.median(diffs)\n max_frac_step_deviation = np.max(np.abs(\n diffs / increment - 1))\n tolerance = 1e-3\n if max_frac_step_deviation > tolerance:\n self.logger.debug('largest proportional deviation '\n 'from median step size in the first '\n 'column exceeds the tolerance '\n 'of ' + str(tolerance) + ':'\n ' ' + str(max_frac_step_deviation))\n raise ValueError('first data column (assumed to be '\n 'time) is not regularly spaced')\n\n first_value = array[0]\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip saving memmap, chan info for time col\n\n else:\n # not a time column\n\n self.logger.debug('gain: {}, offset: {}'.format(gain, offset))\n self.logger.debug('initial data: {}'.format(\n array[:5] * gain + offset))\n\n # channel_info will be cast to _signal_channel_dtype\n channel_info = (\n name, str(i), 1 / sampling_period, f.byte_order + dtype,\n units, gain, offset, '0')\n\n self.logger.debug('channel_info: {}'.format(channel_info))\n self.logger.debug('')\n\n sig_memmaps.append(array)\n sig_channels.append(channel_info)\n\n # END COLUMNS\n ##############################################\n\n # initialize lists for events and epochs\n raw_event_timestamps = []\n raw_epoch_timestamps = []\n raw_epoch_durations = []\n event_labels = []\n epoch_labels = []\n\n # the remainder of the file may contain metadata, events and epochs\n try:\n\n ##############################################\n # COMMENT\n\n self.logger.debug('== COMMENT ==')\n\n comment = f.read_f('S')\n self.info['comment'] = comment\n\n self.logger.debug(comment if comment else 'no comment!')\n self.logger.debug('')\n\n ##############################################\n # NOTES\n\n self.logger.debug('== NOTES ==')\n\n notes = f.read_f('S')\n self.info['notes'] = notes\n\n self.logger.debug(notes if notes else 'no notes!')\n self.logger.debug('')\n\n ##############################################\n # TRACES\n\n self.logger.debug('== TRACES ==')\n\n n_traces = f.read_f('l')\n self.info['n_traces'] = n_traces\n\n self.logger.debug('n_traces: {}'.format(n_traces))\n self.logger.debug('')\n\n trace_header_info_list = {}\n group_ids = []\n for i in range(n_traces):\n\n # AxoGraph traces are 
1-indexed in GUI, so use i+1 below\n self.logger.debug('== TRACE #{} =='.format(i + 1))\n\n trace_header_info = {}\n\n if format_ver < 6:\n # before format version 6, there was only one version\n # of the header, and version numbers were not provided\n trace_header_info['trace_header_version'] = 1\n else:\n # for format versions 6 and later, the header version\n # must be read\n trace_header_info['trace_header_version'] = \\\n f.read_f('l')\n\n if trace_header_info['trace_header_version'] == 1:\n TraceHeaderDescription = TraceHeaderDescriptionV1\n elif trace_header_info['trace_header_version'] == 2:\n TraceHeaderDescription = TraceHeaderDescriptionV2\n else:\n raise NotImplementedError(\n 'unimplemented trace header version \"{}\"!'.format(\n trace_header_info['trace_header_version']))\n\n for key, fmt in TraceHeaderDescription:\n trace_header_info[key] = f.read_f(fmt)\n # AxoGraph traces are 1-indexed in GUI, so use i+1 below\n trace_header_info_list[i + 1] = trace_header_info\n group_ids.append(\n trace_header_info['group_id_for_this_trace'])\n\n self.logger.debug(trace_header_info)\n self.logger.debug('')\n self.info['trace_header_info_list'] = trace_header_info_list\n\n ##############################################\n # GROUPS\n\n self.logger.debug('== GROUPS ==')\n\n n_groups = f.read_f('l')\n self.info['n_groups'] = n_groups\n group_ids = \\\n np.sort(list(set(group_ids))) # remove duplicates and sort\n assert n_groups == len(group_ids), \\\n 'expected group_ids to have length {}: {}'.format(\n n_groups, group_ids)\n\n self.logger.debug('n_groups: {}'.format(n_groups))\n self.logger.debug('group_ids: {}'.format(group_ids))\n self.logger.debug('')\n\n group_header_info_list = {}\n for i in group_ids:\n\n # AxoGraph groups are 0-indexed in GUI, so use i below\n self.logger.debug('== GROUP #{} =='.format(i))\n\n group_header_info = {}\n\n if format_ver < 6:\n # before format version 6, there was only one version\n # of the header, and version numbers were not provided\n group_header_info['group_header_version'] = 1\n else:\n # for format versions 6 and later, the header version\n # must be read\n group_header_info['group_header_version'] = \\\n f.read_f('l')\n\n if group_header_info['group_header_version'] == 1:\n GroupHeaderDescription = GroupHeaderDescriptionV1\n else:\n raise NotImplementedError(\n 'unimplemented group header version \"{}\"!'.format(\n group_header_info['group_header_version']))\n\n for key, fmt in GroupHeaderDescription:\n group_header_info[key] = f.read_f(fmt)\n # AxoGraph groups are 0-indexed in GUI, so use i below\n group_header_info_list[i] = group_header_info\n\n self.logger.debug(group_header_info)\n self.logger.debug('')\n self.info['group_header_info_list'] = group_header_info_list\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 1 <<')\n\n # 36 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('9l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EPISODES\n\n self.logger.debug('== EPISODES ==')\n\n # a subset of episodes can be selected for \"review\", or\n # episodes can be paged through one by one, and the indexes of\n # those currently in review appear in this list\n episodes_in_review = []\n n_episodes = f.read_f('l')\n self.info['n_episodes'] = n_episodes\n for i in range(n_episodes):\n episode_bool = f.read_f('Z')\n if episode_bool:\n episodes_in_review.append(i + 1)\n self.info['episodes_in_review'] = 
episodes_in_review\n\n self.logger.debug('n_episodes: {}'.format(n_episodes))\n self.logger.debug('episodes_in_review: {}'.format(\n episodes_in_review))\n\n if format_ver == 5:\n\n # the test file for version 5 contains this extra list of\n # episode indexes with unknown purpose\n old_unknown_episode_list = []\n n_episodes2 = f.read_f('l')\n for i in range(n_episodes2):\n episode_bool = f.read_f('Z')\n if episode_bool:\n old_unknown_episode_list.append(i + 1)\n\n self.logger.debug('old_unknown_episode_list: {}'.format(\n old_unknown_episode_list))\n if n_episodes2 != n_episodes:\n self.logger.debug(\n 'n_episodes2 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes2, n_episodes))\n\n # another list of episode indexes with unknown purpose\n unknown_episode_list = []\n n_episodes3 = f.read_f('l')\n for i in range(n_episodes3):\n episode_bool = f.read_f('Z')\n if episode_bool:\n unknown_episode_list.append(i + 1)\n\n self.logger.debug('unknown_episode_list: {}'.format(\n unknown_episode_list))\n if n_episodes3 != n_episodes:\n self.logger.debug(\n 'n_episodes3 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes3, n_episodes))\n\n # episodes can be masked to be removed from the pool of\n # reviewable episodes completely until unmasked, and the\n # indexes of those currently masked appear in this list\n masked_episodes = []\n n_episodes4 = f.read_f('l')\n for i in range(n_episodes4):\n episode_bool = f.read_f('Z')\n if episode_bool:\n masked_episodes.append(i + 1)\n self.info['masked_episodes'] = masked_episodes\n\n self.logger.debug('masked_episodes: {}'.format(\n masked_episodes))\n if n_episodes4 != n_episodes:\n self.logger.debug(\n 'n_episodes4 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes4, n_episodes))\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 2 <<')\n\n # 68 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('d 9l d 4l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # FONTS\n\n if format_ver >= 6:\n font_categories = ['axis titles', 'axis labels (ticks)',\n 'notes', 'graph title']\n else:\n # would need an old version of AxoGraph to determine how it\n # used these settings\n font_categories = ['everything (?)']\n\n font_settings_info_list = {}\n for i in font_categories:\n\n self.logger.debug('== FONT SETTINGS FOR {} =='.format(i))\n\n font_settings_info = {}\n for key, fmt in FontSettingsDescription:\n font_settings_info[key] = f.read_f(fmt)\n\n # I don't know why two arbitrary values were selected to\n # represent this switch, but it seems they were\n # - setting1 could contain other undeciphered data as a\n # bitmask, like setting2\n assert font_settings_info['setting1'] in \\\n [FONT_BOLD, FONT_NOT_BOLD], \\\n 'expected setting1 ({}) to have value FONT_BOLD ' \\\n '({}) or FONT_NOT_BOLD ({})'.format(\n font_settings_info['setting1'],\n FONT_BOLD,\n FONT_NOT_BOLD)\n\n # size is stored 10 times bigger than real value\n font_settings_info['size'] = \\\n font_settings_info['size'] / 10.0\n font_settings_info['bold'] = \\\n bool(font_settings_info['setting1'] == FONT_BOLD)\n font_settings_info['italics'] = \\\n bool(font_settings_info['setting2'] & FONT_ITALICS)\n font_settings_info['underline'] = \\\n bool(font_settings_info['setting2'] & FONT_UNDERLINE)\n font_settings_info['strikeout'] = \\\n bool(font_settings_info['setting2'] & FONT_STRIKEOUT)\n font_settings_info_list[i] = font_settings_info\n\n 
self.logger.debug(font_settings_info)\n self.logger.debug('')\n self.info['font_settings_info_list'] = font_settings_info_list\n\n ##############################################\n # X-AXIS SETTINGS\n\n self.logger.debug('== X-AXIS SETTINGS ==')\n\n x_axis_settings_info = {}\n for key, fmt in XAxisSettingsDescription:\n x_axis_settings_info[key] = f.read_f(fmt)\n self.info['x_axis_settings_info'] = x_axis_settings_info\n\n self.logger.debug(x_axis_settings_info)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 3 <<')\n\n # 108 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('8l 3d 13l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EVENTS / TAGS\n\n self.logger.debug('=== EVENTS / TAGS ===')\n\n n_events, n_events_again = f.read_f('ll')\n self.info['n_events'] = n_events\n\n self.logger.debug('n_events: {}'.format(n_events))\n\n # event / tag timing is stored as an index into time\n raw_event_timestamps = []\n event_labels = []\n for i in range(n_events_again):\n event_index = f.read_f('l')\n raw_event_timestamps.append(event_index)\n n_events_yet_again = f.read_f('l')\n for i in range(n_events_yet_again):\n title = f.read_f('S')\n event_labels.append(title)\n\n event_list = []\n for event_label, event_index in \\\n zip(event_labels, raw_event_timestamps):\n # t_start shouldn't be added here\n event_time = event_index * sampling_period\n event_list.append({\n 'title': event_label,\n 'index': event_index,\n 'time': event_time})\n self.info['event_list'] = event_list\n for event in event_list:\n self.logger.debug(event)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 4 <<')\n\n # 28 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('7l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EPOCHS / INTERVAL BARS\n\n self.logger.debug('=== EPOCHS / INTERVAL BARS ===')\n\n n_epochs = f.read_f('l')\n self.info['n_epochs'] = n_epochs\n\n self.logger.debug('n_epochs: {}'.format(n_epochs))\n\n epoch_list = []\n for i in range(n_epochs):\n epoch_info = {}\n for key, fmt in EpochInfoDescription:\n epoch_info[key] = f.read_f(fmt)\n epoch_list.append(epoch_info)\n self.info['epoch_list'] = epoch_list\n\n # epoch / interval bar timing and duration are stored in\n # seconds, so here they are converted to (possibly non-integer)\n # indexes into time to fit into the procrustean beds of\n # _rescale_event_timestamp and _rescale_epoch_duration\n raw_epoch_timestamps = []\n raw_epoch_durations = []\n epoch_labels = []\n for epoch in epoch_list:\n raw_epoch_timestamps.append(\n epoch['t_start'] / sampling_period)\n raw_epoch_durations.append(\n (epoch['t_stop'] - epoch['t_start']) / sampling_period)\n epoch_labels.append(epoch['title'])\n self.logger.debug(epoch)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug(\n '>> UNKNOWN 5 (includes y-axis plot ranges) <<')\n\n # lots of undeciphered data\n rest_of_the_file = f.read()\n\n self.logger.debug(rest_of_the_file)\n self.logger.debug('')\n\n self.logger.debug('End of file reached (expected)')\n\n except EOFError as e:\n if format_ver == 1 or format_ver == 2:\n # for format versions 1 and 2, metadata like graph display\n # information was stored separately in the \"resource fork\"\n # of the 
file, so reaching the end of the file before all\n # metadata is parsed is expected\n self.logger.debug('End of file reached (expected)')\n pass\n else:\n # for format versions 3 and later, there should be metadata\n # stored at the end of the file, so warn that something may\n # have gone wrong, but try to continue anyway\n self.logger.warning('End of file reached unexpectedly '\n 'while parsing metadata, will attempt '\n 'to continue')\n self.logger.debug(e, exc_info=True)\n pass\n\n except UnicodeDecodeError as e:\n # warn that something went wrong with reading a string, but try\n # to continue anyway\n self.logger.warning('Problem decoding text while parsing '\n 'metadata, will ignore any remaining '\n 'metadata and attempt to continue')\n self.logger.debug(e, exc_info=True)\n pass\n\n self.logger.debug('')\n\n ##############################################\n # RAWIO HEADER\n\n # event_channels will be cast to _event_channel_dtype\n event_channels = []\n event_channels.append(('AxoGraph Tags', '', 'event'))\n event_channels.append(('AxoGraph Intervals', '', 'epoch'))\n\n if len(sig_channels) > 0:\n signal_streams = [('Signals', '0')]\n else:\n signal_streams = []\n\n # organize header\n self.header['nb_block'] = 1\n self.header['nb_segment'] = [1]\n self.header['signal_streams'] = np.array(signal_streams, dtype=_signal_stream_dtype)\n self.header['signal_channels'] = np.array(sig_channels, dtype=_signal_channel_dtype)\n self.header['event_channels'] = np.array(event_channels, dtype=_event_channel_dtype)\n self.header['spike_channels'] = np.array([], dtype=_spike_channel_dtype)\n\n ##############################################\n # DATA OBJECTS\n\n # organize data\n self._sampling_period = sampling_period\n self._t_start = t_start\n self._raw_signals = [sig_memmaps] # first index is seg_index\n self._raw_event_epoch_timestamps = [\n np.array(raw_event_timestamps),\n np.array(raw_epoch_timestamps)]\n self._raw_event_epoch_durations = [\n None,\n np.array(raw_epoch_durations)]\n self._event_epoch_labels = [\n np.array(event_labels, dtype='U'),\n np.array(epoch_labels, dtype='U')]", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def test_create_from_file(self):\n # TODO: Expand test to both openeye and rdkit toolkits\n filename = get_data_file_path(\"molecules/toluene.mol2\")\n\n molecule1 = Molecule(filename, allow_undefined_stereo=True)\n with open(filename, \"r\") as infile:\n molecule2 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule1 == molecule2\n\n import gzip\n\n with gzip.GzipFile(filename + \".gz\", \"r\") as infile:\n molecule3 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule3 == molecule1\n\n # Ensure that attempting to initialize a single Molecule from a file\n # containing multiple molecules raises a ValueError\n filename = get_data_file_path(\"molecules/butane_multi.sdf\")\n\n with pytest.raises(\n ValueError,\n match=\"Specified file or file-like.*exactly one molecule\",\n ):\n Molecule(filename, allow_undefined_stereo=True)", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = 
f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def readFromFile(self, inp):\n f = open(inp, \"r\")\n line = f.readline()\n line = line.strip().split(sep=\" \", maxsplit=3)\n self.columns, self.chars, self.pwdLength, _ = line\n self.columns = int(self.columns)\n self.pwdLength = int(self.pwdLength)\n self.func = lmdes\n line = f.readline()\n while line != '':\n pwd, hashV = line.strip().split(sep=\" \", maxsplit=1)\n self.table.insert(hashV, pwd)\n line = f.readline()\n f.close()", "def __init__(self, file):\n self.data = []\n parse_mode = 'unknown'\n for line in open(file, 'r'):\n # Figure out if we should change our parse mode\n if line == '':\n continue\n if line.strip() == 'Column':\n parse_mode = 'keys'\n continue\n elif line.strip() == 'Data':\n parse_mode = 'data'\n continue\n # Parse the data into keys or a list of items\n if parse_mode == 'keys':\n self.keys = line.strip().split('\\t')\n parse_mode = 'unknown'\n continue\n elif parse_mode == 'data':\n item = line.strip().split('\\t')\n self.data.append(item)\n continue", "def _parse_tuple(self, line):\n elements = line[1:-1].split(\",\\t\")\n if len(elements) == len(self.description):\n return tuple(\n [\n pythonize.convert(element.strip(), description[1])\n for (element, description) in zip(elements, self.description)\n ]\n )\n else:\n self._exception_handler(\n InterfaceError, \"length of row doesn't match header\"\n )", "def __parse_file(self):\n\n if PY2:\n text = lambda v: v.decode('utf-8')\n else:\n text = lambda v: v\n with open(self._file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n for row in reader:\n try:\n siruta = int(row[0])\n except ValueError:\n self.__notify_error(\"Line %s has an invalid SIRUTA code\" % str(row))\n continue\n if not self.siruta_is_valid(siruta):\n self.__notify_error(\"SIRUTA code %d is not valid\" % siruta)\n if len(row) != 15:\n self.__notify_error(\"Line %s does not have 15 elements\" % str(row))\n continue\n if row[7] == \"1\":\n urban = True\n else:\n urban = False\n self._data[siruta] = {\n 'siruta': siruta,\n 'name': text(row[1]).translate(self._dia_trans),\n 'postcode': int(row[2]),\n 'county': int(row[3]),\n 'sirutasup': int(row[4]),\n 'type': int(row[5]),\n 'level': text(row[6]),\n 'urban': urban,\n 'region': int(row[8]),\n }", "def test_create_from_file(self):\n # TODO: Expand test to both openeye and rdkit toolkits\n filename = get_data_file_path(\"molecules/toluene.mol2\")\n\n molecule1 = Molecule(filename, allow_undefined_stereo=True)\n with 
open(filename, \"r\") as infile:\n molecule2 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule1 == molecule2\n\n import gzip\n\n with gzip.GzipFile(filename + \".gz\", \"r\") as infile:\n molecule3 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule3 == molecule1\n\n # Ensure that attempting to initialize a single Molecule from a file\n # containing multiple molecules raises a ValueError\n with pytest.raises(ValueError) as exc_info:\n filename = get_data_file_path(\"molecules/zinc-subset-tripos.mol2.gz\")\n molecule = Molecule(filename, allow_undefined_stereo=True)", "def parse_file(f: TextIO) -> Tuple[List[List[float]], Dict[str, List[str]], Dict[str, List[str]], Dict[str, str]]:\n current_section = None\n\n header_metadata = {}\n column_metadata = {}\n row_metadata = {}\n rfu_matrix = []\n\n matrix_depth = 0\n\n reader = csv.reader(f, delimiter='\\t')\n for line in reader:\n\n # Check for trailing Nones\n for index, cell in enumerate(reversed(line)):\n if cell:\n break\n del line[-1]\n\n # If we see a new section set which portion of the adat we are in & continue to next line\n if '^HEADER' in line[0]:\n current_section = 'HEADER'\n continue\n elif '^TABLE_BEGIN' in line[0]:\n current_section = 'TABLE'\n continue\n elif '^COL_DATA' in line[0]:\n current_section = 'COL_DATA'\n continue\n elif '^ROW_DATA' in line[0]:\n current_section = 'ROW_DATA'\n continue\n\n # Parse the data according to which section of the adat we're reading\n\n if current_section == 'HEADER':\n # Not every key in the header has a value\n if len(line) == 1:\n header_metadata[line[0]] = ''\n\n # Should be the typical case\n elif len(line) == 2:\n try:\n header_metadata[line[0]] = json.loads(line[1])\n if type(header_metadata[line[0]]) != dict:\n header_metadata[line[0]] = line[1]\n except json.JSONDecodeError:\n header_metadata[line[0]] = line[1]\n\n # More than 2 values to a key should never ever happen\n else:\n raise AdatReadError('Unexpected size of header: ' + '|'.join(line))\n\n # If we have the report config section, check to see if it was loaded as a dict\n if line[0] == \"ReportConfig\" and type(header_metadata[line[0]]) != dict:\n warnings.warn('Malformed ReportConfig section in header. Setting to an empty dictionary.')\n header_metadata[line[0]] = {}\n\n elif current_section == 'COL_DATA':\n # Get the height of the column metadata section & skip the rest of the section\n col_metadata_length = len(line)\n current_section = None\n\n elif current_section == 'ROW_DATA':\n # Get the index of the end of the row metadata section & skip the rest of the section\n row_metadata_offset = len(line) - 1\n current_section = None\n\n elif current_section == 'TABLE':\n # matrix_depth is used to identify if we are in the column\n # metadata section or the row metadata/rfu section\n matrix_depth += 1\n\n # Column Metadata Section\n if matrix_depth < col_metadata_length:\n column_metadata_name = line[row_metadata_offset]\n column_metadata_data = line[row_metadata_offset + 1:]\n\n if column_metadata_name == 'SeqId' and re.match(r'\\d{3,}-\\d{1,3}_\\d+', column_metadata_data[0]):\n warnings.warn('V3 style seqIds (i.e., 12345-6_7). Converting to V4 Style. 
The adat file writer has an option to write using the V3 style')\n seq_id_data = [x.split('_')[0] for x in column_metadata_data]\n version_data = [x.split('_')[1] for x in column_metadata_data]\n column_metadata[column_metadata_name] = seq_id_data\n column_metadata['SeqIdVersion'] = version_data\n else:\n column_metadata[column_metadata_name] = column_metadata_data\n\n # Perform a check to ensure all column metadata is the same length and if not, extend it to the maximum length\n col_meta_lengths = [len(values) for values in column_metadata.values()]\n if len(set(col_meta_lengths)) > 1:\n max_length = max(col_meta_lengths)\n for name, values in column_metadata.items():\n if len(values) == max_length:\n continue\n warnings.warn(f'Adding empty values to column metadata: \"{name}\"')\n n_missing_elements = max_length - len(values)\n append_array = [''] * n_missing_elements\n new_values = values + append_array\n column_metadata[name] = new_values\n\n # Row Metadata Titles\n elif matrix_depth == col_metadata_length:\n row_metadata_names = line[:row_metadata_offset]\n row_metadata = {name: [] for name in row_metadata_names}\n\n # Row Metadata & RFU Section\n elif matrix_depth > col_metadata_length:\n\n # Store in row metadata into dictionary\n row_metadata_data = line[:row_metadata_offset]\n for name, data in zip(row_metadata_names, row_metadata_data):\n row_metadata[name].append(data)\n\n # Store the RFU data\n rfu_row_data = line[row_metadata_offset + 1:]\n converted_rfu_row_data = list(map(float, rfu_row_data))\n rfu_matrix.append(converted_rfu_row_data)\n\n return rfu_matrix, row_metadata, column_metadata, header_metadata", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def parse_infile(self, infile):\n\n if type(infile)==str:\n print('Im a string')\n folder, file = os.path.split(infile)\n elif type(infile) in [list, tuple]:\n if not len(infile) == 2:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n else:\n folder, file = infile\n else:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n \n self.folder = folder\n self.file_ = file", "def process_file(self, filename):\n import math\n try:\n my_file = open(filename, \"r\")\n except FileNotFoundError:\n return False\n\n for next_line in my_file:\n my_tuple = tuple(next_line.split(\",\"))\n if my_tuple[0].isdigit() and my_tuple[3] == 'TEMP':\n time_of_day = 
math.floor(float(my_tuple[1]) * 24)\n temp = my_tuple[4].rstrip()\n new_tuple = (int(my_tuple[0]), time_of_day, int(my_tuple[2]), float(temp))\n self._data_set.append(new_tuple)\n return True", "def _read_input_file(self):\n file_type = 'np.array'\n with open(self._file_properties['file_name'], 'r') as in_file:\n for line in in_file.readlines():\n if line[0:5] == '$$SOE':\n file_type = 'Horizons'\n break\n\n if not isfile(self._file_properties['file_name']):\n msg = 'Horizons files {:} does not exists.'\n message = msg.format(self._file_properties['file_name'])\n raise FileExistsError(message)\n if file_type == 'Horizons':\n self._read_horizons_file()\n else:\n (time, x, y, z) = np.loadtxt(\n self._file_properties['file_name'],\n usecols=(0, 1, 2, 3), unpack=True)\n self._time = time\n if int(astropy_version[0]) >= 4:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation_type='cartesian')\n else:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation='cartesian')", "def __read_pond_file(self, pondfile):\r\n self.currents = []\r\n with open(pondfile, 'r') as infile:\r\n reader = csv.reader(infile)\r\n start_end = [int(v) for v in next(reader)]\r\n self.start_state = tuple(start_end[:2])\r\n self.end_state = tuple(start_end[2:])\r\n for row in reader:\r\n self.currents.append(row)\r\n self.currents = self.currents[::-1]", "def __init__(self, message, file_handle, format):\n oh = open(file_handle, \"rU\")\n config.log.error(\"csv/tsv file did not pass the csv parser\")\n config.log.error(\"Message: %s\" % message)\n print(\"-----------------------\")\n print(\"CSV Diagnostic:\")\n if \"skiplines\" in format: # skip the lines.\n if format[\"skiplines\"] != -1:\n for n in range(format[\"skiplines\"]):\n oh.readline().rstrip(\"\\r\\n\")\n\n print(\"0:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"1:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"2:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"3:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"-----------------------\")\n print(\"Format Specifier: %s\" % (\" \".join([\"%s:%s\\t\" % (key, format[key]) for key in format])))\n print(\"Expected Format, based on the format specifier:\")\n oh.close()\n\n # This is a safe-ish version of loadCSV() that intelligently fails.\n\n if \"sniffer\" not in format:\n oh = open(file_handle, \"rU\")\n if \"dialect\" in format:\n reader = csv.reader(oh, dialect=format[\"dialect\"])\n else:\n reader = csv.reader(oh)\n\n try:\n if \"skiplines\" in format:\n skiplines = format[\"skiplines\"]\n else:\n skiplines = 0 # skip any header row by default.\n except:\n print(\"Error: End of File\") # premature end of file, skip out.\n print(\"-----------------------\")\n print(\"Error: %s\" % (message))\n return\n\n for index, column in enumerate(reader): # This is cryptically called column, when it is actually row.\n if index > skiplines:\n if column: # list is empty, so omit.\n if (not (column[0] in typical_headers)):\n d = {}\n for key in format:\n if not (key in ignorekeys): # ignore these tags\n try:\n if not key in d:\n d[key] = {}\n if isinstance(format[key], dict) and \"code\" in format[key]:\n # a code block insertion goes here - any valid lib and one line python code fragment\n # store it as a dict with the key \"code\"\n d[key] = eval(format[key][\"code\"]) # this always fails for some reason...\n else:\n d[key] = str(column[format[key]])\n except:\n d[key] = \"mangled\"\n print(\"%s\" % (\" \".join([\"%s:%s\" % (key, d[key]) for key in d])))\n if index > 3:\n break\n else:\n print(\" No specified format (glbase 
will guess)\")\n\n print(\"-----------------------\")\n config.log.error(\"End of error output\")", "def readInstance(self):\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()", "def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)", "def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)", "def getTuple(n,type=\"R\",thing = \"T\"):\r\n if type == \"R\":\r\n n=n+\".root\"\r\n print \"getting file \"+n\r\n \r\n file=TFile(n)\r\n t=file.Get(thing)\r\n if type==\"X\":\r\n translate(n)\r\n t,file=getTuple(n,\"R\")\r\n return t,file", "def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta \n #elif ( file_format.upper() == 'COMPACT' ):\n # read_func = read_compact\n #elif ( file_format.upper() 
== 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n self.colcount = 0\n for name, seq in read_func(file_obj):\n cseq, l = self.get_alignment_seq_object(seq)\n self[name] = cseq\n self.colcount = max(l, self.colcount)", "def read_properties(self, inputfile):\n raise NotImplementedError(\n \"Reading from this file format is not yet implemented\")", "def load_data(filename: str) -> Tuple[np.ndarray, np.ndarray]:", "def test_read_from_file():\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()", "def readFromFile(self, infile):\n\n self.mMapComponent2Object = {}\n self.mMapObject2Component = {}\n\n for line in infile:\n if line[0] == \"#\":\n continue\n\n data = line[:-1].split(\"\\t\")\n\n obj_id, obj_start, obj_end, ncoms, com_type, com_id = data[:6]\n\n if com_type == \"N\":\n continue\n com_start, com_end, orientation = data[6:9]\n\n obj_start, obj_end = int(obj_start) - 1, int(obj_end)\n com_start, com_end = int(com_start) - 1, int(com_end)\n\n orientation = orientation in (\"+\", \"0\", \"na\")\n\n if com_start != 0:\n raise ValueError(\"non zero com_start\")\n\n object = ObjectPosition()\n object.mId = obj_id\n object.start = obj_start\n object.end = obj_end\n object.mOrientation = orientation\n\n self.mMapComponent2Object[com_id] = object", "def test_read_data_augmented():\n data = read_data(\"src/tests/dataclassificationmodel/ferPlus_augment.pbz2\", True)\n assert len(data) == 7 and type(data) is tuple", "def read(self, filename):\n raise NotImplementedError", "def __init__(self, fileName):\n in_ = JInputStream.getInputStream(fileName)\n b = int()\n len = int()\n cs = int()\n addr = int()\n buf = [None]*255\n eof = False\n line = 0\n i = 0\n while len(ihxData):\n self.ihxData[i] = -1\n i += 1\n try:\n while not eof:\n while True:\n b = in_.read()\n if b < 0:\n raise IhxParseException(\"Inexpected end of file\")\n if not ((b != int(':'))):\n break\n line += 1\n len = self.readHexByte(in_)\n # length field \n cs = len\n b = self.readHexByte(in_)\n # address field \n cs += b\n addr = b << 8\n b = self.readHexByte(in_)\n cs += b\n addr |= b\n b = self.readHexByte(in_)\n # record type field\n cs += b\n while i < len:\n # data\n buf[i] = int(self.readHexByte(in_))\n cs += buf[i]\n i += 1\n cs += self.readHexByte(in_)\n # checksum\n if (cs & 0xff) != 0:\n raise IhxParseException(\"Checksum error\")\n if b == 0:\n # data record\n while i < len:\n if self.ihxData[addr + i] >= 0:\n System.err.println(\"Warning: Memory at position \" + Integer.toHexString(i) + \" overwritten\")\n self.ihxData[addr + i] = int((buf[i] & 255))\n i += 1\n elif b == 1:\n # eof record\n eof = True\n else:\n raise IhxParseException(\"Invalid record type: \" + b)\n except IhxParseException as e:\n raise IhxFileDamagedException(fileName, line, e.getLocalizedMessage())\n try:\n in_.close()\n except Exception as e:\n System.err.println(\"Warning: Error closing file \" + fileName + \": \" + e.getLocalizedMessage())", "def read_tuple(self):\n cmd = self.read_command()\n return StormTuple(\n cmd['id'], cmd['comp'], cmd['stream'], cmd['task'], cmd['tuple'])", "def __init__(self, inFilename):\n\n self._prmtopVersion=None\n self._flags=[]\n self._raw_format={}\n self._raw_data={}\n self._has_nbfix_terms = False\n\n with open(inFilename, 'r') as fIn:\n for line in fIn:\n if line[0] == '%':\n if line.startswith('%VERSION'):\n tag, 
self._prmtopVersion = line.rstrip().split(None, 1)\n elif line.startswith('%FLAG'):\n tag, flag = line.rstrip().split(None, 1)\n self._flags.append(flag)\n self._raw_data[flag] = []\n elif line.startswith('%FORMAT'):\n format = line.rstrip()\n index0=format.index('(')\n index1=format.index(')')\n format = format[index0+1:index1]\n try:\n m = FORMAT_RE_PATTERN.search(format)\n self._raw_format[self._flags[-1]] = (format, m.group(1), m.group(2), int(m.group(3)), m.group(4))\n except:\n # We couldn't parse the format, so just treat the whole line as a single string.\n self._raw_format[self._flags[-1]] = (format, 1, 'a', 80, '')\n elif line.startswith('%COMMENT'):\n continue\n elif self._flags \\\n and 'TITLE'==self._flags[-1] \\\n and not self._raw_data['TITLE']:\n self._raw_data['TITLE'] = line.rstrip()\n else:\n flag=self._flags[-1]\n (format, numItems, itemType,\n iLength, itemPrecision) = self._getFormat(flag)\n line = line.rstrip()\n for index in range(0, len(line), iLength):\n item = line[index:index+iLength]\n if item:\n self._raw_data[flag].append(item.strip())\n # See if this is a CHAMBER-style topology file, which is not supported\n # for creating Systems\n self.chamber = 'CTITLE' in self._flags", "def read_file(self, filename):\n with open(filename, 'r') as file:\n for line in file:\n l = line.strip()\n\n if l == ST_POS0:\n self._state = ST_POS0\n elif l == ST_TRNS:\n self._state = ST_TRNS\n elif l == ST_POS1:\n self._state = ST_POS1\n else:\n self._parse_line(l)\n self._state = None", "def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()", "def __call__(self, read, info: ModificationInfo):", "def apply(self, opened_file):", "def f_open(loc):\n file = open(loc)\n t, U = [], []\n for l in file:\n data = l.split(\",\") # 3<=>t; 4<=>U\n t.append(float(data[3]))\n U.append(float(data[4]))\n return t, U", "def test_read_file(self):\n restart_path = os.path.join(arc_path, 'arc', 'testing', 'restart(H,H2O2,N2H3,CH3CO2).yml')\n input_dict = read_file(restart_path)\n self.assertIsInstance(input_dict, dict)\n self.assertTrue('reactions' in input_dict)\n self.assertTrue('freq_level' in input_dict)\n self.assertTrue('use_bac' in input_dict)\n self.assertTrue('ts_guess_level' in input_dict)\n self.assertTrue('running_jobs' in input_dict)\n\n with self.assertRaises(InputError):\n read_file('nopath')", "def _read(self, openf=None, stepfilter=None):\n itemstack = []\n current = None\n result = {}\n xkeys = None\n timeskip = False\n laststep = False\n\n if openf is None:\n f = open(self.filepath)\n else:\n f = openf\n\n line = 'start'\n while line != '':\n lastpos = f.tell()\n line = f.readline()\n if line == '':\n continue\n \n if itemstack is not None and len(itemstack) > 0: \n cast = itemstack.pop()\n raw = line.split()\n values = [t(r) for t, r in zip(cast, raw)]\n if len(values) == 1:\n values = values[0]\n\n if current == \"time\":\n if stepfilter is not None and values not in stepfilter:\n timeskip = True\n elif (self.index is not None and values != self.index):\n if values > self.index:\n if openf is None:\n return {}\n else:\n timeskip = True\n laststep = True\n else:\n timeskip = True\n elif self.index is None:\n self.index = values\n else:\n timeskip = False\n \n if len(itemstack) == 0 and current not in result:\n result[current] = values\n else:\n if current not in result:\n result[current] = []\n result[current].append(values)\n continue\n elif itemstack is None and current == \"atoms\":\n if \"ITEM\" in line:\n current 
= None\n if openf is not None:\n f.seek(lastpos)\n break\n else:\n #E.g. line: 1 4 -65.9625 1.54915 1.46824 5 30.976 \n vals = line.split()\n sid, atype = tuple(map(int, vals[0:2]))\n result[\"type\"].append(atype)\n result[\"id\"].append(sid)\n x, y, z = tuple(map(float, vals[2:5]))\n result[\"xyz\"].append((x, y, z))\n if len(vals) > 5 and xkeys is not None:\n for ikey, v in enumerate(vals[5:]):\n result[xkeys[ikey]].append(eval(v))\n continue # pragma: no cover\n \n if \"ITEM: TIMESTEP\" in line:\n if laststep:\n f.seek(lastpos)\n break\n itemstack.append((int,))\n current = \"time\"\n timeskip = False\n elif not timeskip:\n if \"ITEM: NUMBER OF ATOMS\" in line:\n itemstack.append((int,))\n current = \"natoms\"\n elif \"ITEM: BOX BOUNDS\" in line:\n period = line.strip().split(\"BOX BOUNDS\")\n if len(period) == 2 and period[1] != '':\n result[\"periodic\"] = period[1].strip().split()\n else:\n result[\"periodic\"] = (\"ss\", \"ss\" ,\"ss\")\n \n\t\t # Changes by JPRIEDS to accommodate triclinic boxes\n\t\t # Written 170719\n\t\t if len(result[\"periodic\"]) == 6:\n\t\t\titemstack.extend([(float, float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t\tresult[\"periodic\"] = result[\"periodic\"][3:]\n\t\t elif len(result[\"periodic\"]) == 3:\n\t\t\titemstack.extend([(float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t else:\n emsg = \"Could not classify periodic bounds: {}\"\n raise ValueError(emsg.format(result[\"periodic\"]))\n elif \"ITEM: ATOMS\" in line:\n itemstack = None\n current = \"atoms\"\n result[\"type\"] = []\n result[\"id\"] = []\n result[\"xyz\"] = []\n \n #The first two headings in the line have \"ITEM: ATOMS\", the\n #rest are usuall id, type, x, y, z, rest...\n headings = line.split()\n extras = len(headings) > 7\n if extras:\n xkeys = []\n xheadings = headings[7:]\n for xhead in xheadings:\n key = \"atom:{}\".format(xhead)\n result[key] = []\n xkeys.append(key)\n \n if openf is None:\n #Close the file since we opened it.\n f.close()\n \n return result", "def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < 
num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')", "def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")", "def _fromFile(self,filepath, filename):\n pass", "def _read_infile_with_tplfile(tpl_file, input_file):\n\n if not os.path.exists(input_file):\n raise Exception(\"input file '{0}' not found\".format(input_file))\n\n f_tpl = open(tpl_file, \"r\")\n f_in = open(input_file, \"r\")\n\n # read the tpl header\n _, marker = f_tpl.readline().split()\n itpl, iin = 1, 0\n pnames, pvals = [], []\n pdict = {}\n while True:\n tpl_line = f_tpl.readline()\n if tpl_line == \"\":\n break\n\n in_line = f_in.readline()\n if in_line == \"\":\n raise Exception(\n \"input file EOF, tpl file line {0}, in file line {1}\".format(itpl, iin)\n )\n\n if marker in tpl_line:\n idxs = [i for i, ltr in enumerate(tpl_line) if ltr == marker]\n if len(idxs) % 2 != 0:\n raise Exception(\"unbalanced markers on tpl line {0}\".format(itpl))\n\n for s, e in zip(idxs[0:-1:2], idxs[1::2]):\n tpl_str = tpl_line[s : e + 1]\n pname = tpl_str.replace(marker, \"\").strip().lower()\n if s > len(in_line):\n raise Exception(\n \"input file EOL line {0}, tpl line {1}, looking for {2}\".format(\n iin, itpl, tpl_str\n )\n )\n junk_val = \"Jennyigotunumber8675309\"\n tmp = tpl_line[:s] + \" {} \".format(junk_val) + tpl_line[e + 1 :]\n if len(tmp.split()) == len(in_line.split()):\n # treat this as whitespace delimited\n in_str = in_line.split()[tmp.split().index(junk_val)]\n else:\n # or we must assume the params are written using the same spacing as template file\n in_str = in_line[s : e + 1]\n try:\n v = float(in_str)\n except Exception as e:\n raise Exception(\n \"error casting '{0}' to float on in line {1}, tpl line {2} for {3}: {4}\".format(\n in_str, iin, itpl, tpl_str, str(e)\n )\n )\n\n if pname in pdict:\n eval = pdict[pname]\n if not np.isclose(eval, v, 1.0e-6):\n raise Exception(\n \"different values {0}:{1} for par {2} on in line {3}\".format(\n v, eval, pname, iin\n )\n )\n else:\n pnames.append(pname)\n pvals.append(v)\n pdict[pname] = v\n itpl += 1\n iin += 1\n df = pd.DataFrame({\"parnme\": pnames, \"parval1\": pvals}, index=pnames)\n return df", "def read_parsed_data(parsed_filename_path, parsed_topology_data_path):\n with open(parsed_filename_path, 'rb') as f:\n file_name = pk.load(f)\n with open(parsed_topology_data_path, 'rb') as 
f:\n topology_info = pk.load(f)\n return file_name, topology_info", "def __init__(self, file_format, location):\n Reader.__init__(self, file_format, location)", "def loaditems(self, fh):\n pass", "def read(self, fileobj):\n raise NotImplementedError", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n st = self.__createStudentFromLine(line)\n # invoke the store method from the base class\n StudentsRepo.store_student(self, st)\n fh.close()", "def populate(infile):\n main(infile)", "def read_test_tuples():\n lines = read_input(25, True)\n point_sets = list(parse_points(lines))\n expected_counts = [4, 3, 8]\n\n return zip(point_sets, expected_counts)", "def initialize_from_file(fname: Path) -> Tuple[List, List]:\n item_set_list = []\n frequency = []\n\n with open(fname, \"r\") as file:\n csv_reader = reader(file)\n for line in csv_reader:\n line = list(filter(None, line))\n item_set_list.append(line)\n frequency.append(1)\n\n return item_set_list, frequency", "def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)", "def __loadFromFile(self):\n try:\n f=open(self.__fileR, \"r\")\n line =f.readline().strip()\n rez=[]\n while line!=\"\":\n attrs=line.split(\",\")\n rt=Rent(attrs[0], attrs[1], attrs[2], attrs[3])\n rez.append(rt)\n line=f.readline().strip()\n f.close()\n return rez\n #the file cannot be reached\n except IOError:\n return None", "def test_file_access():\n file = gff.GFFFile()\n entry_scaffold = (\"ab\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n entry = (\"a\",) + entry_scaffold\n file.append(*entry)\n assert file[0] == entry\n file.append(*((\"b\",) + entry_scaffold))\n file.insert(1, *((\"c\",) + entry_scaffold))\n file[1] = (\"d\",) + entry_scaffold\n file.insert(3, *((\"e\",) + entry_scaffold))\n del file[2]\n assert [seqid for seqid, _, _, _, _, _, _, _, _ in file] \\\n == [\"a\", \"d\", \"e\", ]", "def initFromFile(self,file):\n self.source = file\n file_reader = open(file,\"r\")\n self.isInit = True\n lineCounter = 0\n firstLine = None\n SecondLine = None\n ThirdLine = None\n for line in file_reader:\n if(lineCounter == 0):\n firstLine = line.split()\n self.rowsNumber = int(firstLine[0])\n self.columnsNumber = int(firstLine[1])\n self.routerRangeRadius = int(firstLine[2])\n if(lineCounter == 1):\n SecondLine = line.split()\n self.backBoneCosts = int(SecondLine[0])\n Path.backBoneCost = self.backBoneCosts\n self.routerCosts = int(SecondLine[1])\n self.budget = int(SecondLine[2])\n if(lineCounter == 2):\n ThirdLine = line.split()\n self.firstCell = Cell(int(ThirdLine[0]),int(ThirdLine[1]))\n if(lineCounter>2):\n self.map.append([])\n LINE = line\n columnCounter = 0\n for char in LINE:\n temp = Cell(len(self.map)-1,columnCounter,Cell.getCellType(char))\n self.map[len(self.map)-1].append(temp)\n if(temp.cellType == \"FLOOR\"):\n self.notComputeRouter.append(temp)\n columnCounter += 1\n lineCounter +=1\n self.isInit = True", "def __init__(self,file_reader):\n self.file_reader = file_reader", "def test_call_requires_read_1_file(self):\r\n p = RtaxTaxonAssigner({\r\n 'reference_sequences_fp': self.reference_seqs_fp,\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_fp})\r\n\r\n # no read_1_seqs_fp passed results in AssertionError\r\n 
self.assertRaises(AssertionError, p, self.input_seqs_fp)", "async def load(self, file: IO) -> dict:", "def __init__(self,file_path):\n\t\tdata_reader = csv.DictReader(file(file_path,'rU'))\n\t\tfor row in data_reader:\n\t\t\t# we have to turn the strings into floating point numbers.\n\t\t\tc = Compound( name = row['Name'],\n\t\t\t Antoine_params = [float(row['Antoine A']),float(row['Antoine B']),float(row['Antoine C'])],\n\t\t\t mass_density = float(row['Mass Density']),\n\t\t\t MW = float(row['Molecular Weight']),\n\t\t\t #Hvap = float(row['Enthalpy of Vaporization']),\n\t\t\t Cp = float(row['Molar Heat Capacity']) )\n\t\t\t# place it in the dictionary\n\t\t\t#print \"Have just read in \",c\n\t\t\tself[c.name] = c", "def __init__(self, in_file=None):\n self.key = 2\n self.code = 'C'\n self.numnod = None\n self.format = None\n self.nodes = []\n if in_file is not None:\n self._read(in_file)", "def read_data(filename, use):\r\n with open(os.path.join(os.getcwd(), filename)) as csvfile:\r\n read_csv = csv.reader(csvfile, delimiter=',')\r\n if use == 'exp':\r\n data = set()\r\n for row in read_csv:\r\n data.add(tuple(row))\r\n elif use == 'field':\r\n data = {}\r\n for row in read_csv:\r\n data[row[0]] = int(row[1])\r\n return data", "def __init__(self, filename):\n\n parser = Parser(filename=filename)\n self.uuid = parser.segregated(parser.read(),'UUID')\n self.id = parser.segregated(parser.read(),'ID')\n self.rate = parser.segregated(parser.read(),'RATE')\n self.gpio = parser.segregated(parser.read(),'GPIO')\n self.ddl = parser.segregated(parser.read(),'DATA_DELIVERY_LOCATION')", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n\n def process_ihm(x):\n return list(map(int, x.split(';')))\n\n def process_los(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x)//2])), list(map(float, x[len(x)//2:])))\n\n def process_ph(x):\n return list(map(int, x.split(';')))\n\n def process_decomp(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x)//2])), list(map(int, x[len(x)//2:])))\n\n self._data = [(fname, float(t), process_ihm(ihm), process_los(los),\n process_ph(pheno), process_decomp(decomp))\n for fname, t, ihm, los, pheno, decomp in self._data]", "def _fromfile(self, fh):\r\n fh.seek(0)\r\n data = fh.read(4096)\r\n if (len(data) < 7) or not (b'0' < data[1:2] < b'8'):\r\n raise ValueError(\"Not a Netpbm file:\\n%s\" % data[:32])\r\n try:\r\n self._read_pam_header(data)\r\n except Exception:\r\n try:\r\n self._read_pnm_header(data)\r\n except Exception:\r\n raise ValueError(\"Not a Netpbm file:\\n%s\" % data[:32])", "def load(self, file):\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n\n f = file.readlines()\n k = 0\n while k < len(f) and \"Boulder {\" not in f[k]:\n k += 1\n k += 1\n while \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n\n while k < len(f) and \"Elephant {\" not in f[k]:\n k += 1\n k += 1\n while \":\" in f[k] and \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(\"]\")[0].split(\",\")\n xdir, ydir = 0, 0\n if d[0] == \"1\":\n xdir = 1\n elif d[0] == \"-1\":\n xdir = -1\n if d[1] == \"1\":\n ydir = 1\n elif d[1] == \"-1\":\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n\n while k < len(f) 
and \"Rhinoceros {\" not in f[k]:\n k += 1\n k += 1\n while \":\" in f[k] and \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(\"]\")[0].split(\",\")\n xdir, ydir = 0, 0\n if d[0] == \"1\":\n xdir = 1\n elif d[0] == \"-1\":\n xdir = -1\n if d[1] == \"1\":\n ydir = 1\n elif d[1] == \"-1\":\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n\n file.close()", "def test_parse_flag_file_2(self):\n flag_file = Path(test_file_dir, \"test_flag_file_2.csv\")\n flag_dict = basic.parse_flag_file(flag_file)\n self.assertEqual(len(flag_dict.keys()), 1)", "def load(self):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpack('4s3i',16,'REC_HEAD')\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--LEVC?\n if name == 'LEVC':\n levc = Levc(name,size,delFlag,recFlag,ins,True)\n self.levcs[levc.id] = levc\n if canSave: self.records.append(levc)\n #print ' Added:',levc.id\n elif name == 'LEVI':\n levi = Levi(name,size,delFlag,recFlag,ins,True)\n self.levis[levi.id] = levi\n if canSave: self.records.append(levi)\n #print ' Added:',levi.id\n #--Other\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def prepare_input(self):\n super().prepare_input()\n input_file = self._input_filepath.open(encoding='utf-8')\n input_formatted_file = Path(self.__input_formatted_filepath).open('w', encoding='utf8')\n for line in input_file.readlines():\n for token in line.split():\n if token.endswith('.') or token.endswith(','):\n input_formatted_file.write('{0}\\n{1}\\n'.format(token[:-1], token[-1]))\n else:\n input_formatted_file.write('{}\\n'.format(token))", "def read_slithertxt(filename: os.PathLike) -> tuple:\n\n reg_statistics = list()\n consume = False\n with open(filename, \"r\") as f:\n for line in f:\n if not consume:\n if line.startswith(\" FromLine\"):\n reg_statistics.append(line)\n consume = True\n else:\n continue\n else:\n if line.isspace():\n consume = False\n break\n else:\n reg_statistics.append(line)\n\n matchline = list()\n lineoffset = list()\n sampoffset = list()\n\n dialect = csv.Dialect\n dialect.delimiter = \" \"\n dialect.skipinitialspace = True\n dialect.quoting = csv.QUOTE_NONE\n dialect.lineterminator = \"\\n\"\n\n reader = csv.DictReader(reg_statistics, dialect=dialect)\n for row in reader:\n matchline.append(float(row[\"MatchLine\"]))\n lineoffset.append(float(row[\"LineOffset\"]))\n sampoffset.append(float(row[\"SampOffset\"]))\n\n return (np.array(matchline), np.array(lineoffset), np.array(sampoffset))", "def test_DL_import_wrong_file_serialized(self):\n filepath = '5.txt'\n with open(filepath, 'wb') as file:\n pickle.dump([\"This is a wrong dataset\"], file)\n # Check if exception was raised for wrong data type\n with self.assertRaises(Exception):\n flow_processing_input.DetectorsLocation(9999, filepath)\n os.remove(filepath)", "def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n 
reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]", "def file_reader(filename = 'conv_params'):\n\n with open(filename) as f:\n info = f.readlines()\n info = [i.strip() for i in info] # each element in info is a string of a line from the file\n info = [i.split() for i in info] # split each whitespace delimited element into a list of lists\n info = [[i.split('-') for i in j] for j in info] # note info is 3 layers deep\n\n info[2] = info[2][0] # makes default E just a single string of the number\n info[3] = info[3][0]\n\n return info" ]
[ "0.62718296", "0.6199125", "0.6139345", "0.61193955", "0.60940003", "0.59377885", "0.59339136", "0.5890041", "0.5879968", "0.5824287", "0.5814559", "0.5812669", "0.57651573", "0.5727909", "0.5723036", "0.57121086", "0.570808", "0.57077694", "0.5688802", "0.5678212", "0.5677304", "0.5625205", "0.562451", "0.5605621", "0.55691004", "0.55582535", "0.55582535", "0.555072", "0.5541127", "0.55330527", "0.55220383", "0.5486732", "0.5480248", "0.5478322", "0.5452975", "0.54504126", "0.54395854", "0.5436466", "0.54149675", "0.5414654", "0.54123026", "0.54116464", "0.5403738", "0.5398129", "0.5393059", "0.5392636", "0.53913176", "0.5382239", "0.5381494", "0.5381034", "0.5374886", "0.5372705", "0.53608567", "0.535975", "0.53359485", "0.53316087", "0.53169006", "0.53110105", "0.5301282", "0.5299763", "0.5299061", "0.52808553", "0.5269194", "0.5261575", "0.5260474", "0.52543825", "0.5249742", "0.52451646", "0.5241626", "0.5236154", "0.52319616", "0.5222482", "0.5221932", "0.52208334", "0.5217865", "0.5213911", "0.5213377", "0.5206479", "0.5205968", "0.52056056", "0.520334", "0.51957566", "0.51936793", "0.5192429", "0.5189435", "0.51828957", "0.5180425", "0.5169424", "0.5167028", "0.51613224", "0.51548946", "0.51528066", "0.5151239", "0.51472664", "0.5146073", "0.5145947", "0.5145416", "0.51399827", "0.5133095", "0.51326287" ]
0.6141999
2
Unique drug list dict is correctly returned.
def test_get_unique_drug_list(self):
    dict1 = self.test_dict
    dict2 = get_unique_drug_list(self.test_sorted_tuple)
    self.assertEqual(dict1, dict2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())", "def _get_unique_genres(connection):\n print('---Getting unique genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict", "def getDrugData(self, moleculeChEMBLIdList):\n oD = {}\n chunkSize = 50\n try:\n for ii in range(0, len(moleculeChEMBLIdList), chunkSize):\n drug = new_client.drug # pylint: disable=no-member\n drug.set_format(\"json\")\n mDL = drug.filter(molecule_chembl_id__in=moleculeChEMBLIdList[ii : ii + chunkSize])\n if mDL:\n logger.info(\"mDL (%d)\", len(mDL))\n for mD in mDL:\n oD.setdefault(mD[\"molecule_chembl_id\"], []).append(mD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))", "def test_magicdictlist_dedupe():\n d1 = magicdictlist()\n\n d1['key1'].append('1 hello')\n d1['key1'].append('1 world')\n d1['key2'].append('2 hello')\n d1['key1'].append('1 world')\n\n d2 = d1.dedupe()\n assert len(d2) == 2\n assert len(d2['key1']) == 2\n assert len(d2['key2']) == 1\n assert set(d2['key1']) == set(['1 hello', '1 world'])\n assert d2['key2'] == ['2 hello']", "def __init__(self):\n self.d = collections.defaultdict(list)", "def _get_rekey_ddi_data(ddi_data):\n for enum, item in enumerate(ddi_data):\n ddi_data[enum] = dict((d['network'],\n dict(d, index=index))\n for (index, d) in enumerate(item))\n return ddi_data", "def get_dict(cleaned_docs):\n data = []\n for doc in cleaned_docs:\n data += doc\n return list(set(data))", "def _create_dictionary_of_ned_d(\n self):\n self.log.debug(\n 'starting the ``_create_dictionary_of_ned_d`` method')\n\n count = 0\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n totalRows = sum(1 for row in csvReader)\n csvFile.close()\n totalCount = totalRows\n\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n theseKeys = []\n dictList = []\n for row in csvReader:\n if len(theseKeys) == 0:\n totalRows -= 1\n if \"Exclusion Code\" in row and \"Hubble const.\" in row:\n for i in row:\n if i == \"redshift (z)\":\n theseKeys.append(\"redshift\")\n elif i == \"Hubble const.\":\n theseKeys.append(\"hubble_const\")\n elif i == \"G\":\n theseKeys.append(\"galaxy_index_id\")\n elif i == \"err\":\n theseKeys.append(\"dist_mod_err\")\n elif i == \"D (Mpc)\":\n theseKeys.append(\"dist_mpc\")\n elif i == \"Date (Yr. 
- 1980)\":\n theseKeys.append(\"ref_date\")\n elif i == \"REFCODE\":\n theseKeys.append(\"ref\")\n elif i == \"Exclusion Code\":\n theseKeys.append(\"dist_in_ned_flag\")\n elif i == \"Adopted LMC modulus\":\n theseKeys.append(\"lmc_mod\")\n elif i == \"m-M\":\n theseKeys.append(\"dist_mod\")\n elif i == \"Notes\":\n theseKeys.append(\"notes\")\n elif i == \"SN ID\":\n theseKeys.append(\"dist_derived_from_sn\")\n elif i == \"method\":\n theseKeys.append(\"dist_method\")\n elif i == \"Galaxy ID\":\n theseKeys.append(\"primary_ned_id\")\n elif i == \"D\":\n theseKeys.append(\"dist_index_id\")\n else:\n theseKeys.append(i)\n continue\n\n if len(theseKeys):\n count += 1\n if count > 1:\n # Cursor up one line and clear line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n if count > totalCount:\n count = totalCount\n percent = (float(count) / float(totalCount)) * 100.\n print \"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\" % locals()\n rowDict = {}\n for t, r in zip(theseKeys, row):\n rowDict[t] = r\n if t == \"ref_date\":\n try:\n rowDict[t] = int(r) + 1980\n except:\n rowDict[t] = None\n\n if rowDict[\"dist_index_id\"] != \"999999\":\n dictList.append(rowDict)\n\n csvFile.close()\n\n self.log.debug(\n 'completed the ``_create_dictionary_of_ned_d`` method')\n return dictList", "def remove_duplicates(data):\n already_used_items = {}\n return_data = []\n\n for item in data:\n # Yes, I know that I can find used items in the return_data,\n # but HW requires this logic.\n if not already_used_items.get(item):\n return_data.append(item)\n already_used_items[item] = True\n\n return return_data", "def _uniq( list ) : \r\n \r\n d = {} \r\n for e in list : \r\n d[e] = 1 \r\n \r\n return d.keys()", "def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())", "def removeDups(lst):\n\n return list(dict.fromkeys(lst) )", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def dangling_pic_list(pic):\n if pic and not pic.person_set.count():\n ids.append(pic.key().id())", "def item_duplicate():\n return {'name':'chair',\n 'value':300}", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))", "def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))", "def to_listing_dict(self) -> dict:\n data = super().to_listing_dict()\n return data", "def unique_rp(db):\n for rp in sorted(db['rp'].keys()):\n print(rp)", "def getSet(unique_name):", "def getSet(unique_name):", "def unique(self, key, lst=None):\n d = self.find(key, lst)\n vals = set(d.values())\n return sorted(list(vals))", "def get_sensor_dict():\n\n with open('last_seen.json') as json_file:\n stored_dict = json.load(json_file)\n\n new_list = []\n for dev in stored_dict['devices']:\n new_list.append(dev['id'])\n unique_list = list(set(new_list))\n\n return stored_dict, unique_list", "def __init__(self):\n self.d = defaultdict(list)", "def shuffled_data_reset(self):\n self.unique_data = {}\n for stock in self.stocks:\n self.unique_data[stock] = []\n for date in self.dates:\n self.unique_data[stock] += [date]", "def drug_names():\n results = set()\n if 'qry' in request.args and len(request.args['qry']) >= 3:\n look_for = f\"{request.args['qry'].lower()}%\"\n drug_list = FTA.find_by_name(look_for, False )\n results = set([f\"{d.PROPRIETARY_NAME} - {d.NONPROPRIETARY_NAME}\" for d in drug_list if d.ACTIVE])\n\n results = sorted(list(results))\n return jsonify(results)", "def dgen(self, d,k,v):\n\t\tif k not 
in d.keys():\n\t\t\td.setdefault(k,{})\n\t\t\td[k].update(v)\n\t\telif k in d.keys():\n\t\t\t# remove psscan colors if pslist already found something\n\t\t\tif \"color\" in d[k].keys():\n\t\t\t\tif d[k][\"color\"] == \"black\":\n\t\t\t\t\tif \"color\" in v.keys():\n\t\t\t\t\t\tif v[\"color\"] == \"blue\":\n\t\t\t\t\t\t\tdel v[\"color\"]\n\t\t\t\t\tif \"fillcolor\" in v.keys():\n\t\t\t\t\t\tif v[\"fillcolor\"] == \"cyan\":\n\t\t\t\t\t\t\tdel v[\"fillcolor\"]\n\t\t\td[k].update(v)", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def unique(self):\n return self.d_series.map_partitions(\n lambda s: s.list.unique(), meta=self.d_series._meta\n )", "def __init__(self):\r\n self.dct = defaultdict(list)", "def _reset_track_lst(self):\n del self._track_item\n self._track_item = {key : [] for key in self._track_lst}\n return self._track_item", "def outputDrugData(filename, List):\n\n data = {}\n dataFile = open(filename, \"r\")\n lines = []\n drugs = {}\n for i in range(0,len(List)):\n drugs[List[i]] = i\n \n for line in dataFile.readlines():\n lines.append(line.strip())\n for line in lines:\n cur = line.split(\",\")\n if cur[1] in data.keys():\n data[cur[1]]\n data[cur[1]].pop(drugs[cur[2]])\n data[cur[1]].insert(drugs[cur[2]], \"1\")\n else:\n data[cur[1]] = [\"0\"]*len(List)\n data[cur[1]].pop(drugs[cur[2]])\n data[cur[1]].insert(drugs[cur[2]], \"1\")\n\n for key in data.keys():\n #key, string of 0s and 1s corresponding to values in columns in order\n print key+\",\"+\",\".join(data[key])+\",\"", "def collect_items_user_dict(self, test_data):\n items_to_fill = {}\n for row in test_data:\n user = row[0]\n item = row[1]\n if item not in items_to_fill:\n items_to_fill[item] = []\n items_to_fill[item] += [user.item()]\n\n return items_to_fill", "def _do_generate_webclient_stocklist(self) -> dict:\n # NOTE: as we want dicts and not Location instances, we go directly to\n # the 'SQL level' (session.execute() and not the 'ORM level' (session.query())\n # of sqlquery.\n loclst = self.get_location_list()\n itmlst = self.get_reagent_item_list()\n itmstat = self.get_reagent_item_status_list()\n\n # create a Dict[locationid, List[reagentitem]] and a Dict[RFID, reagentitem]\n d_d: typing.Dict[typing.Optional[int], typing.List[dict]] = {}\n # rfid_reagitem_dct = ff = {}\n f_f: typing.Dict[str, dict] = {}\n for reag_item in itmlst:\n loc_id = reag_item.get('qcs_location_id', None)\n # we will keep a list of items with None locations... 
should not happen, but does\n # then we add these to the UNKNOWN list later on\n d_d.setdefault(loc_id, []).append(reag_item)\n # if loc_id is not None:\n # else:\n # raise RuntimeError(\"found None location {}\".format(reag_item))\n #\n rfidstr = reag_item.get('rfid', None)\n if rfidstr is not None:\n if rfidstr != 'REPLACE ME':\n f_f.setdefault(rfidstr, reag_item)\n else:\n raise RuntimeError(\"found None location {}\".format(reag_item))\n # unmangling for None...\n # find loc_id for 'UNKNOWN'...\n if None in d_d:\n none_lst = d_d[None]\n del d_d[None]\n flst = [loc for loc in loclst if loc['name'] == 'UNKNOWN']\n assert len(flst) == 1, \"cannot determine 'UNKNOWN' location\"\n unknown_lst = d_d.setdefault(flst[0]['id'], [])\n unknown_lst.extend(none_lst)\n #\n # NOW, create a Dict[locationid, Tuple[locrecord, List[reagentitem]]]\n # which we send to the client\n r_r: typing.Dict[int, typing.Tuple[dict, typing.List[dict]]] = {}\n locid_reagitem_dct = r_r\n for location in loclst:\n loc_id = location.get('id', None)\n r_r[loc_id] = (location, d_d.get(loc_id, []))\n assert len(r_r) == len(loclst), \"problem with location ids!\"\n #\n # collect the state records for each reagent item...\n z_z: typing.Dict[int, list] = {}\n for state in itmstat:\n reag_item_id = state['qcs_reag_item_id']\n # we want to replace the occurred timedate entry with a simple date\n # to present to the user, i.e.\n # 'occurred': '2011-04-20T00:00:00Z' -> '2011-04-20'\n dstr = state['occurred']\n state['occurred'] = dstr.split('T')[0]\n z_z.setdefault(reag_item_id, []).append(state)\n # and evaluate the 'final state' for each reagent item\n ritemdct = {}\n for reag_item in itmlst:\n reag_item_id = reag_item['id']\n state_lst = z_z.get(reag_item_id, None)\n if state_lst is None:\n state_info = None\n else:\n state_info = self.calc_final_state(state_lst)\n # print(\"BLAAA {} {}\".format(reag_item_id, state_info))\n # we eliminate any reagent item that has a state of 'USED_UP'.\n dct, ismissing, hasexpired = state_info\n state_info = None if dct['status'] == 'USED_UP' else state_info\n if state_info is not None:\n ritemdct[reag_item_id] = (reag_item, state_info)\n # else:\n # print(\"skipping {}\".format(reag_item))\n # create a Dict[reagentid, reagent]\n rl = self.get_reagent_list()\n rg = {}\n for reagent in rl:\n # delete the legacy location field in reagents...\n reagent.pop('location', None)\n reagent_id = reagent.get('id', None)\n if reagent_id is not None:\n rg[reagent_id] = reagent\n else:\n raise RuntimeError(\"reagent ID is None\")\n assert len(rg) == len(rl), \"problem with reagent ids!\"\n # \"itmstatlst\": itmstat,\n # finally, sort the loclst according to a hierarchy\n loclst = sortloclist(loclst)\n # , \"rfiddct\": rfid_reagitem_dct}\n return {\"loclst\": loclst, \"locdct\": locid_reagitem_dct,\n \"ritemdct\": ritemdct, \"reagentdct\": rg}", "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def create_rdict(d):\n rd = {}\n for (key,value) in d.items():\n\n v = rd.setdefault(value, set([]))\n v.add(key)\n\n return rd", "def __init__(self):\n self.ds = set()\n self.keys = []", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def 
get_listu_uredjaja(self):\n lista = sorted(list(self.uredjaji.keys()))\n return lista", "def as_dict(self):\n d = {}\n for name, competition, sid in self.get_queryset().values_list('name', 'competition', 'id'):\n d[(name, competition)] = sid\n return d", "def uniq_data(self):\n return (self.date, self.detection_chip_type, self.drop_psi,\n self.gain, self.instrument, self.laser_power, self.line_rate,\n self.oil_psi)", "def unique(self):\n # variables for uniques \n self._currentSet = 1\n self._uniqueValue = {}\n\n pd = self._dataTable\n for col in pd:\n arr = pd[col].unique()\n for i in arr:\n unique_entry = ((col,i),)\n self._uniqueValue[unique_entry] = 0 \n\n self._sets[self._currentSet] = self._uniqueValue", "def dictogram_dictlist(self):\n for key, value in self.word_dict.items():\n self.word_dict[key] = dictogram.Dictogram(value)\n # print(\"self.word_dict\", self.word_dict)", "def uniqueResults( self, results ):\n rid_map = {}\n for r in results:\n rid_map[r.getRID()] = r\n return rid_map.values()", "def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def _unique(li):\n return list(set(li))", "def get_kritis(self, kriti_list):\n self.kritis = [[k.name, k.composer, k.link] for k in kriti_list if \n k.raga == self.name]", "def unique_list(src_list):\n return list(OrderedDict.fromkeys(src_list).keys())", "def dictOfDraws(self):\n return dict()", "def assign_no_to_node(self,list):\n list = sorted(list)\n d = {}\n for i,node in enumerate(list):\n #print i,node\n d[node] = i \n return d,len(d)", "def ignored_values(self):\r\n return dict()", "def __init__(self):\n self.seen = {}", "def _dd():\n return defaultdict(_dd)", "def _dict_list(self, other, num):\n outlist = [[self]]\n oldlist = [[]]\n while outlist != oldlist:\n oldlist = outlist[:]\n for i, v in enumerate(outlist):\n templist = v[-1]._dlist[num].keys()\n for i2, v2 in enumerate(templist):\n if not v.__contains__(v2):\n littletemplist = v + [v2]\n if not outlist.__contains__(littletemplist):\n outlist.append(littletemplist)\n for i, v in enumerate(oldlist):\n if v[-1] != other:\n outlist.remove(v)\n outlist.sort(key = len)\n if len(outlist) != 0:\n return outlist[0]\n raise ValueError('No Connecting Path found between ' + self.name +\n ' and ' + other.name)", "def get_unique_kmer(kmer_dict, sequence, seq_id, km_size):\n for seq in cut_kmer(sequence, km_size):\n if seq not in kmer_dict:\n kmer_dict[seq] = [seq_id]\n elif seq_id not in kmer_dict[seq]:\n kmer_dict[seq].append(seq_id)\n return kmer_dict", "def __init__(self):\n self.dic = defaultdict(list)", "def getParsedDic(self):\n return {}", "def build_unq_dict_lst(self, lst1, lst2, key1 = \"start_index\", key2 = \"random_seed\"):\n dict_lst = []\n for i in range(len(lst1)):\n for j in 
range(len(lst2)):\n dictt = {}\n dictt[key1] = lst1[i]\n dictt[key2] = lst2[j]\n dict_lst.append(dictt)\n return dict_lst", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def __init__(self):\n # here need a set instead of list since the after a free insert and delete the order may change\n self.dict = defaultdict(set) # insert and delete O(1).\n self.arr = [] # O(1) for get random", "def tri(self, dico):\n return sorted(dico.keys(), key=str)", "def unique_values(self):\n return DiscoDBInquiry(super(DiscoDB, self).unique_values)", "def drug_names_on_drug_list(drug_list):\n return [dl[\"Drug (brand name)\"] for dl in drug_list]", "def __init__(self):\n self.d = dict()\n self.arr = [set() for i in range(0,26)]", "def frequentOneItem(self):\n\n candidate = {}\n # global finalPatterns, minSup, Database\n # self.minSup = self.minSup\n for i in range(len(self.Database)):\n for j in range(len(self.Database[i])):\n if self.Database[i][j] not in candidate:\n candidate[self.Database[i][j]] = [i]\n else:\n candidate[self.Database[i][j]] += [i]\n self.finalPatterns = {keys: value for keys, value in candidate.items() if len(value) >= self.minSup}\n #print(candidate)", "def _remove_duplicates(self):\n for key in self._role_strings_info:\n self._role_strings_info[key] = [dict(tupleized) for tupleized in set(tuple(item.items())\n for item in self._role_strings_info[key])]", "def to_dictionary(self):\n list_dic = {}\n list_dic['id'] = self.id\n list_dic['width'] = self.__width\n list_dic['height'] = self.__height\n list_dic['x'] = self.__x\n list_dic['y'] = self.__y\n return (list_dic)", "def nsrGenera(taxonList, synonymList):\r\n species = list(filter(None, sorted(taxonList + synonymList)))\r\n generaList = [i.split()[0] for i in species]\r\n generaList = list(dict.fromkeys(generaList))\r\n return generaList", "def _unique(iterable):\n return list(dict.fromkeys(iterable))", "def updateDict(self,strSet):\n\tself.createAdjList(strSet,\"remove\")", "def secondary_keys_dicts(self):", "def remove_duplicates_in_items(items: list, id_key: str) -> list:\n ids = {}\n new_items = []\n for item in items:\n item_id = item.get(id_key)\n if item_id not in ids:\n ids[item_id] = True\n new_items.append(item)\n\n return new_items", "def search_not_uniq(list):\n\n not_uniq = {}\n for i in set(list):\n count = list.count(i)\n if count != 1:\n not_uniq[i] = count\n return not_uniq", "def _dictview(self) -> TracksDict:\n return self._items[self._keys, self._beads] # type: ignore", "def __init__(self):\n self._main_dictionary = defaultdict(set)", "def _prepare_dimensions(self, dimensions):\n result = {'brand': []}\n ids = []\n\n for item in dimensions:\n key = item['dimensions__name'].lower().replace(' ', '_')\n value = item['dimensions__value']\n if key in result:\n result[key].append(value)\n else:\n result[key] = [value]\n if item['id'] not in ids:\n result['brand'].append(item['brand__name'])\n ids.append(item['id'])\n\n return result", "def test_list_facet_dictionaries(self):\n pass", "def _extra_keys(self):\r\n return []", "def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list", "def dlist(src):\n if isinstance(src, dict):\n for k in src:\n src[k] = dlist(src[k])\n if set(src) == set([str(k) for k in range(len(src))]):\n src = [src[str(k)] for k in range(len(src))]\n return src", "def get_dicts(self, 
clean=False):\n return list(self.iter_dicts(clean=clean))", "def as_dict(self) -> dict:\n category_dict = {}\n for gadza in self:\n category_dict[gadza.gadza_key] = gadza\n return category_dict", "def dubk(d, x, y, z):\n try:\n d[x][y]['list'].append(z)\n except KeyError as e:\n d[x][y] = {'list': [z]}", "def get_list(self, d, kind=None, debug=False, **kwargs):\n\n if self.cloudman:\n entries = []\n for entry in d:\n entries.append(dict(entry))\n # VERBOSE(entries)\n\n return self.update_dict(entries, kind=kind)\n return None", "def get_dict(self):\n return", "def dbase():\r\n albums_data = {}\r\n song_dict = {}\r\n songs_list = []\r\n with open(PATH, 'r') as f:\r\n data = f.read()\r\n temp = data.split(\"#\")\r\n for album in temp[1:]:\r\n index = album.find(\"::\")\r\n albums_data[album[:index]] = \"\"\r\n for album in temp[1:]:\r\n album = album.split(\"*\")\r\n album_name = album[0][:-7]\r\n release_Date = album[0][-5:]\r\n del album[0]\r\n for song in album:\r\n info = song.split(\"::\")\r\n song_name = info[0]\r\n del info[0]\r\n songs_list = info\r\n song_dict[song_name] = songs_list\r\n albums_data[album_name] = (song_dict.copy(), release_Date)\r\n song_dict.clear()\r\n return albums_data", "def variant_sample_list_2_3():\n return {\n \"schema_version\": \"2\",\n \"status\": \"current\",\n \"project\": \"12a92962-8265-4fc0-b2f8-cf14f05db58b\",\n \"institution\": \"828cd4fe-ebb0-4b36-a94a-d2e3a36cc989\",\n \"variant_samples\": [\n {\n \"selected_by\": \"834559db-a3f6-462c-81a4-f5d7e5e65707\",\n \"date_selected\": \"2021-07-09T16:42:23.694711+00:00\",\n \"variant_sample_item\": \"013bcc47-3885-4682-99c2-800b95765524\",\n \"filter_blocks_used\": {\n \"filter_blocks\": [\n {\n \"name\": \"Breast Cancer\",\n \"query\": \"associated_genotype_labels.proband_genotype_label=Heterozygous&associated_genelists=Breast+Cancer+%2828%29&variant.genes.genes_most_severe_consequence.impact=MODERATE&variant.genes.genes_most_severe_consequence.impact=HIGH\"\n }\n ],\n \"intersect_selected_blocks\": False\n }\n },\n {\n \"selected_by\": \"834559db-a3f6-462c-81a4-f5d7e5e65707\",\n \"date_selected\": \"2021-07-09T16:42:23.696554+00:00\",\n \"variant_sample_item\": \"ac62850f-6f77-4d3b-9644-41699238d0e2\",\n \"filter_blocks_request_at_time_of_selection\": \"some-gibberish\"\n }\n ],\n \"created_for_case\": \"GAPCAJQ1L99X\",\n \"uuid\": \"292250e7-5cb7-4543-85b2-80cd318287b2\"\n }", "def gen_dict(self):\n stimuli_dict = dict()\n for i, stim in enumerate(STIMULI):\n stimuli_dict[stim.name] = dict(stimulus_path=stim.value)\n rel_df = self.df.iloc[:, i * 2 : i * 2 + 2]\n stimuli_dict[stim.name][\"responses\"] = rel_df\n return stimuli_dict", "def find_duplicates(inlist):\n \n D = defaultdict(list)\n for i,item in enumerate(mylist):\n D[item].append(i)\n D = {k:v for k,v in list(D.items()) if len(v)>1}\n \n return D", "def _as_dict(self):\n local = dict((key, value) for key, value in self)\n joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])\n local.update(joined)\n return local", "def _produce_ut_vim_accounts_info(self, list_of_vims):\n return {_['name']: _['_id'] for _ in list_of_vims}", "def find_categories_used_dict(request):\n categories_used = []\n\n for item_index, item in enumerate(all_shopping_items(request)):\n category_dict = {\n 'category': item.category.category,\n }\n if item_index == 0:\n categories_used.append(category_dict)\n else:\n add_category = True\n\n for list_item in categories_used:\n \n if list_item['category'] == item.category.category:\n add_category = 
False\n \n if add_category:\n categories_used.append(category_dict)\n\n return categories_used", "def get_drugcount_dict(year):\r\n return common.get_dict_all(get_drugcount_filename(year), int)", "def uniquified(self, d):\n print(\"the value befour conversion\",d)\n df_unique = d.drop_duplicates()\n\n print(\"after conversion\",df_unique)\n\n\n return df_unique", "def __init__(self):\n self.d = {}\n self.l = []", "def readDrugs(filename):\n dataFile = open(filename, \"r\")\n lines=[]\n for line in dataFile.readlines():\n lines.append(line.strip())\n drugsList = []\n for line in lines:\n cur = line.split(\",\")\n if cur[2] not in drugsList:\n drugsList.append(cur[2])\n print drugsList\n return drugsList", "def get_dict(self):\n new_source_data = self.data.to_dict(orient=\"list\")\n new_source_data[\"index\"] = self.data.index\n for k in list(new_source_data):\n if isinstance(k, tuple):\n new_source_data[\"_\".join(k)] = new_source_data.pop(k)\n\n return new_source_data" ]
[ "0.6729187", "0.5912013", "0.57591486", "0.5738331", "0.5666848", "0.5649718", "0.5634769", "0.562001", "0.56132656", "0.5601305", "0.55995375", "0.55618244", "0.55507255", "0.5541687", "0.5520323", "0.5516712", "0.5491589", "0.54879695", "0.54779065", "0.54644716", "0.5451422", "0.5442167", "0.5442167", "0.542725", "0.5418897", "0.540187", "0.5390148", "0.538011", "0.5379323", "0.5368923", "0.5353095", "0.5342627", "0.5342448", "0.5337619", "0.5330561", "0.53097844", "0.5302741", "0.52987397", "0.52946246", "0.5292481", "0.5288146", "0.5280784", "0.5275621", "0.52668864", "0.5266052", "0.52618253", "0.5257464", "0.5243191", "0.5237233", "0.52345145", "0.52341914", "0.5233594", "0.52173585", "0.52102643", "0.521025", "0.51999754", "0.51996946", "0.51990217", "0.51945084", "0.51931405", "0.518087", "0.51689595", "0.51685387", "0.5165465", "0.5154601", "0.5145899", "0.5141518", "0.51288205", "0.5119666", "0.5115173", "0.51125735", "0.5092196", "0.50916153", "0.5091483", "0.5087934", "0.5087536", "0.50864935", "0.5072481", "0.5068032", "0.50665367", "0.50638485", "0.50635487", "0.50606734", "0.50588197", "0.5056712", "0.50559247", "0.5053203", "0.50509506", "0.504807", "0.50454676", "0.5045169", "0.50437987", "0.50431293", "0.50360405", "0.5035231", "0.50302076", "0.5027702", "0.502489", "0.5021861", "0.5016131" ]
0.68579596
0
Number of unique names for each drug is correct.
def test_get_num_unique_name(self):
    list1 = self.test_num_unique_name
    list2 = get_num_unique_name(self.test_sorted_tuple, self.test_dict)
    self.assertEqual(list1, list2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n return len(self.names)", "def __len__(self):\n return self.data.index.get_level_values(0).to_series().nunique()", "def number_of_variables(dataset, name_of_variable):\r\n first_row = dataset[0].keys()\r\n num = 0\r\n for variable in first_row:\r\n if name_of_variable in variable:\r\n num += 1 \r\n return num", "def codon_counts(self):\n # Removing 5' UTR and 3' UTR sequences\n sequence = self.sequence.replace(self.five_prime_utr_sequence, \"\").replace(self.three_prime_utr_sequence, \"\")\n return len(sequence) / 3", "def count_codon_all(self):\n return Counter(list(self))", "def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n", "def CountNames():\r\n return _hiew.HiewGate_Names_CountName()", "def specht(mu):\n return StandardTableaux(mu).cardinality().n()", "def count_codon(self, codon):\n return sum([1 for c in self if c == codon])", "def number_of_donations(self):\n return len(self.donations)", "def num_donations(self):\n return len(self.donations)", "def count_unique_features(self):\n return N_UNIQUE_FEATS", "def get_number_of_unique_students(self):\n unique_students = set()\n for row in self.responses:\n unique_students.add(row.student)\n return len(unique_students)", "def name_corr(self):\n raise NotImplementedError\n ## Build matrix of names\n corr_mat = []\n for ind in range(self.n_in):\n corr_mat.append(\n list(map(lambda s: s + \",\" + self.domain.var[ind], self.domain.var))\n )\n\n ## Access matrix of names\n corr_names = dict()\n corr_ind = triu_indices(self.n_in, 1)\n for knd in range(len(corr_ind[0])):\n ind = corr_ind[0][knd]\n jnd = corr_ind[1][knd]\n corr_names[\"corr_\" + str(knd)] = corr_mat[ind][jnd]\n\n return corr_names", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def count():", "def part1(fname: str) -> int:\n return sum(len(set(''.join(group))) for group in get_data(fname))", "def count(self, nodename: str):\n if nodename in self._d:\n return len(self._d[nodename][0])\n else:\n return 0", "def countUniqueGreatPeople(self, tCoords):\n\t\tiCount = 0\n\t\tplot = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif plot.isCity():\n\t\t\tcity = plot.getPlotCity()\n\t\t\tiGreatPriest = gc.getInfoTypeForString(\"SPECIALIST_GREAT_PRIEST\")\n\t\t\tfor i in range(iGreatPriest, iGreatPriest+7, 1):\n\t\t\t\tiCount += min(1, city.getFreeSpecialistCount(i))\n\t\treturn iCount", "def count_naked_names(graph: BELGraph) -> typing.Counter[str]:\n return Counter(_naked_names_iter(graph))", "def uracil_count(RNAsequence):\n uracil = 0\n for nucleotide in RNAsequence:\n if nucleotide == 'U':\n uracil += 1\n return uracil", "def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)", "def carn_count(self):\n return len(self.carnivores)", "def num_of_donations(self):\n return len(self._donations)", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count_vario(dist_param, picker_param):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(5, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n try:\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n except:\n print(\"no data for 
{}\".format(name_tag))\n continue\n var, norm = variation_of_information(cl_orig, clust2)\n print(\" {} VOI is {}\".format(name_tag, norm))", "def num_carns(self):\n return self._num_carns", "def num_injectors(self):\n injectors = self.info_wells.groupby('well_type').get_group('inj')\n return injectors['well'].nunique()", "def num_students(self,dd=\"\"):\n\t\tif dd==\"\":\n\t\t\tdd=datadrop.objects.all().filter(cohort=self.cohort)\\\n\t\t\t\t.order_by('-date')[0]\n\t\telif isinstance(dd,str):\n\t\t\tdd=datadrop.objects.get(name=dd,cohort=self.cohort)\n\t\treturn len(grade.objects.filter(subject=self).distinct()\\\n\t\t\t.values_list('upn'))", "def coursed_count(self):\n dict_name = \"filted_course_student_groupby\"\n self.__data[dict_name] = self.__data[\"normal_dataframe\"].groupby([\n \"COD_ATIV_CURRIC\",\n \"MATR_ALUNO\"\n ])\n course_dict = {}\n for df in self.__data[dict_name]:\n if df[0][0] not in course_dict:\n course_dict[df[0][0]] = dict.fromkeys(\n [str(i) for i in range(1, 6)], 0)\n count = df[1].shape[0] if df[1].shape[0] <= 5 else 5\n course_dict[df[0][0]][str(count)] += 1\n\n self.analysis[\"coursed_count\"] = course_dict\n # ratio coursed count\n\n def f(x):\n coursed_succes = x[x['SITUACAO_ATIV_CURRIC'].isin(sit.SITUATION_PASS)].shape[0]\n return (x.shape[0] / coursed_succes) if coursed_succes > 0 else -1\n\n groups = self.__data[\"filted_dataframe\"].groupby([\"COD_ATIV_CURRIC\"])\n ratio_coursed = groups.apply(lambda x: f(x))\n self.analysis[\"coursed_ratio\"] = ratio_coursed.to_dict()", "def get_num_countries():\n num_countries = np.zeros(shape=(len(annual_files), 1))\n \n for year in annual_files:\n df = get_runners_data(year)\n country_count = df['countryCode'].value_counts()\n num_countries[annual_files.index(\n year)] = len(country_count.index)\n return num_countries", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def num_grna(self) -> int:\n return len(self.gRNAs)", "def test_count_degenerate(self):\n self.assertEqual(self.RNA(\"\").count_degenerate(), 0)\n self.assertEqual(self.RNA(\"GACUGCAUGCAUCGUACGUCAGUACCGA\").count_degenerate(), 0)\n self.assertEqual(self.RNA(\"N\").count_degenerate(), 1)\n self.assertEqual(self.PROT(\"N\").count_degenerate(), 0)\n self.assertEqual(self.RNA(\"NRY\").count_degenerate(), 3)\n self.assertEqual(\n self.RNA(\"ACGUAVCUAGCAUNUCAGUCAGyUACGUCAGS\").count_degenerate(), 4\n )", "def get_n_questions(self):\n return self.df.question.nunique()", "def get_unique(self):\n unique_values = len(self.df[self.col_name].unique())\n return unique_values", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", 
"def nClumps(self):\n \n return len(self)", "def num_conll(self):\n pass", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def __uniqueCounts(rows):\n results = {} #Initialize a dictionary to store the results\n for row in rows: #Iterate over all rows of data\n #The result is the last column\n r = row[-1]\n if r not in results: results[r] = 0 #Start the count for each class at zero\n results[r] += 1 #Increment the count for this row's class by 1\n return results", "def culggroup_donecount(group, dones):\n return sum(dones[l] for l in group)", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def compute_number_of_associated_companies(row):\n derived_series = pd.read_json(json.dumps(row['company_derived']), typ='series')\n derived_series = pd.Series(derived_series)\n derived_string = derived_series.to_string()\n if derived_string.count('|') > 0:\n row[\"multiple_companies_derived_count\"] = derived_string.count('|') + 1\n elif \"none\" in derived_string:\n row[\"multiple_companies_derived_count\"] = 0\n else:\n row[\"multiple_companies_derived_count\"] = 1\n return row[\"multiple_companies_derived_count\"]", "def __len__(self):\r\n return len(self.__sorted_names)", "def part1_answer_counter(people: list[str]) -> int:\n return len(set(\"\".join(people)))", "def num_classes(self,dd=\"\"):\n\t\tif dd==\"\":\n\t\t\tdd=datadrop.objects.all().filter(cohort=self.cohort)\\\n\t\t\t\t.order_by('-date')[0]\n\t\telif isinstance(dd,str):\n\t\t\tdd=datadrop.objects.get(name=dd,cohort=self.cohort)\n\t\treturn self.classgroup_set.all().count()", "def get_number_of_unique_successors(partition, graph):\n suc_unique = get_unique_successors(partition, graph)\n return len(suc_unique)", "def freq(self) -> int:", "def NDon(row):\n m = Chem.MolFromSmiles(row.SMILES)\n donors = Descriptors.NumHDonors(m)\n return donors", "def NDon(row):\n m = Chem.MolFromSmiles(row.SMILES)\n donors = Descriptors.NumHDonors(m)\n return donors", "def comitentes_count(self):\n return self.expedientepersona_set.filter(comitente=True).count()", "def countclass(self, comb_res, origin_df):\r\n clsdic_ratio = {}\r\n self.clsdic_df = {}\r\n # totalcount = df['count'].sum() # no sum of count but the num of id which attr contains cls\r\n clslist = comb_res['组合需求'].apply(lambda x: x.split('.')[1]).unique().tolist()\r\n\r\n totalcount = len(origin_df[origin_df.attr.apply(self.judge, args=(clslist, ))])\r\n for cls in clslist:\r\n # dfcls = comb_res[comb_res['组合需求'].str.contains(cls)] # no count but distinct id\r\n df_cls = origin_df[origin_df.attr.apply(self.judge, args=(clslist, cls,))]\r\n self.clsdic_df[cls] = df_cls\r\n clsdic_ratio[cls] = round(len(df_cls) / totalcount * 100, 2)\r\n return sorted(clsdic_ratio.items(), key=lambda x: (x[1], x[0]), reverse=True)", "def culggroup_thickestdonecount(As, Rps, group, dones):\n pairs = sorted(((get_culg_dimension(As, Rps, l), dones[l], l)\n for l in group),\n reverse=True)\n count = len(tuple(itt.takewhile(lambda p: p[1], pairs)))\n return count", "def count(self):\n\n raise NotImplementedError", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def get_number_practices(df):\n return 
len(df.practice.unique())", "def compute_unique_count_drift(df_prob, ref_df_prob):\n\n df_diff = set(df_prob.keys()) - set(ref_df_prob.keys())\n ref_df_diff = set(ref_df_prob.keys()) - set(df_prob.keys())\n\n return sum([df_prob[k] for k in df_diff] + [ref_df_prob[k] for k in ref_df_diff])", "def num_trajs(self):\n return len(list(self.run_traj_idx_tuples()))", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def count_nucleobases(dnas, nucleobase):\n total_nucleobase = nucleobase + \": \"\n\n for index in range(len(dnas[0])):\n total = 0\n\n for dna in dnas:\n if dna[index] == nucleobase:\n total += 1\n total_nucleobase += str(total) + \" \"\n\n return total_nucleobase", "def __len__(self):\r\n return len(self.img_names)", "def n_components(self):\n return 1", "def CountLocal():\r\n return _hiew.HiewGate_Names_CountLocal()", "def get_n_chains(self): \n res_id_cnt = 0\n tot_n_res = len(self.res_ids)\n n_chns = 0\n for res_id in self.res_ids:\n res_chn_i = res_id[2]\n if res_id_cnt > 1:\n if res_chn_i == self.res_ids[res_id_cnt-1][2]:\n pass\n else:\n n_chns+=1\n res_id_cnt+=1\n return n_chns", "def num_run_trajs(self, run_idx):\n return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])", "def count(self):\n # TODO not implemented yet\n return 0", "def n(self):\n return len(self.genotypes)", "def validate_unique_cof_names():\n names = FRAMEWORKS_DF['Name'].str.lower()\n names = names.str.replace('-',' ')\n\n duplicates = [item for item, count in collections.Counter(list(names)).items() if count > 1]\n\n if duplicates:\n print('Warning: Duplicate CURATED-COF names detected: {}'.format(duplicates))\n sys.exit(1)\n\n print('No duplicate CURATED-COF names found.')", "def CountGlobal():\r\n return _hiew.HiewGate_Names_CountGlobal()", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def number_of_crew(self):\n return self._number_of_crew", "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n", "def __len__(self):\n return len(self.image_names)", "def compute_surgery_count(self):\n for record in self:\n surgery_count = 0\n if record.partner_type == \"dr\":\n surgery_count = len(record.doctor_surgery_ids.ids)\n if record.partner_type == \"patient\":\n surgery_count = len(record.surgery_ids.ids)\n record.surgery_count = surgery_count", "def count(self):\n return sum(1 for _ in self)", "def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())", "def count_cop(self, infile):\n n_cop = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n if dg.has_cop_deprel():\n n_cop += 1\n return n_cop, len(dgs_in)", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def n_replicates(self):\n return self.data.n_replicates.values", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def setupDistribution(tournamentsWon1):\n timesWon = np.sort(np.unique(tournamentsWon1))\n numberTimesWon = np.zeros_like(timesWon)\n for i in 
range (len(timesWon)):\n numberTimesWon[i] = count(tournamentsWon1, timesWon[i])\n return timesWon, numberTimesWon", "def num_animals(self):\n return self._num_herbs + self._num_carns", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def solution_2(arr):\n total = 0\n for group in arr:\n group_list = []\n for person in group:\n group_list = group_list + person\n group_table = Counter(''.join(group_list))\n for k, v in group_table.items():\n if v == len(group):\n total += 1\n return total", "def count(self, base):\n return self._dna.count(base)", "def getQVcountsForDominantModel(genotypesFilename, caseNames, controlNames):\r\n\r\n\t# If we have a sample file, then we have everyone's names:\r\n\tif len(caseNames) != 0 or len(controlNames) != 0:\r\n\t\tcaseCounts = {name: 0 for name in caseNames}\r\n\t\tcontrolCounts = {name: 0 for name in controlNames}\r\n\r\n\t\tcounts = {\"case\": caseCounts, \"ctrl\": controlCounts}\r\n\r\n\t# Otherwise, we work just from the genotypes file and get names from there as we go.\r\n\telse:\r\n\t\tcounts = defaultdict(Counter)\r\n\r\n\treader = csv.reader(open(genotypesFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor line in reader:\r\n\r\n\t\tline = dict(zip(header, line))\r\n\t\tcaseOrControl = line[\"Sample Phenotype\"]\r\n\t\tname = line[\"Sample Name\"]\r\n\t\tcounts[caseOrControl][name] += 1\r\n\r\n\treturn counts[\"case\"], counts[\"ctrl\"]", "def getNumDonations(self):\n return len(self.donationList)", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def check_unique(df):\n\n print(\"Number of unique values for each column\")\n print(\"=======================================\")\n # print number of unique values of each column\n for col in df.columns:\n print(f\"{col}: {df[col].nunique()}\")", "def num_years():\n years = movies['Year']\n return ('num_years', years.nunique())" ]
[ "0.62920433", "0.60662675", "0.5955769", "0.59471035", "0.5925419", "0.5915438", "0.5910354", "0.58735096", "0.586835", "0.58442324", "0.58086777", "0.57781553", "0.5766293", "0.5765629", "0.5764152", "0.57473814", "0.5723728", "0.5690694", "0.56676644", "0.56587267", "0.5648405", "0.5648025", "0.56357086", "0.5631345", "0.56130457", "0.56096524", "0.56005526", "0.56005126", "0.55965346", "0.55959064", "0.5578148", "0.5577088", "0.5575639", "0.5566268", "0.5565767", "0.5565767", "0.5565767", "0.5565767", "0.55534554", "0.5552672", "0.5544422", "0.55304676", "0.5519394", "0.5517349", "0.5499859", "0.54990864", "0.54968095", "0.5483188", "0.54791725", "0.5478841", "0.5478841", "0.54738545", "0.5468101", "0.546633", "0.54593784", "0.5453381", "0.5450472", "0.543956", "0.543956", "0.5416048", "0.54139143", "0.54127336", "0.5411964", "0.54106206", "0.5405853", "0.54000014", "0.5392241", "0.5389817", "0.53834975", "0.53685087", "0.5364456", "0.53548753", "0.53488123", "0.5347171", "0.53463614", "0.5345507", "0.5335234", "0.53320634", "0.5324611", "0.53242946", "0.53210366", "0.5315187", "0.5305051", "0.5304106", "0.5302853", "0.5300231", "0.5300206", "0.5293133", "0.5293013", "0.5286035", "0.5284796", "0.5283633", "0.5280436", "0.5277918", "0.5274774", "0.5272487", "0.527237", "0.5272236", "0.5269841", "0.5262215", "0.52611285" ]
0.0
-1
Total cost of each drug is correct.
def test_get_total_cost_each_drug(self):
    list1 = self.test_total_cost_each_drug
    list2 = get_total_cost_each_drug(self.test_sorted_tuple, self.test_dict)
    self.assertEqual(list1, list2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(self) -> float:", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total", "def get_expected_cost(self):", "def patrimony_total(self):\n pass", "def calculate_total_cost(state):\n pass", "def calculate_total_cost(state):\r\n return state.cost()", "def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)", "def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def total_cost(self):\r\n return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101\r", "def cost(self):\n\t\treturn self.g + self.h", "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def unitcost(self):\n cost = self.tablecost\n\n for component, quantity in self.components.items():\n cost += component.unitcost * quantity\n\n return cost", "def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def total(self):\n gd_total = self._grand_total()\n counts = self._get_as_dict_count()\n for rule in self.rules:\n gd_total += rule(counts)\n return gd_total", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas 
melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def cost(self):\n return self._cost", "def cost(self):\n return self._cost", "def calculate_cost(self):\n booking_days, booking_hours = self.calculate_daily_hourly_billable_counts()\n day_cost = booking_days * Decimal(self.vehicle.type.daily_rate)\n hour_cost = booking_hours * Decimal(self.vehicle.type.hourly_rate)\n if hour_cost > self.vehicle.type.daily_rate:\n hour_cost = self.vehicle.type.daily_rate\n return float(day_cost + hour_cost)", "def total_donations(self):\n return sum(self.donations)", "def total_donations(self):\n return sum(self.donations)", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_hcost(self):\n hvcost = self.get_hvcost()\n dcost = self.get_dcost()\n hcost = hvcost + dcost\n return hcost", "def calculate_expected_cost(melon_cost, melon_count):\n\n return melon_cost + melon_count", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def total_cost(clusters):\n inter = 0\n intra = 0\n dm = 0\n for clst in clusters:\n # print clst.label, \"has cost: \", str(clst.inter_cost), str(clst.intra_cost), str(clst.dm_cost)\n inter += clst.inter_cost\n intra += clst.intra_cost\n dm += clst.dm_cost\n total = inter + intra + dm\n #iic = inter + intra\n #print \"inter \" + str(inter) + \" intra \" + str(intra) + \" dm \" + str(dm) + \" total \" + str(total) + \" iic \" + str(iic)\n print str(inter) + \"\\t\" + str(intra) + \"\\t\" + str(dm) + \"\\t\" + str(total) # + \" in \" + str(inr)\n return inter, intra, dm, total", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def GOAL_TOTAL() -> int:\n return 21", "def cost(self):\n abs_cost = sum(f['price'] * f['qty'] for f in self.fills)\n return -abs_cost if self.is_ask() else abs_cost", "def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def SumTotalCost():\n\n logs.logger.debug(\"Start to add all amount of Cost objects.\")\n try:\n sumTotal = 0\n for item in GetAllAmountOfCost():\n sumTotal += item\n logs.logger.info(\"Add all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def cost(self):\n\n return self._cost", "def get_total_price(self):\n i = self.get_copy_with_resolved_dependencies()\n total_price = Decimal(0)\n for product in i['products']:\n billed_price = Decimal(str(product.get('price', 0))) * Decimal(str(product.get('quantity')))\n total_price += billed_price\n return total_price", "def total(self):\n return sum(self.d.values())", "def total_cost(self, system=None):\n system = system or self.system()\n if system == 'grid':\n cost = self['system (grid)']['internal system nodal 
cost']\n else:\n cost = self['system (%s)' % system]['system nodal cost']\n cost = float(cost)\n return cost", "def calculate_training_cost(soldier_list: List[Soldier]):\n total_cost = 0.0\n \n for soldier in soldier_list:\n ################################# YOUR CODE HERE #################################\n if soldier.typecode == \"INF\":\n cost = 2.5 * soldier.weapon + 1.0 * soldier.armor\n elif soldier.typecode == \"ARC\":\n cost = 1.5 * soldier.weapon + 3.0 * soldier.armor\n elif soldier.typecode == \"CVL\":\n cost = 4.0 * soldier.weapon + 6.0 * soldier.armor\n if soldier.vitality < 0.35:\n cost *= 0.5\n total_cost += cost\n ##################################################################################\n return total_cost", "def getCost(self):\n\n return self.cost", "def evaluate(self, solution, total = 0):\n for objective in self.objectives:\n total = total + objective(solution)\n return total", "def inventory_value(self):\n cost = 0\n for bike in self.inventory:\n cost = cost + bike.total_cost()\n return cost", "def get_score(self, solution: np.array) -> float:\n score = 0\n for vehicle_count, vehicle_solution in enumerate(solution):\n distances = self.distance_matrix[vehicle_solution[0:-1], vehicle_solution[1:]]\n costs = distances * self.selected_transportation_cost[vehicle_count]\n score += np.sum(costs)\n return score", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total", "def total_donated(self):\n if not hasattr(self, 'dynamic_total'):\n agg = self.donations.aggregate(Sum('amount'))\n self.dynamic_total = agg['amount__sum']\n return self.current + (self.dynamic_total or 0)", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def cost_a(self):\n return self._cost_a", "def total(self):\n total_price = self.get_total_amount()\n discounts = self.get_total_discount()\n\n return total_price - discounts", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def getTotDonation(self):\n return sum(self.donationList)", "def test_check_cost():", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions\n )\n # sum all costs in one total cost\n cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))\n\n return cs", "def getCost(self):\n return self._cost", "def getCosts(self):\n return self.costs", "def calculate_cost(self):\n costs = {}\n if np.abs(self.agent.get_position()[1]) > self.y_lim:\n costs['cost_outside_bounds'] = 1.\n if self.agent.velocity_violation:\n costs['cost_velocity_violation'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n return costs", "def 
line_cost(self):\r\n return self.qty * self.unit_cost", "def total_sdram_requirements(self):", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def calculate_shares_cost_sum(dataset):\n cost_sum = 0\n for data in dataset:\n cost_sum += data[1]\n return cost_sum", "def _cost_petrol(self):\n return self.distance * self.petrol_usage * self.petrol_cost", "def cost_total(X, cost_weights=(1.0, 1.0, 1.0)):\n return cost_weights[0] * cost_distance(X) + \\\n cost_weights[1] * cost_same_team_by_distance(X) + \\\n cost_weights[2] * cost_previous_neighbour_by_distance(X, normalize=True)", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def table_total(self):\n total = 0.00\n\n for customer in self.customers:\n total = total + customer.get_total()\n\n return total", "def calculate_cost(self):\n info = {}\n c = self.get_collisions() * self.bomb_cost\n z = self.agent.get_position()[2] # Limit range of Drone agent\n\n # sum all costs in one total cost\n info['cost_gathered_bombs'] = c\n info['cost_out_of_range'] = 1. if z > 2 else 0.\n # limit cost to be at most 1.0\n info['cost'] = min(1, sum(v for k, v in info.items()\n if k.startswith('cost_')))\n return info", "def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n z = self.agent.get_position()[2]\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions,\n # Drone should not leave valid operation space...\n cost_out_of_range=(1. if z > 2 else 0.)\n )\n # sum all costs in one total cost\n cs['cost'] = min(1, sum(v for k, v in cs.items() if k.startswith('cost_')))\n return cs", "def cost(self):\n cost = 0\n for battery in self.grid.batteries:\n for house in battery.connections:\n cost += house.distances[house.connection] * 9\n cost += battery.cost\n return cost", "def calculate_profit(self):", "def test_shopping_cart_displays_total_cost(self):\n expected_cart_cost = 0\n for item in self.fill_session_cart():\n expected_cart_cost += item['price'] * item['amount']\n\n self.client.get(self.SHOP_CART_URL)\n self.assertEqual(self.client.session['cart_cost'], expected_cart_cost)", "def get_total(self):\n\n # Total sum\n self.sum = 0.00\n\n # Determine which Check buttons are selected\n # and add the charges to find the total\n if self.check_1.get() == 1:\n self.sum += 30.00\n if self.check_2.get() == 1:\n self.sum += 20.00\n if self.check_3.get() == 1:\n self.sum += 40.00\n if self.check_4.get() == 1:\n self.sum += 100.00\n if self.check_5.get() == 1:\n self.sum += 35.00\n if self.check_6.get() == 1:\n self.sum += 200.00\n if self.check_7.get() == 1:\n self.sum += 20.00\n\n # Convert the sum to string\n # and store in StringVar object\n # to automatically update the total_val label\n self.sum_str.set(self.sum)", "def total_points(self):\n total_points = 0.0\n for ingredient in self.ingredients:\n if (ingredient.has_property('ppg')):\n # Use given value if specified\n total_points += ingredient.property('ppg').to('ppg') * ingredient.quantity.to('lb')\n else:\n total_points += EXTRACTS[ingredient.type] * ingredient.quantity.to('lb')\n return(Quantity(total_points, 'points'))", "def report():\n print(\"Donor Name | Total Given | Num Gifts | Average Gift\")\n 
print(\"------------------------------------------------------------------\")\n for key, val in data.items():\n print(f\"{key:25} $ {float(sum(val)):>12.2f} {len(val):>8} $ {float(sum(val))/len(val):>11.2f}\")", "def costo_total(df, t_util = 8, efic_rep = 0.75, dias_lab = 6, q_sem = 4):\n\n cost_plat = costo_plataforma(df, t_util, efic_rep, dias_lab, q_sem)\n cost_sac = costo_sac(df)\n cost_campana = costo_campana(pd.concat([cost_plat, cost_sac], axis=1))\n return cost_campana", "def total(self):\n total = sum(self.d.values())\n return total", "def total_cost_2D(self, final_list):\n total_cost = 0\n for i in range(len(final_list) - 1):\n temp = self.pairwise_distance(final_list[i], final_list[i + 1])\n total_cost = total_cost + temp\n print(\"Total distance: \" + str(total_cost))", "def test_weighted_total(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(1.0)", "def cost(self, route: List[int]) -> float:\n raise NotImplementedError", "def calc_cost(self, dx, dy):\n self.distance+=np.sqrt(dx**2+dy**2)", "def ticket_salida(self):\n total = 0.0\n for x in self.mi_parqueo:\n total=x.cobro=total\n\n print(\"El costo total es de :\",total)", "def calculate_total_price(total_prices):\n total = 0.0\n for symbol,individual_cost in total_prices.items():\n total += individual_cost\n return total", "def total(self):\n return len(self._results) + len(self.test_cases)", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def donation_totals(donor_list, donor):\n return sum(donor_list[donor])", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def total_amortization(self):\n return sum(self.table[\"amortization\"])", "def total_cost(path: Path) -> float:\n\t\n\tdistance = calc_total_dist(path)\n\tavg_speed = calc_average_speed(path)\n\t\n\t# Speed is less important, but gets a huge multiplier, because speed and\n\t# \tdistance are in different units. 
Speed requires a high ratio to have\n\t# \tsimilar amounts of variation.\n\tSPEED_DISTANCE_COST_RATIO = 7865.099\n\t\n\treturn (\n\t\t(distance * 1) +\n\t\t(-avg_speed * SPEED_DISTANCE_COST_RATIO)\n\t)", "def subtotal(self):\n return self.cantidad * self.precio", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total", "def sub_total():\n return sum(SAVE_PRICE)", "def profit(self):\n retail_value = 0\n wholesale_value = 0\n for bike in self.sold:\n retail_value += bike.total_cost() + (\n self.retail_margin * bike.total_cost())\n wholesale_value += bike.total_cost()\n return retail_value - wholesale_value", "def total_cost(self):\n path = self.bidirectional_cpp.getPath()\n return self.bidirectional_cpp.getTotalCost() if len(path) > 0 else None", "def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def CalcCostForTurn(self):\r\n costsThisTurn = 0\r\n \r\n inventoryStorageCost = self.currentStock * STORAGE_COST_PER_UNIT\r\n backorderPenaltyCost = self.currentOrders * BACKORDER_PENALTY_COST_PER_UNIT\r\n \r\n costsThisTurn = inventoryStorageCost + backorderPenaltyCost\r\n \r\n return costsThisTurn" ]
[ "0.7039642", "0.68268335", "0.6784941", "0.67635846", "0.6760325", "0.66375047", "0.66135275", "0.65899247", "0.65713847", "0.6541224", "0.6456955", "0.6456513", "0.6444531", "0.6374481", "0.6351378", "0.63261276", "0.6230316", "0.6212625", "0.6205208", "0.6195797", "0.6188748", "0.61811465", "0.61560684", "0.615034", "0.615034", "0.6142691", "0.6112799", "0.6112799", "0.6099505", "0.60935885", "0.60855055", "0.60814196", "0.60797966", "0.60782444", "0.6072214", "0.6067431", "0.6066817", "0.60576314", "0.60371846", "0.6035916", "0.6009447", "0.59972984", "0.59760284", "0.5975958", "0.5973224", "0.5964413", "0.5956499", "0.5953197", "0.59415865", "0.59381276", "0.59378207", "0.59312713", "0.59232837", "0.5920214", "0.59149486", "0.59038246", "0.5893175", "0.5881626", "0.58781374", "0.58640397", "0.5856919", "0.5846066", "0.5825606", "0.58149076", "0.58065414", "0.57990557", "0.57974565", "0.57924974", "0.5791803", "0.5787923", "0.57844985", "0.5774644", "0.5759679", "0.5751241", "0.57508826", "0.57498676", "0.57465214", "0.57399315", "0.5739235", "0.57381755", "0.5736777", "0.5733001", "0.57273924", "0.5724219", "0.57178867", "0.5717221", "0.5716263", "0.57114506", "0.5710217", "0.5706532", "0.5706171", "0.5698647", "0.5696282", "0.5695455", "0.569041", "0.5689725", "0.56888306", "0.5682272", "0.5674959", "0.5674854" ]
0.68602717
1
The output file is as expected.
def test_print_drug_info(self):
    pwd = self.get_script_path()
    fout1 = self.test_output_file
    fout2 = pwd+'/../insight_testsuite/tests/my_test/output/test_output_file_2.txt'
    print_drug_info(self.test_sorted_tuple, self.test_dict, self.test_num_unique_name, self.test_total_cost_each_drug, fout2, 2)
    self.assertTrue(filecmp.cmp(fout1, fout2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_output(self):", "def write_actual_output(self, output):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n with open(actual_output_file, \"w\") as f:\n f.write(output)", "def _toFile(self):\n pass", "def test_outfile():\n\n out_file = random_filename()\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n try:\n cmd = f'{prg} --cdhit {cdhit} --proteins {proteins} -o {out_file}'\n rv, out = getstatusoutput(cmd)\n assert rv == 0\n\n assert out == ('Wrote 309 of 220,520 unclustered '\n f'proteins to \"{out_file}\"')\n\n assert os.path.isfile(out_file)\n\n seqs = list(SeqIO.parse(out_file, 'fasta'))\n assert len(seqs) == 309\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def export_to_file(self):\r\n return True", "def _setup_output_file(self):\n\n columns = [\"Hero file\",\n \"Test type\",\n \"Name of tested entry\",\n \"Misc dice sum input\",\n \"Value of tested entry\",\n \"Modifier\",\n \"Values of related attributes\",\n \"Rolls\",\n \"Result\",\n \"Description\",\n \"Timestamp\",\n \"Type of dice input\"]\n\n # if file does not exist, add first row of column names\n if not os.path.isfile(self._result_csv):\n with open(self._result_csv, \"w\", encoding=\"utf-8\") as csv_file:\n file_writer = csv.writer(csv_file, delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n file_writer.writerow(columns)\n return True\n return False", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def test_outfile():\n\n out_file = random_string() + '.txt'\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat} -o {out_file}')\n assert rv == 0\n expected = (f' 1: amigo_repeat.txt\\n'\n f'Wrote 5 gene IDs from 1 file to file \"{out_file}\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")", "def beginFileOutput(self):\n self._outputFilepath = self.dataSet[self._outputFileLabel]\n self._outputFile = open(self._outputFilepath, 'w')", "def write_output(self, output_path, output_filename):\n self.output_file = output_path + '/' + output_filename\n if os.path.isfile(self.output_file + '.txt'): # Creación del archivo txt de salida.\n os.remove(self.output_file + '.txt')\n file = open(self.output_file + '.txt', \"x\")\n\n self.parse_html() # Obtiene los html de entrada.\n file.write(\"############################\\n\")\n file.write(\"# ISAMI VERSION: v11.1.0 #\\n\")\n file.write(\"# 
INITIATION LUG #\\n\")\n file.write(\"# ISAMI_LUG VERSION: v1.0 #\\n\")\n file.write(\"############################\\n\")\n for id in self.parsed_html_dic: # Escribe la salida en el txt con el nombre del caso y kt correspondiente.\n file.writelines('-----------------------------------\\n')\n header = id + \"\\n\"\n file.writelines(header)\n file.writelines('-----------------------------------\\n')\n tables = self.read_tables(self.parsed_html_dic[id])\n info = tables[0]\n for i in info:\n file.writelines(i + \" = \" + str(info[i]) + \"\\n\")\n kt = self.find_kt(self.parsed_html_dic[id])\n file.writelines(\" Kt = \" + str(kt) + \"\\n\")\n file.close()", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def __init__(self, output_file):\n self.file = open(output_file, \"w\")", "def test_write_championship_to_file():\n f1.write_championship_to_file()\n with open(\"championship_results.txt\") as new_file:\n list_of_lines = new_file.readlines()\n assert len(list_of_lines) == 13", "def writeOutput(self, output):", "def bless_output(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if path.exists(expected_output_file):\n os.unlink(expected_output_file)\n os.rename(actual_output_file, expected_output_file)", "def make_outputfile(self, solved_status, filename):\n filename = filename.split(\".\")\n filename[0] = filename[0].replace(\"Input\",\"Output\")\n str_filename = \".\"\n str_filename = str_filename.join(filename)\n # print(str_filename)\n\n f = open(str_filename,\"w+\")\n\n if(solved_status):\n string_rep = self.values_to_grid()\n ptr = 0\n for row in range(0,9):\n for col in range(0,9):\n f.write(string_rep[ptr]+ \" \")\n ptr += 1\n f.write(\"\\r\\n\") #windows compatiable formatting...\n else:\n f.write(\"Unable to solve this puzzle.\")\n\n f.close()", "def write(self, fname):\n pass", "def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()", "def write(self):", "def write(self):", "def write_to_file(self, filename: str) -> None:", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def write_opal(self, file_name):\n \n return 0", "def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')", "def write_output_file(self, index):\n ctx = self.block_store.make_local_output(self.expected_outputs[index])\n self.open_output_contexts[index] = ctx\n return ctx.get_filename()", "def export_samfile(self):", "def write_po(self, outputfile):\n raise NotImplementedError(\n \"Writing to this file format is not yet implemented\")", "def _write(self, out_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: 
disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write(self.setname.ljust(6).encode())\n out_file.write('{:12.5E}'.format(self.value).encode())\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write(self.text.ljust(20).encode())\n out_file.write('{:2d}'.format(self.ictype).encode())\n out_file.write('{:5d}'.format(self.numstep).encode())\n out_file.write(self.analys.ljust(10).encode())\n out_file.write('{:2d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n out_file.write(' '.encode()) # pad byte\n out_file.write('-4'.encode()) # key = -4\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(self.name.ljust(8).encode())\n if self.entities[0].ictype == 2 and self.ncomps == 3:\n out_file.write('{:5d}'.format(self.ncomps + 1).encode())\n else:\n out_file.write('{:5d}'.format(self.ncomps).encode())\n out_file.write('{:5d}'.format(self.irtype).encode())\n out_file.write('\\n'.encode()) # eol\n\n for entity in self.entities:\n out_file.write(' '.encode()) # pad byte\n out_file.write('-5'.encode())\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(entity.name.ljust(8).encode())\n out_file.write('{:5d}'.format(entity.menu).encode())\n out_file.write('{:5d}'.format(entity.ictype).encode())\n out_file.write('{:5d}'.format(entity.icind1).encode())\n if entity.ictype == 4:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n elif entity.ictype == 2 and entity is self.entities[-1]:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write(entity.icname.encode())\n else:\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write('\\n'.encode()) # eol\n\n for result in self.results:\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n for j in range(num_lines):\n if j == 0:\n out_file.write(' -1'.encode()) # pad byte and key = -1\n if self.format == 0:\n out_file.write(\n '{:5d}'.format(result.node).encode())\n else:\n out_file.write(\n '{:10d}'.format(result.node).encode())\n else:\n out_file.write(' -2'.encode()) # pad byte and key = -2\n out_file.write(' '*(5*(self.format+1)).encode())\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for k in range(k_start, k_end):\n out_file.write(\n '{:12.5E}'.format(result.data[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', result.node))\n out_file.write(struct.pack('f'*self.ncomps, *result.data))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def _amber_write_input_file(self):\n logger.debug(\"Writing {}\".format(self.input))\n with open(os.path.join(self.path, self.input), \"w\") as f:\n f.write(\"{}\\n\".format(self.title))\n f.write(\" &cntrl\\n\")\n self._write_dict_to_mdin(f, self.cntrl)\n\n if self.ewald is not None:\n f.write(\" &ewald\\n\")\n self._write_dict_to_mdin(f, self.ewald)\n\n if self.cntrl[\"nmropt\"] == 1:\n if self.wt is not None:\n for line in self.wt:\n f.write(\" 
\"+line+\"\\n\")\n f.write(\" &wt type = 'END', /\\n\")\n if self.restraint_file is not None:\n f.write(\"DISANG = {}\\n\".format(self.restraint_file))\n f.write(\"LISTOUT = POUT\\n\\n\")\n if self.group is not None:\n f.write(\"{:s}\".format(self.group))", "def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")", "def to_file(self, file_path, smirnoff_data):\n pass", "def write(self, out):", "def write_coord_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n lis = []\n with open(filepath, 'r') as file:\n for line in file:\n if line[:4] == 'ATOM':\n line_split = line.split()\n lis.append(line_split[3:4])\n choice1 = input('Enter name for the output file: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as myfile:\n for i in lis:\n myfile.writelines(i)\n print('Done!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def test_basic_mech_write(self):\n\n unit = btmux.parse_from_file(os.path.join(BTMUX_SAMPLE_DIR, 'AS7-D'))\n fobj = StringIO()\n write_to_file(unit, fobj)\n #print fobj.getvalue()\n # TODO: Compare to a golden standard.", "def write_output_shifts_to_file(self, shift_output):\n pass", "def PrintOutput(self):\n self.file_settings[\"file_name\"].SetString(self.file_name)\n file = TimeBasedAsciiFileWriterUtility(self.model_part, self.file_settings, self._GetHeader()).file\n for point, var_values in zip(self.found_positions, self.values):\n file.write(self._DataToString(point, var_values))\n file.close()", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def test_uclust_assigner_write_to_file(self):\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n\r\n t = UclustConsensusTaxonAssigner(params)\r\n result = t(seq_path=self.inseqs1_fp,\r\n result_path=self.output_txt_fp,\r\n uc_path=self.output_uc_fp,\r\n 
log_path=self.output_log_fp)\r\n del t\r\n # result files exist after the UclustConsensusTaxonAssigner\r\n # no longer exists\r\n self.assertTrue(exists(self.output_txt_fp))\r\n self.assertTrue(exists(self.output_uc_fp))\r\n self.assertTrue(exists(self.output_log_fp))\r\n\r\n # check that result has the expected lines\r\n with open(self.output_txt_fp, 'U') as f:\r\n output_lines = list(f)\r\n self.assertTrue('q1\\tA;F;G\\t1.00\\t1\\n' in output_lines)\r\n self.assertTrue('q2\\tA;H;I;J\\t1.00\\t1\\n' in output_lines)", "def make_file(self):\n\n f = open(get_output_path(), \"w\")\n \n f.write(self.export())\n \n f.close()\n\n return self", "def whriteInOuput(finalOutput):\n\n os.chdir(\"D:/IIHT/Python/Project/NRPT all companies scrapper/caches\")\n #open text file, return an object of type io.TextIOWrapper\n with open(\"Companies Website.txt\", \"w\") as writ:\n #write each line in the object op, return an object of type int\n writ.write('\\n'.join(finalOutput) + \"\\n\")", "def test_madlib_file_write():\n madlib(input_values)\n file_text = ''\n with open('assets/updated_madlib_text', 'r') as file:\n for line in file:\n file_text += line\n assert file_text == output_text", "def test_file_output(self):\n outf = tempfile.TemporaryFile()\n yamlish.dump(IN, outf)\n outf.seek(0)\n got_str = outf.read()\n outf.close()\n logging.debug(\"got_str = %s\", got_str)\n got = yaml.safe_load(got_str)\n self.assertEqual(got, self._expected, \"Result matches\")", "def test_erai_ascii_file_write(self):\n self.l2g.lsm_data_to_arc_ascii(self.data_var_map_array,\n self.hmet_write_directory)\n\n # Compare all files\n compare_directory = os.path.join(self.readDirectory, \"erai_hmet_data\")\n self._compare_directories(self.hmet_write_directory,\n compare_directory,\n ignore_file=\"hmet_file_list.txt\",\n raster=True)", "def _write_to_file(self):\n with open(self.filename + \".ir\", \"w+\") as file:\n file.writelines(\n [\"\\n\" + l if p != 0 else l for p, l in enumerate(self.lines)]\n )", "def test_written(self):\n\n checkit=subprocess.run([\"python\", \"../../taxonomy/src_files/rdp_lineage_to_tax.py\", \"-i\", \"../resource_files/rdp_test_taxonomy.csv\", \"-o\", \"test_rdp_taxonomy\"], capture_output=True, text=True)\n \n # is the folder there\n self.assertTrue(os.path.exists(os.path.exists(\"../processed_files/rdp_prep_taxonomy\")))\n \n # there should be 2 files in there\n files_in_dir=os.listdir(\"../processed_files/rdp_prep_taxonomy\")\n self.assertEqual(len(files_in_dir), 2)\n \n for x in files_in_dir:\n if x.split('.')[-1]=='txt':\n taxonomy_file=x\n \n # does the test match the provided actual output\n # rdp_team_taxonomy_check can be found on https://github.com/rdpstaff/classifier/issues/18\n self.assertTrue(filecmp.cmp(\"../resource_files/rdp_team_taxonomy_check.txt\", \"../processed_files/rdp_prep_taxonomy/{}\".format(taxonomy_file)))\n \n shutil.rmtree(\"../processed_files/rdp_prep_taxonomy\")", "def _test_output_created(self):\n TestHarness._test_output_created(self)\n source = glob.glob(os.path.join(os.getcwd(), 'source.*'))\n assert len(source) == 1, 'Either multiple or no source files ' \\\n 'exist.'\n assert source[0].endswith('h5'), \\\n 'Source file is not a HDF5 file.'", "def test_writer_with_file():\n outputfile = \"testfile.txt\"\n GCMT(write=outputfile)\n assert os.path.exists(outputfile)\n os.remove(outputfile)", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def test_to_file(self):\n fd, fp = mkstemp()\n close(fd)\n st = 
SampleTemplate.create(self.metadata, self.new_study)\n st.to_file(fp)\n self._clean_up_files.append(fp)\n with open(fp, 'U') as f:\n obs = f.read()\n self.assertEqual(obs, EXP_SAMPLE_TEMPLATE)\n\n fd, fp = mkstemp()\n close(fd)\n st.to_file(fp, {'2.Sample1', '2.Sample3'})\n self._clean_up_files.append(fp)\n\n with open(fp, 'U') as f:\n obs = f.read()\n self.assertEqual(obs, EXP_SAMPLE_TEMPLATE_FEWER_SAMPLES)", "def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def test_output_correct():\n global out_dir, cor_dir\n out = codecs.open(path.join(out_dir, 'oshea_similarity.json'),\n mode='r', encoding='utf-8')\n inp = codecs.open(path.join(cor_dir, 'oshea_similarity.json'),\n mode='r', encoding='utf-8')\n assert(out.read() == inp.read())", "def writetif(self,outputname,):\n pass", "def _generate_output_file(self):\n\n if self.output_static:\n return\n\n if not self.input_file_generated():\n self.output_generation_log = \"Generation failed. Input wasn't generated\"\n self.output_generation_successful = False\n else:\n solution = self.solution\n if solution is None:\n self.output_generation_log = \"Generation failed. No model solution specified.\"\n self.output_generation_successful = False\n else:\n problem_code = self.problem.get_judge_code()\n testcase_code = self.get_judge_code()\n judge = self.problem.get_judge()\n task_type = self.problem.get_task_type()\n if solution.language not in judge.get_supported_languages():\n self.output_generation_log = \\\n \"Generation failed. Solution language is not supported by the judge\"\n self.output_generation_successful = False\n else:\n evaluation_result = task_type.generate_output(\n problem_code=problem_code,\n testcase_code=testcase_code,\n language=solution.language,\n solution_file=(solution.name, solution.code),\n )\n if not evaluation_result.success:\n self.output_generation_log = \\\n \"Generation failed. Judge couldn't execute the solution. Details: {}\".format(\n evaluation_result.message\n )\n self.output_generation_successful = False\n elif evaluation_result.verdict != JudgeVerdict.ok:\n self.output_generation_log = \\\n \"Generation failed. 
Solution exited with verdict {} on the judge\".format(\n str(evaluation_result.verdict.name)\n )\n self.output_generation_successful = False\n else:\n self.output_generation_log = \"Generation successful\"\n self.output_generation_successful = True\n self._output_generated_file = evaluation_result.output_file\n self.save()", "def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def test_write_race_results_to_file():\n number = random.randint(1, 3)\n f1.write_race_results_to_file(number)\n with open(f\"results_for_race_{number}.txt\", encoding=\"utf-8\") as opened_file:\n list_of_lines = opened_file.readlines()\n assert len(list_of_lines) == 13", "def test_to_yaml_file(self):\n\n output_file = \"this_file_is_a_ghost\"\n File(output_file).delete()\n\n Dict(self.test_subject.copy()).to_yaml_file(output_file)\n\n expected = self.test_subject.copy()\n\n actual = Dict().from_yaml_file(output_file)\n\n self.assertEqual(expected, actual)\n\n File(output_file).delete()", "def write_file(self):\n\n running_time = str(self.running_time_end - self.running_time_start)\n rounded_running_time = '{:.10}'.format(running_time)\n output = 'path_to_goal: ' + str(self.path_to_goal) + '\\n'\n output += 'cost_of_path: ' + str(self.cost_of_path) + '\\n'\n output += 'nodes_expanded: ' + str(self.nodes_expanded) + '\\n'\n output += 'fringe_size: ' + str(self.fringe_size) + '\\n'\n output += 'max_fringe_size: ' + str(self.max_fringe_size) + '\\n'\n output += 'search_depth: ' + str(self.search_depth) + '\\n'\n output += 'max_search_depth: ' + str(self.max_search_depth) + '\\n'\n output += 'running_time: ' + rounded_running_time + '\\n'\n\n system_name = system()\n if system_name == 'Windows':\n output += 'max_ram_usage: (Not available on Windows OS)'\n elif system_name == 'Linux':\n output += 'max_ram_usage: ' + \\\n str(getrusage(RUSAGE_SELF).ru_maxrss / 1024) + '\\n'\n\n file = open('output.txt', 'w+')\n file.write(output)\n print(output)", "def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK 
\\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()", "def test_call_pynast_test1_file_output(self):\r\n # do not collect results; check output files instead\r\n actual = self.pynast_test1_aligner(\r\n self.pynast_test1_input_fp, result_path=self.result_fp,\r\n log_path=self.log_fp, failure_path=self.failure_fp)\r\n\r\n self.assertTrue(actual is None,\r\n \"Result should be None when result path provided.\")\r\n\r\n expected_aln = self.pynast_test1_expected_aln\r\n with open(self.result_fp) as result_f:\r\n actual_aln = Alignment.from_fasta_records(parse_fasta(\r\n result_f), DNA)\r\n self.assertEqual(actual_aln, expected_aln)\r\n\r\n with open(self.failure_fp) as failure_f:\r\n actual_fail = SequenceCollection.from_fasta_records(\r\n parse_fasta(failure_f), DNA)\r\n self.assertEqual(actual_fail.to_fasta(),\r\n self.pynast_test1_expected_fail.to_fasta())", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write_result(self, file_name):\n f = file(file_name, \"w\")\n f.write(self.m_result)\n f.close()", "def _generate_output(self):\n raise NotImplementedError()", "def write_exact_graph_to_file(self, output_file):\n print(\"Writing output file.\")\n with open(output_file, 'w') as f:\n f.write(\"# graph number = 0 name = interval_graph\\n\")\n f.write(str(len(self.vertices)) + \"\\n\")\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n f.write(\"{} {} {}\\n\".format(s, t, w))", "def cleaning_file():\n f = open (\"report_for_judy_part2.txt\", \"w\")\n f.close()", "def test_check_mapping_file_correct_file(self):\r\n\r\n # Use valid data, default parameters\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, 
self.expected_log_data_correct_input)\r\n\r\n # With additional parameters added should not change results using\r\n # same valid input data\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n has_barcodes=True,\r\n char_replace=\"A\",\r\n verbose=False,\r\n variable_len_barcodes=True,\r\n disable_primer_check=True,\r\n added_demultiplex_field=None)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)", "def __init__(self, output_path):\n self._file = open(output_path, 'w')\n # TODO when do we close the file?", "def to_output_file(self, content):\n self.__log(f'Starting to write response content to output file.')\n if self.output_file_exists() and not self.config['FORCE_OVERWRITE']:\n self.__log(f'Cannot write to file. Selected output file exists and FORCE_OVERWRITE is disabled.', 'error')\n raise FileExistsError\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' 
\\\n + self.options['image_format'].lower()\n with open(file, 'w') as f:\n f.writelines(content)\n self.__log(f'Successfully wrote response content to \"{file}\".', 'success')", "def test_call_output_to_file(self):\r\n fd, result_path = mkstemp(\r\n prefix='RtaxTaxonAssignerTests_', suffix='.fasta')\r\n close(fd)\r\n self._paths_to_clean_up.append(result_path)\r\n\r\n p = RtaxTaxonAssigner({\r\n 'reference_sequences_fp': self.reference_seqs_fp,\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_fp,\r\n 'read_1_seqs_fp': self.read_1_seqs_fp})\r\n\r\n self._paths_to_clean_up += self.cleanAll(self.read_1_seqs_fp)\r\n\r\n actual = p(self.input_seqs_fp, result_path=result_path)\r\n\r\n f = open(result_path)\r\n observed_lines = set(f.readlines())\r\n f.close()\r\n self.assertEqual(observed_lines, rtax_expected_result_single_lines)\r\n\r\n # Return value is None when result_path is provided (Not sure\r\n # if this is what we want yet, or if we would want both so\r\n # results could be logged to file...)\r\n self.assertEqual(actual, None)", "def test_to_json_file(self):\n\n output_file = \"this_file_is_a_ghost\"\n File(output_file).delete()\n\n Dict(self.test_subject.copy()).to_json_file(output_file)\n\n expected = self.test_subject.copy()\n\n actual = Dict().from_json_file(output_file)\n\n self.assertEqual(expected, actual)\n\n File(output_file).delete()", "def write_annotations(self, output_file):\n logging.info(self._header)\n np.savetxt(output_file, self._zeroes, header=\" \".join(self._header),fmt='%i',comments='')", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def test1_write():\n with open(FILE_DIR + FILE_NAME, mode='w', encoding='utf-8') as f:\n f.write(DATA)", "def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')", "def test_file(self, file_name, version, classifier_type):\n labels = []\n with open(file_name) as f:\n for line in f.readlines():\n print(line,self.predict(line))\n labels.append(self.predict(line))\n \n filename = 'test_results-' + classifier_type + '-' + version + '.txt'\n \n with open(filename, 'w') as f:\n for label in labels:\n f.write(str(label)+\"\\n\")\n \n print (\"Results from \",file_name,\" printed to:\",filename)", "def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()", "def writexyz(self,fname):\n xyzfile = open(fname + \".xyz\",\"a+\")\n xyzfile.write(str(self.natoms) + \"\\n\\n\")\n for a in self.atoms:\n \tcxyz = a.xyz - np.array(self.pbc_correction(a.xyz))\n\t\t\txyzfile.write(str(a.type) + \"\\t\" + str(cxyz[0]) + \"\\t\" + str(cxyz[1]) + \"\\t\" + str(cxyz[2]) + \"\\n\")\n xyzfile.close()", "def write_to_file(output, test_case_name, 
path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))", "def write_index_to_file(output_file, items): \n \n file = open(output_file, 'w')\n for item in items: \n str0 = str(item[0])\n str1 = ' '.join(str(x) for x in item[1])\n file.write( str0 + ' ' + str1 + '\\n') \n # file.write(item)\n print ('An inverted index has been writted in file')\n file.close()", "def expected_output(self):\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if not path.exists(expected_output_file):\n return None\n else:\n with open(expected_output_file, \"r\", encoding=\"utf8\") as f:\n return f.read()", "def test_empty_file(self):\n\t\tmain.Main(['input/empty.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/empty.csv'))", "def generate_output(input_filename: str, output_filename: str, goal_node: Node,\n generated: set) -> None:\n\n input_stream = io.open(input_filename, 'r', encoding='utf-8', errors='ignore',\n newline='\\n')\n with open(output_filename, 'w') as out_file:\n for i in range(0, 10):\n out_file.write(input_stream.readline().rstrip())\n out_file.write('\\n')\n \"\"\" The first ten lines of the output file are identical to those in the \n input file. The tenth line should be skipped because it's blank.\"\"\"\n out_file.write(str(goal_node.path_cost) + '\\n')\n # Line 11 of the output, the depth level d\n out_file.write(str(len(generated)) + '\\n')\n # Line 12 of the output, the total number of nodes generated\n\n # Writing Line 13 of the output, the sequence of moves\n length = len(goal_node.path_history)\n for i in range(length - 1):\n out_file.write(goal_node.path_history[i] + ' ')\n out_file.write(goal_node.path_history[length - 1] + '\\n')\n\n # Writing Line 14 of the output, the f(n) values\n f_line = str(goal_node.f) + ' '\n parent = goal_node.parent\n while parent: # Loop stops when parent == None\n f_line += (str(parent.f) + ' ')\n parent = parent.parent\n f_list = f_line.split(' ')\n # Breaks down the string to the integers it contains\n reverse = ''\n for i in range(len(f_list) - 2, -1, -1):\n # f_line[len(f_line)-1] is an extra whitespace character and\n # thus shouldn't be copied\n reverse += str(f_list[i])\n if i != 0:\n reverse += ' '\n \"\"\" The order of the f(n) values in f_line is from goal node \n to root node. 
The four lines above reverse the order, which \n is what the output format expects.\"\"\"\n out_file.write(reverse)\n\n out_file.close()", "def _write(self, out_file):\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write((' '*18).encode()) # pad bytes\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write((' '*37).encode()) # pad bytes\n out_file.write('{:1d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n for node in self.nodes:\n if self.format < 2:\n out_file.write(' '.encode())\n out_file.write('-1'.encode())\n if self.format == 0:\n out_file.write('{:5d}'.format(node.number).encode())\n else:\n out_file.write('{:10d}'.format(node.number).encode())\n for i in range(3):\n out_file.write('{:12.5E}'.format(node.pos[i]).encode())\n out_file.write('\\n'.encode())\n else:\n out_file.write(struct.pack('i', node.number))\n if self.format == 2:\n out_file.write(struct.pack('fff', *node.pos))\n else:\n out_file.write(struct.pack('ddd', *node.pos))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only", "def write_output(self, failed_genes):\r\n file_prefix = self.file_name.strip('sambamba_output.txt')\r\n fieldnames = ['GeneSymbol;Accession', 'percentage30']\r\n with open (f'../results/{file_prefix}.coverage_output.csv', 'w', newline = '') as output:\r\n csvwriter = csv.DictWriter(output, fieldnames=fieldnames)\r\n csvwriter.writeheader()\r\n csvwriter.writerows(failed_genes)", "def _write(self, out_file):\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write((' '*18).encode()) # pad bytes\n out_file.write('{:12d}'.format(self.numelem).encode())\n out_file.write((' '*37).encode()) # pad bytes\n out_file.write('{:1d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n for elem in self.elems:\n if self.format < 2:\n out_file.write(' -1'.encode())\n if self.format == 0:\n out_file.write('{:5d}'.format(elem.number).encode())\n else:\n out_file.write('{:10d}'.format(elem.number).encode())\n out_file.write('{:5d}'.format(elem.type).encode())\n out_file.write('{:5d}'.format(elem.group).encode())\n out_file.write('{:5d}'.format(elem.material).encode())\n out_file.write('\\n'.encode())\n num_nodes = FRDElem.nodesPerType[elem.type]\n num_lines = int(num_nodes/(5*(3-self.format)+1))+1\n for j in range(num_lines):\n out_file.write(' -2'.encode()) # pad byte and key = -2\n k_start = j*5*(3-self.format)\n k_end = min(num_nodes, (j+1)*5*(3-self.format))\n if self.format == 0:\n for k in range(k_start, k_end):\n out_file.write(\n '{:5d}'.format(elem.nodes[k]).encode())\n else:\n for k in range(k_start, k_end):\n out_file.write(\n '{:10d}'.format(elem.nodes[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', elem.number))\n out_file.write(struct.pack('i', elem.type))\n out_file.write(struct.pack('i', elem.group))\n out_file.write(struct.pack('i', elem.material))\n out_file.write(struct.pack('i'*num_nodes, *elem.nodes))\n\n if self.format < 2:\n out_file.write(' -3\\n') # last record for ascii only", "def final_output_analysis(samples_dict, dir_results_path):\n with open(path.join(dir_results_path, 'corrupted_processes.txt'), 'w', encoding='utf-8', errors='replace') as c_out:\n with open(path.join(dir_results_path, 'analysis.txt'), 'w', encoding='utf-8', errors='replace') as i_out:\n with 
open(path.join(dir_results_path, 'syscalls.txt'), 'w', encoding='utf-8', errors='replace') as s_out:\n for uuid in sorted(samples_dict.keys()):\n reduced_sample = samples_dict[uuid]\n\n i_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n s_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n c_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n\n # corrupted processes section\n process_repr = '\\t\\t{:15s}\\t{:10d}\\t{:15s}\\tby:\\t{:15s}\\t{:10d}\\n'\n for process in reduced_sample.corrupted_processes:\n c_out.write(process_repr.format(process[0],\n process[1],\n process[2],\n process[3],\n process[4]))\n\n # instruction count section\n i_out.write(string_utils.out_final + '\\t' + str(reduced_sample.total_instruction) + '\\n')\n i_out.write(string_utils.out_terminating + '\\t' + str(reduced_sample.terminate_all) + '\\t')\n i_out.write(string_utils.out_sleeping + '\\t' + str(reduced_sample.sleep_all) + '\\t')\n i_out.write(string_utils.out_crashing + '\\t' + str(reduced_sample.crash_all) + '\\t')\n i_out.write(string_utils.out_raising_error + '\\t' + str(reduced_sample.error_all) + '\\t')\n i_out.write(string_utils.out_writes_file + '\\t' + str(reduced_sample.write_file) + '\\n')\n\n # system calls count section\n s_out.write(string_utils.syscall_final + '\\t' + str(reduced_sample.total_syscalls) + '\\n')\n\n i_out.write('\\n')\n s_out.write('\\n')\n c_out.write('\\n')", "def fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()", "def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def finalize_result(self):\n logging.debug(\"finalize_result()\")\n with open(self.html_file, \"a\") as result_file:\n result_file.write(\"<br/>Analyzis successful\")\n with open(self.txt_file, \"a\") as result_file:\n result_file.write(\"Analyzis successful\")", "def write_file(self,filename):\n \n with open(filename, 'w') as f:\n tab_width = np.max([len(k) for k in self.header.keys()])\n for k,v in self.header.items():\n f.write(u'{0}:\\t{1}\\n'.format(k, v).encode('utf8').expandtabs(tab_width+2))\n np.savetxt(f, self.data, fmt ='%f %f %f %d')", "def flush(self):\n self.out_file.flush()", "def export(fileName, result):\n with open(fileName, 'a') as output:\n output.write(result)", "def write(self):\n pass", "def write(self):\n pass", "def test_call_writes_output(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should detect and remove chimeric sequence based\r\n # during ref based detection, then write the OTU mapping file in\r\n # QIIME format.\r\n\r\n fd, self.tmp_result_path = mkstemp(\r\n prefix='UsearchOTUMapping_',\r\n suffix='.txt')\r\n close(fd)\r\n f = open(self.tmp_result_path, \"w\")\r\n\r\n fd, 
self.tmp_failures_path = mkstemp(\r\n prefix='UsearchFailures_',\r\n suffix='.txt')\r\n close(fd)\r\n f = open(self.tmp_failures_path, \"w\")\r\n\r\n self._files_to_remove.append(self.tmp_result_path)\r\n self._files_to_remove.append(self.tmp_failures_path)\r\n\r\n app = UsearchReferenceOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'db_filepath':\r\n self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection':\r\n True,\r\n 'de_novo_chimera_detection':\r\n False,\r\n 'cluster_size_filtering':\r\n False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.97,\r\n 'percent_id_err': 0.97,\r\n 'abundance_skew': 2\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database,\r\n result_path=self.tmp_result_path,\r\n failure_path=self.tmp_failures_path)\r\n\r\n expected_otu_mapping =\\\r\n [\"1\\tSolemya\\tSolemya_seq2\\n\",\r\n \"0\\tusearch_ecoli_seq\\tusearch_ecoli_seq2\\n\"\"\"\r\n ]\r\n\r\n f = open(self.tmp_result_path, \"U\")\r\n\r\n actual_otu_mapping = f.readlines()\r\n\r\n self.assertEqual(actual_otu_mapping, expected_otu_mapping)\r\n\r\n expected_failures = [\"chimera\"]\r\n\r\n f = open(self.tmp_failures_path, \"U\")\r\n\r\n actual_failures = f.readlines()\r\n\r\n self.assertEqual(actual_failures, expected_failures)", "def write_output_file(updated_file, file_path):\n orig_file = file_path + \".orig\"\n # remove an existion .orig file\n if os.path.isfile(orig_file):\n os.remove(orig_file)\n # rename the current file\n os.rename(file_path, orig_file)\n # write the new file\n with open(file_path, mode='w', encoding='utf-8', newline='') as file_out:\n for line in updated_file:\n file_out.write(line)", "def write_result(self, file_path):\n f = open(file_path, \"a\")\n f.write(\"{}\\t{}\\n\".format(*[self.name, str(self.ROC_AUC_value)]))\n f.close()", "def dump_to_file(final_results):\n\t#Add prefix result\n\tif final_results[\"Results\"][\"Test passed\"] == True:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_PASSED.json\"\n\telse:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_FAILED.json\"\n\twith open(ouput_filepath, 'w') as fp:\n\t\tjson.dump(final_results, fp)\n\treturn ouput_filepath" ]
[ "0.68608725", "0.68279564", "0.67969465", "0.67020315", "0.65961033", "0.6551729", "0.65434563", "0.65230155", "0.6469242", "0.6408502", "0.63981223", "0.63253784", "0.6316267", "0.6316267", "0.63015395", "0.6297902", "0.6295766", "0.62910724", "0.62809604", "0.6252292", "0.6172444", "0.61552143", "0.61552143", "0.6150255", "0.61425686", "0.6139873", "0.6130938", "0.61260754", "0.61254674", "0.61242944", "0.6112368", "0.6086146", "0.6081919", "0.6081464", "0.6079866", "0.6075212", "0.60659754", "0.60657346", "0.60650235", "0.60634863", "0.60605055", "0.60503733", "0.6046899", "0.60359716", "0.6034523", "0.60317236", "0.60110414", "0.6008476", "0.6000559", "0.59940016", "0.59884465", "0.59831107", "0.5981659", "0.5974428", "0.59653634", "0.59365004", "0.59284157", "0.5924841", "0.591412", "0.5903251", "0.59028536", "0.589716", "0.58963203", "0.5890763", "0.5879682", "0.5860352", "0.5857418", "0.58481216", "0.5847879", "0.5846739", "0.584623", "0.58450365", "0.5836928", "0.58326393", "0.58260655", "0.582517", "0.5824562", "0.5818594", "0.5815206", "0.58097893", "0.5806368", "0.58062387", "0.58053315", "0.57969826", "0.5796746", "0.57915336", "0.57889324", "0.57882786", "0.57880825", "0.5781405", "0.5773167", "0.5771601", "0.5770119", "0.57697445", "0.5768689", "0.5768584", "0.5768584", "0.57637507", "0.5762578", "0.57620573", "0.57600343" ]
0.0
-1
check if a user exist in the db
def __check_user_exist(self):
    login_form = self.login_form()
    user = User.query.filter_by(username=login_form.username.data).first()
    if user is None or not user.get_password(login_form.password.data):
        flash('Invalid username or password')  # TODO: flash in Template hinzufuegen
        return redirect(url_for('login'))
    login_user(user, remember=login_form.remember_me.data)
    next_page = request.args.get('next')
    # if 'next' is found and the host is specified
    # it will redirect
    if not next_page or url_parse(next_page).netloc != '':
        next_page = url_for('index')
    return redirect(next_page)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def userExists(self, username):\n data = db.session.query(User.id).filter_by(username = username).first()\n if data is None:\n return False\n else:\n return True", "def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0", "def test_existence(self):\n self.assertTrue(User.objects.filter(username='rcm').exists())", "def _user_exists(self, username):\n return self.db.query(User).filter_by(name=username).first() is not None", "def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1", "def user_exists(username):\n sql = \"SELECT username \" \\\n \"FROM users \" \\\n \"WHERE username=:username\"\n result = db.session.execute(sql, {\"username\": username})\n user = result.fetchone()\n if user is None:\n return False\n else:\n return True", "def check_user_from_db(username: str, email: str) -> bool:\n if User.objects.filter(Q(username=username) | Q(email=email)).first():\n raise UniqueUser(\"Пользователь уже существует\")\n else:\n return True", "def user_exists(cls, *args, **kwargs):\r\n user_model = cls.user_model()\r\n query = get_query_by_dict_param(user_model, kwargs)\r\n return user_model.select().where(query).count() > 0", "def exists(username):\n if Users.query.filter_by(username=username).first():\n return True\n return False", "def _checkUserExists(username,self):\r\n \r\n exists = False\r\n \r\n if _findUser(username) is not None:\r\n exists = True\r\n \r\n return exists", "def user_exists(conn, account):\n cur = conn.cursor()\n cur.execute(f\"SELECT * FROM users WHERE account = '{account}'\")\n\n rows = cur.fetchall()\n\n if len(rows) > 0:\n return True\n else:\n return False", "def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False", "def checkUserExists(self, email, username):\n query = \"SELECT * FROM User WHERE Email='\"+email+\"' OR UserName = '\"+username+\"';\"\n self.cur.execute(query)\n\n data = self.cur.fetchall()\n if len(data):\n return True\n else:\n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user_exists(self):\n is_exists = False\n if auth.UserInfo.objects.filter(\n user_id__username=self.username,\n is_active=True).exists():\n is_exists = True\n return is_exists", "def checkIfUserExists(self, userID):\n return self.db.select_user(userID)", "def assert_user_exists(self, user_id):\n result = self.con.execute(\n 'SELECT id FROM registered_user WHERE id = ? 
AND active = 1',\n (user_id,)\n ).fetchone()\n if result is None:\n raise err.UnknownUserError(user_id)", "def is_user_present(self, username): # WORKS\n done = self.cur.execute(\"SELECT username FROM users WHERE username = \\\"{}\\\"\".format(username))\n if done == 1:\n return True\n else:\n return False", "def check():\n username = request.args.get(\"user_name\")\n users = db.execute(\"SELECT * FROM users WHERE username = :username\",\n {\"username\": username}).fetchone()\n if users is None:\n return jsonify(True)\n # Username is taken\n return jsonify(False)", "def is_existing_user(email):\n if not email:\n return False\n user = session.query(KlaxerUser).filter(KlaxerUser.email==email).first()\n return True if user else False", "def user_exists(mail_or_id) -> bool:\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n if type(mail_or_id) is int:\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE id=?\n \"\"\", (mail_or_id,))\n else: #mail\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE mail=?\n \"\"\", (mail_or_id,))\n \n conn.commit()\n \n exists = bool(len(list(c)))\n \n conn.close()\n\n return exists", "def UserExist(self, username):\n return self.com.CheckUserexists(username)", "def username_exist(username):\n return User.objects.filter(username=username).first()", "def check_username_exist(request):\n username = request.POST.get(\"username\")\n user_obj = User.objects.filter(username=username).exists()\n if user_obj:\n return HttpResponse(True)\n else:\n return HttpResponse(False)", "def user_exists(self, login):\n\t\tif login in self.users_by_name and isinstance(self.users_by_name[login], VDOM_user):\n\t\t\treturn True\n\t\treturn False", "def exists(cls, user_id):\n user_id = int(user_id)\n user = DB_USER_TABLE.get(doc_id=user_id)\n if not user:\n raise ValueError(f\"unknown user '{user_id}'\")\n return user_id", "def test_get_user_if_exists(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})", "def user_exists(cls, name):\n\n for user in cls.user_list:\n if user.user_name == name:\n return True\n\n return False", "def user_exists(username):\n db, c = config.start_db()\n # Check whether there is a row in 'users' where the column 'username' has\n # the value of `username`\n c.execute(\n 'SELECT EXISTS(SELECT 1 FROM users WHERE username=? 
LIMIT 1)',\n (username,)\n )\n result = c.fetchone()[0] # 1 if user exists, else 0\n config.end_db(db)\n return result == 1", "def test_07_create_user_exists(self):\n\n _, user = self.get_random_item(models.User)\n success, error = utils.create_user(user, session=self.session)\n db_user = db_utils.get_item(\n models.User, filters={\"id\": user[\"id\"]}, session=self.session\n )\n user[\"password\"] = db_user.password\n self.assertTrue(db_user)\n db_user = db_user.as_dict()\n items_equal = utils.is_equal(user, db_user)\n self.assertTrue(items_equal)\n self.assertTrue(success)\n self.assertFalse(error)", "def exists_user(self, tenant_name, username):\n base = basedn.people_dn(username, tenant_name)\n return self.exists_entry(base)", "def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def has_user(self, user):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_all_users_query+\" WHERE $username_field$='$username$'\",{'username_field':self.sql_username_field,'password_field':self.sql_password_field,'username':user})\n self.log.debug(\"sqlflexibleauthstore: has_user: %s\" % (query,))\n cursor.execute(query)\n\n for row in cursor:\n return True\n return False", "def validate_user_existence():\n from sfa_api.utils.storage import get_storage\n storage = get_storage()\n if not storage.user_exists():\n try:\n info = request_user_info()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n return False\n else:\n if not info.get('email_verified', False):\n # User has a valid token, but their email\n # is yet to be verified\n return False\n storage.create_new_user()\n return True", "def check():\n # Sets variable username to username inputed by user\n username = request.args.get(\"username\")\n # Selects userid from username inputed by user (if there is one)\n userinfo = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n # If there is no info on the username inputed, that means username is not taken, and user can take the username\n if not userinfo:\n # Return true for the username is not taken\n return jsonify(True)\n # Return false if there is info on the username (meaning it was taken)\n return jsonify(False)", "def ensure_user_in_database():\n if 'email' in login_session:\n user_exists = session.query(exists().where(User.email == login_session['email'])).scalar()\n if not user_exists:\n user = User(\n id=login_session['userid'],\n picture=login_session['picture'],\n name=login_session['name'],\n email=login_session['email'],\n client_id=login_session['client_id']\n )\n session.add(user)\n session.commit()\n print(\"Recreated user in database\")", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def has_user(self, username):\n return username in self.user_table", "def test_user_exists(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'abcd1234',\n 'name': 'Test',\n }\n\n # call the create function above\n create_user(**payload)\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP400 bad request\n # becos user already exist\n self.assertEqual(response.status_code, 
status.HTTP_400_BAD_REQUEST)", "def user_exists(email):\n um = logic.UserManager()\n try:\n user = um.lookup_user_by_email(email)\n except ex.TickeeError, e:\n transaction.abort()\n return dict(exists=False)\n else:\n transaction.commit()\n return dict(exists=True,\n id=user.id)", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_user_already_exists(self):\n User.objects.create_user(\n '[email protected]',\n '[email protected]',\n '123existing'\n )\n response = self.client.post('/o/register', {\n 'email': '[email protected]',\n 'password': '123existing',\n 'terms_acceptance': True,\n })\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'auth/login.html')\n self.assertContains(\n response,\n 'Użytkownik o podanym emailu już istnieje',\n )\n self.assertNotIn('_auth_user_id', self.client.session)\n self.assertEqual(User.objects.all().count(), 1)", "def userObjExists(self, user : bbUser.bbUser) -> bool:\n return self.userIDExists(user.id)", "def test_get_user_by_username(self):\n\t\tusername_in_db = server.get_user_by_username('Natasha')\n\t\tself.assertTrue(username_in_db, 'Query did not fetch user object.')\n\t\tusername_not_in_db = server.get_user_by_username('xyz')\n\t\tself.assertFalse(username_not_in_db, 'Query fetched user that did not exist (xyz).')", "def check_email_exist(request):\n email = request.POST.get(\"email\")\n user_obj = User.objects.filter(email=email).exists()\n if user_obj:\n return HttpResponse(True)\n else:\n return HttpResponse(False)", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def _verify_user_existence(self, user_exists, social_link_exists, user_is_active=None, username=None):\n users = User.objects.filter(username=(username if username else \"test_username\"))\n assert users.exists() == user_exists\n if user_exists:\n assert users[0].is_active == user_is_active\n self.assertEqual(\n UserSocialAuth.objects.filter(user=users[0], provider=self.BACKEND).exists(),\n social_link_exists\n )\n else:\n assert UserSocialAuth.objects.count() == 0", "def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False", "def check_user(self, login, password):\n user = self.cursor.execute(\n '''SELECT * FROM users WHERE login = ?''', login).fetchone()\n if user is not None:\n if user[3] == password:\n return Message('response', 'User exists')\n else:\n return Message('response',\n 'Users exists. 
Check password')\n else:\n return Message('response', 'User does not exists')", "def does_exist_in_mysql(username: str, password: str) -> bool:\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL = \"\"\"select id from user where username = %s and password = %s\"\"\"\n cursor.execute(_SQL, (username, password))\n answer = cursor.fetchall()\n return answer", "def check_user_exists(username):\n user = User.query(User.name == username.title()).get()\n\n if not user:\n raise endpoints.NotFoundException(\n 'User {} does not exist!'.format(username))\n else:\n return user", "def check_user(used_name, used_password):\n user_exists = UserData.user_login(used_name, used_password)\n\n return user_exists", "def check():\n\n # Check length of request.form.get(\"username\")\n if not request.args.get(\"username\"):\n return jsonify(False)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\",\n username=request.args.get(\"username\"))\n\n\n\n # Ensure username not exists\n if not rows:\n return jsonify(True)\n else:\n return jsonify(False)", "def check_if_user_exists(self, email):\n for user in self.users.values():\n if user['email'] == email:\n return user['id']\n else:\n return False", "def user_exists(cls,password):\n for user in cls.user_list:\n if user.password == password:\n return True\n\n return False", "def userExists(self, user_uuid):\n return self.getUser(user_uuid) is not None", "def check_user(entry_code):\n\tif len(User.objects.filter(unique_code=entry_code)) == 1:\n\t\treturn(True)\n\telse:\n\t\traise Http404('No users exist with this code.')", "def test_user_exists(self):\n payload = {\n 'email': '[email protected]',\n 'password': '123PassW0rd',\n 'name': 'Test Name'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "async def exists_db(self, member, guild: discord.Guild) -> bool:\n if hasattr(member, 'id') and hasattr(guild, \"id\"):\n cursor = await self.bot.db.execute(\n 'SELECT * FROM users WHERE user_id = ? 
AND guild_id = ?', (member.id, guild.id)\n )\n results = await cursor.fetchall()\n if len(results) > 0:\n return True\n return False", "def check_user(username, password):\n user = session.query(User).filter(User.username == username).filter(User.password == password).first()\n if user:\n return user.id", "def admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def verify_user(self, username):\n try:\n self.c.execute('SELECT name FROM profiles WHERE name=(?)' (username,))\n user = self.c.fetchone()[0]\n return user == username\n\n except TypeError:\n return False", "def test_user_existence(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def test_find_user_by_username(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n user_exists = User.user_exist(\"test\")\n self.assertTrue(user_exists)", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def invalid_user(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {'username':username})\n rows = cur.rowcount\n if rows > 0:\n return True\n return False", "def validates_user():\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n # age = request.form.get(\"age\")\n # zipcode = request.form.get(\"zipcode\")\n\n # is_user_there = User.query.filter(User.email == email).all()\n\n is_user_there = db.session.query(User).filter(User.email == email).first()\n\n if is_user_there:\n flash(\"You're already registered!\")\n return redirect(\"/login\")\n\n else:\n new_user = User(email=email, password=password)\n db.session.add(new_user)\n db.session.commit()\n\n flash(\"Success! 
You were registered!\")\n\n return redirect(\"/\")", "def check():\n username = request.args.get(\"username\")\n names = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n if names and username:\n return jsonify(False)\n elif not names and username:\n return jsonify(True)\n else:\n return jsonify(False)", "def user_exists(self, email):\n user = UserModels.fetch_user_by_email(email)\n if user:\n return {\n \"status\": 400,\n \"error\": \"That email already exists\"\n }", "def test_user_exists(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n data = {'email': self.user.email}\n response = self.client.post(self.url, data=data)\n\n expected_response_code = 200\n self.assertEqual(expected_response_code, response.status_code)\n self.assertTrue(response.data.get('exists'))", "def post(self, request, *args, **kwargs):\n name = request.data.get('username', 'anonymous')\n if User.objects.filter(username=name).exists():\n return Response({\n 'message': True\n })\n return Response({\n 'message': False\n })", "def check_user(self, username, password):\n user = [user for user in self.db if user['username'] == username]\n if user:\n if check_password_hash(user[0][\"password\"], password):\n return True\n return False\n return False", "def existAccount(login:str) -> bool:\n\n query = f\"SELECT * FROM {Account.tablename} WHERE {Account.loginCol} = ?\"\n\n try:\n db = DataBaseConnection()\n db.cursor.execute(query, login)\n except Exception as error:\n return {\"flag\": \"queryError\", \"message\": f\"{error}\"} \n else:\n row = db.cursor.fetchone()\n\n if row:\n return True\n else:\n return False", "def record_exists(user):\n cnx = create_connection()\n cursor = cnx.cursor()\n\n query = \"SELECT * FROM \" + USAGE_TABLE['name'] + \" WHERE \" + USAGE_TABLE['relational_column'] + \" = '\" + user + \"'\"\n\n try:\n cursor.execute(query)\n except mysql.connector.Error as e:\n cursor.close()\n cnx.close()\n if e.errno == errorcode.ER_BAD_TABLE_ERROR:\n print(\"Table doesn't exist!\")\n else:\n print(e)\n return\n\n rows = cursor.fetchall()\n cnx.close()\n cursor.close()\n\n if len(rows):\n return True\n else:\n return False", "def test_get_user_exists(self):\n # First make the user\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now get the user data and verify it is correct\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']", "def test_user_exists(self):\n\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n 
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()", "def is_username_taken(username):\n if User.objects.filter(username=username).exists():\n return True\n return False", "def check_if_row_exists(self, session, data):\n\n row_exists = None\n user_id = 0\n\n try:\n\n user_row = self.get_user_by_id(session, data)\n\n if user_row is not None:\n user_id = user_row.user_id\n else:\n user_id = 0\n\n logger.info('User Row object in DB: %s', str(user_row))\n\n row_exists = session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \\\n filter(UsersAuthModel.is_active == \"true\").scalar()\n\n logger.info('Row to data: {}, Exists: %s'.format(data), str(row_exists))\n\n except SQLAlchemyError as exc:\n row_exists = None\n logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +\n str(exc.code)))\n raise mvc_exc.IntegrityError(\n 'Row not stored in \"{}\". IntegrityError: {}'.format(data.get('username'),\n str(str(exc.args) + ':' + str(exc.code)))\n )\n finally:\n session.close()\n\n return row_exists", "def check():\n\n users_rows = db.execute('SELECT username FROM users')\n\n users = [user['username'] for user in users_rows]\n\n if len(str(request.args.get('username'))) > 1 and request.args.get('username') not in users:\n return jsonify(True)\n else:\n return jsonify(False)", "def does_user_exist(self, email_address: str) -> bool:\n search = {'email': email_address}\n result = self._collection.find(search)\n return True if result.retrieved == 1 else False", "def test_user_exists(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass123'\n }\n create_user(**payload)\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def test_func(self):\n return (Student.objects.filter(user=self.request.user).exists())", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def check():\n\n # Get username\n username = request.args.get(\"username\")\n\n # Check for username\n if not len(username) or db.execute(\"SELECT 1 FROM users WHERE username = :username\", username=username.lower()):\n return jsonify(False)\n else:\n return jsonify(True)", "def test_user_exists(self):\n payload = {'email': '[email protected]','password': 'testpass'}\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def username_present(username):\n if User.objects.filter(username=username).count():\n return True\n return False", "def check():\n username = request.args.get(\"username\")\n if len(username) < 1:\n print(\"false len\")\n return jsonify(\"false\")\n name = db.execute(f\"SELECT * FROM users WHERE username = '{username}'\")\n if name:\n print(\"false\")\n return \"false\"\n else:\n print(\"true\")\n return \"true\"", "def checkIsUsernameAvailable(self, username):\n\n return User.objects.filter(username__iexact=username).exists()", "def test_user_exist(self):\n data = {\n 'email': '[email protected]',\n 'password': 'testtest',\n 'first_name': 'Test test',\n 'last_name': 'Test'\n } \n sigin_in_user(**data)\n res = self.client.post(SIGN_IN_USER_URL, data)\n\n 
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def has_user(self, username):\n\t\treturn username in self.users", "def require_user( request ):\n\n db = get_db()\n\n if ( not 'users_id' in session ):\n return False;\n\n users_id = session[ 'users_id' ]\n\n user = db.execute( text( \"select users_id, name, email from users where users_id = :id and is_active\" ), id = users_id ).fetchone()\n\n if ( not user ):\n return False;\n\n return user", "def email_exist(email):\n return User.objects.filter(email=email).first()", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())" ]
[ "0.8210985", "0.8123747", "0.8072711", "0.8056698", "0.80316305", "0.7948778", "0.79255694", "0.78522176", "0.7824358", "0.7791029", "0.7768088", "0.7739863", "0.7722479", "0.77069974", "0.77069974", "0.7654071", "0.76501435", "0.7623449", "0.76128906", "0.76000804", "0.759825", "0.7596952", "0.755655", "0.75457954", "0.7545174", "0.7524973", "0.75055045", "0.749912", "0.7487915", "0.74793386", "0.7437162", "0.7427711", "0.74262214", "0.7364999", "0.73635846", "0.73602027", "0.7355856", "0.7352697", "0.7341581", "0.7328948", "0.73092836", "0.7278843", "0.72248846", "0.72123027", "0.7208946", "0.7208827", "0.72063655", "0.7192771", "0.7192363", "0.71915585", "0.7167236", "0.7166152", "0.7157714", "0.7142904", "0.7136278", "0.71360016", "0.71340066", "0.71198475", "0.7117408", "0.71118873", "0.7110382", "0.71081704", "0.71059906", "0.710124", "0.70941865", "0.7092016", "0.7091704", "0.70912015", "0.70891684", "0.7089004", "0.7080205", "0.7077127", "0.70528996", "0.7047577", "0.7039035", "0.7030259", "0.7024179", "0.7013783", "0.7005331", "0.69971997", "0.69875425", "0.69638395", "0.69632626", "0.69562906", "0.69532716", "0.694706", "0.69377446", "0.69268894", "0.6926056", "0.6921999", "0.69190925", "0.69188035", "0.69156975", "0.69040793", "0.6896546", "0.68959224", "0.6887482", "0.6885562", "0.6867982", "0.6845349" ]
0.7002597
79
Checks if a string is a permutation of a palindrome by populating a map and counting the occurrences of letters. O(N)
def is_palindrome_permutation(string):
    letter_to_count = dict()
    for letter in string:
        letter_to_count[letter] = letter_to_count.get(letter, 0) + 1

    residual = 0
    for count in letter_to_count.values():
        residual += count % 2

    # there can be a single letter with an odd character count when the palindrome is of odd length
    return residual <= 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True", "def palindrome_permutation(s):\n char_dict = {}\n for i in s:\n if i in char_dict:\n char_dict[i] += 1\n else:\n char_dict[i] = 1\n numOdd = 0\n for key in char_dict:\n if key != ' ':\n if char_dict[key] % 2 == 1:\n numOdd += 1\n if numOdd < 2:\n print_permutation(char_dict)\n return True\n else:\n return False", "def is_palindrome_permutation(input_string):\n input_string = input_string.lower()\n input_string = ''.join(input_string.split())\n\n number_chars = {}\n number_even_chars = 0\n\n for char in input_string:\n if char in number_chars:\n number_chars[char] += 1\n else:\n number_chars[char] = 1\n\n for char in number_chars:\n if number_chars[char] % 2 != 0:\n number_even_chars += 1\n if number_even_chars >= 2:\n return False\n\n return True", "def palindromePermutation(s):\n char_count = {}\n for character in s:\n if character == ' ': continue # skip the spaces.\n char_count[character] = char_count.get(character, 0) + 1\n\n odd = False\n for key in char_count:\n if char_count[key] % 2 != 0:\n if odd:\n return False\n odd = True\n\n return True \n\n # Time Complexity: O(n)\n # Space Complexity: O(m), where m is the number of unique characters", "def check_permutation_of(string1,string2):\n if len(string1) != len(string2): #O(1)\n return False\n return collections.Counter(string1) == collections.Counter(string2) #O(n+n) to make the dictionaries\n #O(n+n) to compare equality?\n #so O(4n) == O(n).", "def permutation(string):\n i = 0\n j = len(string) - 1\n while i < j:\n if string[i] != string[j]:\n return False\n i += 1\n j -= 1\n return True", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def checkPermutation(s, t):\n\n # Count each unique letter in both strings and compare the two dicts.\n s_count = {}\n t_count = {}\n for character in s:\n s_count[character] = s_count.get(character, 0) + 1\n\n for character in t:\n t_count[character] = t_count.get(character, 0) + 1\n\n return s_count == t_count\n\n # Time Complexity: O(n)\n # Space Complexity: O(n)", "def is_permutation_palindrome(str):\n for s in permutations(str): # loop through all permutations of str\n if is_palindrome(s):\n return True # successfully find a palindrome permutation\n return False # reach this, then no possible permutation is palindrome", "def palindrome_permutation(w):\n w = w.strip().replace(' ', '')\n chars = {}\n for c in w:\n try:\n chars[c] += 1\n except KeyError:\n chars[c] = 1\n\n if len(w) % 2 == 0:\n #Check if there is an even number\n #of every character in w.\n return all(x % 2 == 0 for x in chars.values()) \n else:\n #Check if there is an even number\n #of every character in w,\n #except for exactly one character.\n found_odd = False\n for c in chars:\n if chars[c] % 1 == 0:\n if not found_odd:\n found_odd = True\n else:\n return False\n \n if found_odd:\n return True\n else:\n return False", "def sherlockAndAnagrams(s):\n # A Dict of palindromes and their counts.\n palindrome_counts = {}\n\n # Get 
all substrings of length len(s)/c\n for substring_length in range(len(s) - 1):\n for substring_starting_index in range(len(s) - substring_length):\n substring_end_index = substring_starting_index + substring_length + 1\n substring = s[substring_starting_index:substring_end_index]\n # TODO: Sorting is an inefficient way to \"hash\" by palindrome.\n # A letter count dict would be more efficient (in the initial grouping).\n substring_arr = list(substring)\n substring_arr.sort()\n sorted_substring = \"\".join(substring_arr)\n\n if palindrome_counts.get(sorted_substring):\n palindrome_counts[sorted_substring] += 1\n else:\n palindrome_counts[sorted_substring] = 1\n\n return sum([_two_of_m(val) for val in palindrome_counts.values()])", "def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]", "def checkPermutation(string1, string2):\n string1_content = {}\n # Hash the first string\n for i in string1:\n if string1_content.get(i) is None:\n string1_content[i] = 1\n else:\n string1_content[i] += 1\n\n # For each character in the section string, search for it\n for i in string2:\n if string1_content.get(i) is None:\n return False\n string1_content[i] -= 1\n\n # Make sure every character in the first string had a matching character in the second string\n for key, value in string1_content.items():\n if value != 0:\n return False\n return True", "def is_anagram_of_palindrome(word):\n\n counts = {}\n num_of_odd_occurences = 0\n\n for char in word:\n counts[char] = counts.get(char, 0) + 1\n for val in counts.values():\n if val % 2 != 0:\n num_of_odd_occurences += 1\n\n return num_of_odd_occurences <= 1", "def is_permutation(str1, str2):\n\n chars = dict()\n\n def check_chars(ch, can_add, word_index):\n \"\"\"\n\n :param ch: the character we're looking for\n :param can_add: boolean which states if we can add more items to the dict\n :param word_index: int to identify the word\n :return: void\n \"\"\"\n if ch not in chars and can_add:\n chars[ch] = [False, word_index]\n else:\n chars[ch] = [True, word_index]\n\n n1 = len(str1)\n n2 = len(str2)\n for i in range(0, max(n1, n2)):\n if i < n1:\n check_chars(str1[i], i < n1, 1)\n if i < n2:\n check_chars(str2[i], i < n2, 2)\n\n word = None\n for ch in chars:\n if not chars[ch][0]:\n if word is None:\n word = chars[ch][1]\n elif word is not chars[ch][1]:\n return False\n return True", "def is_palindrome(text):\n\n # Property of a palindrome:\n # There be a maximum of only one letter that sums to an odd number\n \n char_count = {}\n # edge cases\n # Consider empty text as palindrome\n \n for char in text:\n if char in char_count:\n char_count[char] += 1\n else:\n char_count[char] = 1\n \n odd_count = 0\n for count in char_count.values():\n if count % 2 == 1:\n odd_count += 1\n if odd_count > 1:\n return False\n \n return True", "def check_palindrome(word):\r\n char_count = {} #char count hash\r\n for char in word:\r\n if char in char_count:\r\n char_count[char] += 1\r\n else:\r\n char_count[char] = 1\r\n odd_count = 0 #counting number of odd nos encountered\r\n for count in 
char_count.values():\r\n if count % 2 != 0:\r\n odd_count += 1\r\n len_word = len(word)\r\n if len_word % 2 == 0:\r\n if odd_count >= 1:\r\n return False\r\n else:\r\n if odd_count > 1:\r\n return False\r\n return True", "def build_permutation_dictionary(input_string):\n string_contents = {}\n\n for char in input_string:\n if char not in string_contents:\n string_contents[char] = 0\n else:\n string_contents[char] += 1\n\n return string_contents", "def is_perm(str1, str2):\n\n if len(str1) != len(str2):\n return False\n\n char_ct = defaultdict(int)\n\n for char in str1:\n char_ct[char] += 1\n\n for char in str2:\n char_ct[char] -= 1\n\n if char_ct[char] < 0:\n return False\n\n return True", "def isPalindromes(s):\n\n def toChar(s):\n s= s.lower()\n letters=''\n for c in s:\n if c in \"abcdefgh\":\n letters= letters+c\n return letters\n\n def isPal(s):\n if len(s) <=1:\n return True\n else:\n return s[0]==s[-1] and isPal(s[1:-1])\n return isPal(toChar(s))", "def is_permutation(a, b):\n a, b = str(a), str(b)\n return(len(a) == len(b) and Counter(a) == Counter(b))", "def longestPalindrome(self, s: str) -> int:\n # approach #1 -- using hashset\n # approach 2 -- using hashmap\n hashmap = defaultdict(int)\n odd = 0\n out = 0\n for char in s:\n hashmap[char] += 1\n\n for key, val in hashmap.items():\n if val % 2 == 1:\n odd = 1\n out += (val -1)\n else:\n out += val\n return out +odd", "def permutation_strings(input, input_two):\n if len(input) != len(input_two):\n return False\n else:\n return sorted(input) == sorted(input_two)", "def check_pal(s):\n counts = df(int)\n len_without_spaces = 0\n # Count all nonspaces\n for c in s:\n if c != ' ':\n counts[c.lower()] += 1\n len_without_spaces += 1\n # Now find out how many chars occur an odd number of times\n odd_chars = 0\n for c in counts:\n if counts[c] % 2 != 0:\n odd_chars += 1\n # If string length is even there must be no odd counts\n if len_without_spaces % 2 == 0 and odd_chars == 0:\n return True\n # If string is odd there must be exactly one odd count\n if len_without_spaces % 2 != 0 and odd_chars == 1:\n return True\n # Else, it's not a palindrome\n return False", "def substrCount(n, s):\r\n lst = []\r\n character = s[0]\r\n count = 1\r\n result = 0\r\n for i in range(1, n):\r\n if s[i] == character:\r\n count += 1\r\n else:\r\n lst.append((character, count))\r\n character = s[i]\r\n count = 1\r\n lst.append((character, count))\r\n\r\n for tpl in lst:\r\n \"\"\"calculate all possible palindromes created from same characters that are close to each other\r\n E.g: aaa => 6 possibles (3*4//2 = 6)\r\n \"\"\"\r\n result += tpl[1] * (tpl[1] + 1) // 2\r\n\r\n for i in range(1, len(lst) - 1):\r\n if lst[i - 1][0] == lst[i + 1][0] and lst[i][1] == 1:\r\n \"\"\"\r\n check palindromes created from 3 tuples with a different character in between\r\n \"\"\"\r\n result += min(lst[i - 1][1], lst[i + 1][1])\r\n\r\n return result", "def is_anagram_of_palindrome(word):\n # palindrome has either exactly 2 of each letter in the word\n # or two of each letter revolving around one in the middle\n # An anagram rescrambles the letters\n chars = []\n\n # loop over the word\n # append chars to the list\n # if we see the char in list again, remove it.\n # if there is only one char or no chars in list\n # return True\n # else, return false\n\n for char in word:\n if char in chars:\n chars.remove(char)\n else:\n chars.append(char)\n if len(chars) >= 2:\n return False\n else:\n return True", "def sherlockAndAnagrams(s):\n\n dic = {}\n\n count = 0\n for i in 
range(len(s)):\n for j in range(i+1, len(s)+1):\n substrings = sorted(list(s[i:j]))\n joined_ss = ''.join(substrings)\n if joined_ss != '':\n if joined_ss in dic:\n count += dic[joined_ss]\n dic[joined_ss] += 1\n else:\n dic[joined_ss] = 1 \n print(dic)\n return count", "def question1a(s,t):\n\n anagrams = permutations(t, len(t))\n for anagram in anagrams:\n if anagram:\n if ''.join(anagram) in s:\n return True\n return False", "def are_anagrams(str_1, str_2):\r\n if len(str_1) != len(str_2):\r\n return False\r\n letters_nb_1 = [0] * 256\r\n for char in str_1:\r\n letters_nb_1[ord(char.lower())] += 1\r\n\r\n for char in str_2:\r\n char_ord = ord(char.lower())\r\n if letters_nb_1[char_ord] > 0:\r\n letters_nb_1[char_ord] -= 1\r\n else:\r\n return False\r\n return letters_nb_1 == [0] * 256", "def check_permutation2(u, v):\n u_chars = {}\n for c in u:\n try:\n u_chars[c] += 1\n except KeyError:\n u_chars[c] = 1\n\n v_chars = {}\n for d in v:\n try:\n v_chars[d] += 1\n except KeyError:\n v_chars[d] = 1\n\n if sum(u_chars.values()) != sum(v_chars.values()):\n #u and v are not of the same length.\n return False\n\n for c in u:\n c_count_in_u = u_chars[c]\n c_count_in_v = v_chars.get(c, 0)\n if c_count_in_u != c_count_in_v:\n return False\n\n return True", "def is_unique_chars_map(string):\n\n if len(string) > 128:\n return False\n\n chars_list = [False] * 128\n for char in string:\n if chars_list[ord(char)]:\n return False\n chars_list[ord(char)] = True\n return True", "def is_unique_n_2(string: str) -> bool:\n\n for idx, letter in enumerate(string):\n for next_letter in string[idx + 1:]:\n if letter == next_letter:\n return False\n return True", "def is_permutation(input1, input2):\n if len(input1) != len(input2):\n return False\n if build_permutation_dictionary(input1) == build_permutation_dictionary(input2):\n return True\n return False", "def has_n_same(string, n):\n all_chars = {}\n for char in string: # sum up count of each char\n all_chars.setdefault(char, 0)\n all_chars[char] += 1\n for char, count in all_chars.items(): # check how many appeared n times\n if count == n:\n return True\n return False", "def is_unique_n_dict(string: str) -> bool:\n\n store = {}\n\n for letter in string:\n if letter in store:\n return False\n store[letter] = 1\n\n return True", "def anagram_dd(str1, str2):\n dic1 = defaultdict(int)\n for char in str1:\n dic1[char] += 1\n for char in str2:\n if char in dic1:\n if dic1[char] == 0:\n return False\n else:\n dic1[char] -= 1\n if dic1[char] == 0:\n del dic1[char]\n else:\n return False\n if dic1 == {}:\n return True\n return False", "def is_palindrome(string):\r\n r_string = string[::-1]\r\n cnt = 0\r\n while cnt < len(string):\r\n if string[cnt] == r_string[cnt]:\r\n cnt += 1\r\n continue\r\n else:\r\n return False\r\n #cnt += 1\r\n return True", "def is_pangram(string):\n a_pos = ord('a')\n letters = [0] * 26\n for char in string:\n if char.isalpha():\n letters[ord(char.lower()) - a_pos] += 1\n return all(letters)", "def brute_non_overlapping_string_counter(D, P):\n\n n, m = len(D), len(P)\n counter = 0\n skip = 0\n\n for i in range(n - m + 1):\n\n if skip:\n skip -= 1\n continue\n\n k = 0\n\n while k < m and D[i+k] == P[k]:\n k += 1\n\n if k == m:\n counter += 1\n skip = m - 1\n\n return counter", "def isPalindrome(s):\r\n return isPal(toChars(s))", "def is_palindrome_v3(s):\n i = 0\n j = len(s)-1\n\n while i < j and s[i] == s[j]:\n i = i + 1\n j = j -1\n\n return j <= i", "def is_pangram(sentence):\n\n list = 
['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p',\\\n 'q','r','s','t','u','v','x','z']\n count = 0\n sentence = sentence.lower()\n\n for i in range(0, len(list)):\n for j in sentence:\n if list[i] == j:\n count = count + 1\n list[i] = '>'\n\n if count == 24:\n return True\n\n elif count < 24 or count > 24:\n return False", "def pangrams():\n\n num_strings = int(input('How many strings? '))\n string_list = []\n\n for list in range(num_strings):\n string_list.append(input('String #' + str(list+1) + '? '))\n\n for check_string in string_list:\n string = True\n alpha_list = []\n for check_letter in range(26):\n letter = 0\n for check_position in range(len(check_string)):\n if check_string[check_position] == ascii_lowercase[check_letter]:\n letter += 1\n if letter == 0:\n string = False\n alpha_list.append(ascii_lowercase[check_letter] + ': ' + str(letter))\n if string:\n print(True, alpha_list)\n else:\n print(False, alpha_list)", "def isPalindrome(string):\n for i in range(len(string)//2):\n if string[i] != string[(i*-1)-1]:\n return False\n return True", "def countPalindromicSubsequences(self, S):\n if not S:\n return 0\n\n ways = [[0] * len(S) for i in range(len(S))]\n\n # base cases: for subarray of length 1 and 2\n for i in range(len(S)):\n ways[i][i] = 1\n if i < len(S) - 1:\n ways[i][i+1] = 2\n\n for ll in range(3, len(S)+1):\n for i in range(len(S) - ll + 1):\n j = ll + i - 1\n if S[i] != S[j]:\n ways[i][j] = ways[i+1][j] + ways[i][j-1] - ways[i+1][j-1]\n else:\n l = i + 1\n while l < j and S[l] != S[i]:\n l += 1\n r = j - 1\n while r > i and S[r] != S[j]:\n r -= 1\n\n if l < r:\n ways[i][j] = 2 * ways[i+1][j-1] - ways[l+1][r-1]\n elif l == r :\n ways[i][j] = 2 * ways[i+1][j-1] + 1\n else:\n ways[i][j] = 2 * ways[i+1][j-1] + 2\n return ways[0][len(S)-1] % (10**9 + 7)", "def get_shortest_palindrome(text):\n strlen = len(text)\n unique_chars = len(set(text))\n print(set(text))\n if unique_chars == strlen:\n return (\"\".join(list(reversed(text[1:])))+text)\n if text==\"\" or strlen==1 or unique_chars==1:\n return text\n if is_palindrome(text):\n return text\n if strlen//unique_chars > 100:\n d = {}\n for char in set(text):\n \n left_pad = []\n #print(strlen)\n i = strlen-1\n while(i!=0):\n left_pad.append(text[i])\n #print(left_pad)\n #print(\"text[:i-1]: \",text[:i],i)\n if is_palindrome(text[:i]):\n # print(\"\".join(left_pad)+text)\n return (\"\".join(left_pad)+text)\n i = i -1", "def is_palindrome(s):\n\n def to_chars(s):\n s = s.lower()\n letters = ''\n for char in s:\n if char in 'abcdefghijklmnopqrstuvwxyz':\n letters += char\n return letters\n\n def is_pal(s):\n if len(s) <= 1:\n return True\n else:\n return s[0] == s[-1] and is_pal(s[1:-1])\n\n return is_pal(to_chars(s))", "def computeLongestPalindromeLength(text):\n # BEGIN_YOUR_CODE (our solution is 19 lines of code, but don't worry if you deviate from this)\n def isPal(t):\n if len(t) == 1:\n return True\n if t[0] == t[-1] and isPal(t[1:-1]):\n return True\n return False\n for size in range(len(text),0,-1):\n for c in combinations(s,size):\n if isPal(''.join(l for l in c)):\n return ''.join(l for l in c)\n # END_YOUR_CODE", "def is_pangram(sentence):\n\n result = set()\n \n for char in sentence:\n\n if char.lower().isalpha():\n\n result.add(char.lower())\n\n\n if len(result) == 26:\n\n return True\n\n else:\n\n return False", "def repeat_again(s: str) -> int:\n string_dict = dict()\n max_length = 0\n met_repeat = False\n for index, v in enumerate(s):\n if v in string_dict:\n m = index - string_dict[v]\n if m > 
max_length:\n max_length = m\n met_repeat = True\n string_dict[v] = index\n\n if met_repeat is True:\n return max_length\n else:\n return len(s)", "def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]", "def check_palindrome():", "def match(list_string):\n assert type(list_string)==list\n for i in list_string:\n assert type(i)==str\n assert i.isalpha()\n #Loops through all the possible substrings of the list of words to find the word pairs that are palindromes.\n my_match = []\n for i in range(0,len(list_string)):\n for j in range(0,len(list_string)):\n if i!=j:\n a = list_string[i]\n b = list_string[j]\n c = a+b\n d = b+a\n if c==c[::-1]:\n if (i,j) not in my_match:\n my_match.append((i,j))\n elif d==d[::-1]:\n if (j,i) not in my_match:\n my_match.append((j,i))\n return my_match", "def map_chars(text_1: str, text_2: str) -> bool:\n if len(text_1) != len(text_2):\n return False\n\n d_1 = defaultdict(int)\n d_2 = defaultdict(int)\n\n for c in text_1:\n d_1[c] += 1\n\n for c in text_2:\n d_2[c] += 1\n\n values_1 = list(d_1.values())\n values_2 = list(d_2.values())\n\n for index, value in enumerate(values_1):\n for index_2, value_2 in enumerate(values_2):\n if value == value_2:\n values_1[index] = -1\n values_2[index_2] = -1\n break\n else:\n return False\n\n for index, value in enumerate(values_2):\n if value != -1:\n return False\n\n return True", "def sherlockAndAnagrams(s):\n counter, len_s = 0, len(s)\n \n print(f\"Original string: {s}. length: {len_s}\")\n for chunk in range(1, len_s):\n arr = []\n for i in range(0, len_s - chunk + 1):\n ex1 = \"\".join(s[i:i+chunk])\n ex = \"\".join(sorted(s[i:i+chunk]))\n print(f\"chunk: {chunk}, i: {i}, sub: {ex1}, sub_sorted: {ex}\")\n arr.append(ex)\n\n c = Counter(arr)\n for key in c:\n val = c[key]\n if val > 1:\n counter += (val * (val - 1)) // 2\n \n print(f\"chunk: {chunk}, # counter: {counter}, Counter: {str(c)}\\n\")\n print(f\"Result: {counter}.\")\n return counter", "def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal", "def is_palindrome(str):\n n = len(str)\n for i in range(n//2): # loop to middle of string str\n if str[i] != str[-1-i]:\n return False # find a character doesn't match with its mirror-positioned character\n return True # reach this, then str is palindrome", "def rearrange_chars_no_two_adj_same(txt):\n n = len(txt)\n txt_chars = list(txt)\n result = []\n count_map = {}\n heap = []\n\n for char in txt_chars:\n if char not in count_map:\n count_map[char] = -1\n else:\n count_map[char] = -(abs(count_map[char])+1)\n\n print(count_map)\n\n for key,value in count_map.items():\n heapq.heappush(heap, (value,key))\n\n print(heap)\n\n while heap:\n cur_key = heapq.heappop(heap)[1]\n result.append(cur_key)\n count_map[cur_key] = -(abs(count_map[cur_key])-1)\n prev_key = cur_key\n\n if heap:\n cur_key = heapq.heappop(heap)[1]\n result.append(cur_key)\n count_map[cur_key] = -(abs(count_map[cur_key])-1)\n if abs(count_map[prev_key]) > 0:\n heapq.heappush(heap, (count_map[prev_key], prev_key))\n if abs(count_map[cur_key]) > 0:\n heapq.heappush(heap, (count_map[cur_key], cur_key))\n\n else:\n break\n\n if n != len(result):\n print(\"Not possible to rearrange\")\n else:\n print(\"\".join(result))", "def isAnagram(self, 
s, t):\n \n s_count = {}\n t_count = {}\n for char in s:\n s_count[char] = s_count.get(char, 0) +1\n \n for char in t:\n t_count[char] = t_count.get(char, 0) +1\n \n return t_count == s_count", "def string_palidrome(word):\n if word == string_reverse(word):\n return True\n else:\n return False", "def palindrome(string):\r\n if len(string) <= 1:\r\n return True\r\n else:", "def can_make_word(word, letters):\n grouped_chars = group_input(word)\n for char in letters:\n\n if is_empty(grouped_chars):\n return True\n\n if char in grouped_chars and grouped_chars[char] > 0:\n grouped_chars[char] -= 1\n\n return is_empty(grouped_chars)", "def isPalindromic(n: int):\n return str(n) == str(n)[::-1]", "def tally_letters(string):\n output = dict()\n for char in list(string):\n freq = output.get(char, 0)\n output[char]= freq+1\n return output", "def part1(input_string):\n twos, threes = 0, 0\n for line in input_string:\n letters_seen = {}\n for char in line:\n letters_seen[char] = letters_seen.setdefault(char, 0)+1\n if 2 in letters_seen.values():\n twos += 1\n if 3 in letters_seen.values():\n threes += 1\n return threes * twos", "def is_pangram(sentence):\n\n alpha = set()\n is_alpha = False\n\n for character in sentence:\n alpha.add(character)\n\n if len(alpha) == 26:\n is_alpha = True\n\n return is_alpha", "def palCheck(input_string):\n\n # ADD NECESSARY LINES OF CODE SO THAT ALL UNITTESTS PASS\n\n d = Deque()\n for char in input_string:\n d.addFront(char)\n\n while d.size() > 1:\n firstChar = d.removeRear()\n lastChar = d.removeFront()\n if firstChar != lastChar:\n print(\"No, '\" + input_string + \"', is not a palindrom\")\n return False\n\n print(\"Yes, '\" + input_string + \"', is a palindrom!!\")\n return True", "def word_perms(word):\n\t# Question 4a: Generates all strings that are permutations of the letters in word\n\treturn {''.join(w) for w in permutations(word)}", "def is_unique_n_lg(string: str) -> bool:\n\n start = 0\n sorted_string = sorted(string)\n\n while start + 1 < len(sorted_string):\n if string[start] == string[start + 1]:\n return False\n\n start += 1\n\n return True", "def is_anagram(string_1, string_2):\n\n if len(string_1) is 0 or len(string_2) is 0:\n print \"One of the string is empty.\"\n return False\n\n # remove whitespaces and spaces in between\n string_1 = string_1.strip(\" \"). replace(\" \", \"\")\n string_2 = string_2.strip(\" \"). 
replace(\" \", \"\")\n\n charset = {}\n print string_1\n print string_2\n\n for char in string_1:\n if ord(char) in charset:\n charset[ord(char)] += 1\n else:\n charset[ord(char)] = 1\n print charset\n\n for char in string_2:\n if ord(char) not in charset or charset[ord(char)] is 0:\n return False\n else:\n charset[ord(char)] -= 1\n\n for key, value in charset.items(): \n if value is not 0:\n return False\n return True", "def find_longest_palindromic_string(text):\n n = len(text)\n start = 0\n max_len = 1\n matrix = [[False for _ in range(n)] for _ in range(n)]\n # all palindrome of length 1\n for i in range(n):\n matrix[i][i] = True\n # check palindrome of length 2\n for i in range(n-1):\n if text[i] == text[i + 1]:\n matrix[i][i + 1] = True\n start = i\n max_len = 2\n # check palindrome of length 3 or more\n for length in range(3, n):\n for i in range(n-length+1):\n j = i + length - 1\n if text[i] == text[j] and matrix[i+1][j-1]:\n matrix[i][j] = True\n start = i\n max_len = length\n return text[start: start + max_len]", "def is_palindrome(string):\n k, mid = len(string), len(string) // 2\n # checking even palindromes\n if k % 2 == 0:\n return string[:mid] == get_reverse_complement(string[mid:])\n # checking odd palindromes\n else:\n return string[:mid] == get_reverse_complement(string[mid + 1:])", "def is_palindrome3(word):\n\n i = 0\n j = -1\n\n word = word.lower()\n\n if not word.isalnum():\n word = ''.join(character for character in word if character.isalnum())\n\n if word == \"\":\n return True\n\n while len(word) > 1:\n\n if word[i] == word[j]:\n\n i += 1\n j += 1\n\n else:\n return False\n\n return True", "def anagrams(word):\n\t# Question 4b: Generates all permutations of word and filters it to contain only valid words\n\treturn word_perms(word) & word_sets[len(word)]", "def computeLongestPalindrome(text):\n # BEGIN_YOUR_CODE (our solution is 19 lines of code, but don't worry if you deviate from this)\n cache = {}\n def recurse(m,n, text):\n if (m,n) in cache:\n result = cache[(m,n)]\n elif m==n:\n result = 1\n elif m > n:\n result = 0\n elif text[m] == text[n]:\n result = 2 + recurse(m+1, n-1, text)\n else:\n result1 = recurse(m+1,n, text)\n result2 = recurse(m,n-1, text)\n result = max(result1, result2)\n\n \n cache[(m,n)] = result\n return result\n\n return recurse(0, len(text)-1, text)\n\n\n\n # END_YOUR_CODE", "def simple_unique_characters(word):\n return len(set(word)) == len(word)", "def two_pairs(pword):\n\n last = ''\n count = 1\n counts = []\n for char in pword:\n if char == last:\n char_and_count = counts.pop()\n count = char_and_count.pop()\n updated_count = count + 1\n char_and_count.append(updated_count)\n counts.append(char_and_count)\n elif char != last:\n counts.append([char, count])\n last = char\n count = 1\n\n distinct_pairs = set()\n for char_and_count in counts:\n if char_and_count[1] >= 2:\n distinct_pairs.update(char_and_count[0])\n if len(distinct_pairs) >= 2:\n return True\n return False", "def count(string):\n unique = dict()\n for elem in string:\n if elem in unique:\n unique[elem] = unique[elem]+1\n else:\n unique[elem] = 1\n return unique", "def is_unique_chars_compare(string):\n\n for i, char in enumerate(string):\n for j, other_chars in enumerate(string):\n if i != j and char == other_chars:\n return False\n return True", "def is_palindrome(s):\n s_copy = s.replace(\" \", \"\")\n n = len(s_copy)\n for i in range(n // 2):\n left = s_copy[i]\n right = s_copy[n - 1 - i]\n if left.upper() != right.upper():\n return False\n return True", "def 
trifeca(word: str):\n last_letter = 'None'\n last_pair_matched = False\n consecutive_matching_pairs = 0\n\n for letter in word:\n if last_pair_matched:\n last_pair_matched = False\n last_letter = letter\n continue\n\n if letter == last_letter:\n last_pair_matched = True\n consecutive_matching_pairs += 1\n \n if consecutive_matching_pairs == 3:\n return True\n else:\n consecutive_matching_pairs = 0\n last_letter = letter \n \n return False", "def part3(string):\n palindrome = True\n for i in range(0, int(len(string)/2) + 1):\n if(string[i] != string[int(len(string))-i-1]):\n palindrome = False\n print(palindrome)", "def solve(an):\n # First, split the expresion into left and right parts by ==\n # split each part into words by +\n # strip spaces fro, each word, reverse each work to\n # enumerate the digit rank from lower to higer\n fullexp = [list(map(lambda x: list(reversed(x.strip())), s.split(\"+\")))\n for s in an.strip().upper().split(\"==\")]\n # Find the maximal lenght of the work, maximal possive digit rank or\n # the power of 10, should the < maxp\n maxp = max([len(w) for s in fullexp for w in s])\n # Extract the leading letters for each (reversed) word\n # those cannot be zeros as the number cannot start with 0\n nzchars = set([w[-1] for s in fullexp for w in s])\n # initialize the lists for digit ranks\n unzchars = [] # non-zero letters unique at level\n uokzchars = [] # zero-allowed letters unique at level\n uchars = [] # all letters unique at level\n tchars = [] # all letter with multipliers per level\n for i in range(maxp):\n tchars.append(dict())\n unzchars.append(set())\n uokzchars.append(set())\n # Now lets scan the expression and accumulate the letter counts\n for si, s in enumerate(fullexp):\n sgn = 1 - (si << 1) # left side (0) is +1, right right (1) is -1\n for w in s: # for each word in the side (already reversed)\n for p, c in enumerate(w): # enumerate with ranks\n if c not in tchars[p]: # check if the letter was alread there\n tchars[p][c] = 0\n tchars[p][c] += sgn # append to the rank dictionary\n\n totchars = set() # Keep track of letters already seen at lower ranks\n # go through the accumulated rank dictionaries\n for p, chardict in enumerate(tchars):\n for c, cnt in tuple(chardict.items()):\n if cnt == 0: # if the cumulative is 0\n del chardict[c] # remove the letter from check dictionry\n # it does not impact the sum with 0-multiplier\n # if the letter contributes to the sum\n # and was not yet seen at lower ranks\n elif c not in totchars:\n # add the letter to either non-zero set\n # or allowed-zero set\n if c in nzchars:\n unzchars[p].add(c)\n else:\n uokzchars[p].add(c)\n # add to the list as seen letter to ignore at the next\n # ranks\n totchars.add(c)\n # pre-build the combo list of letters for the rank\n # non-zero first, followed by zero-allowed\n uchars.append(tuple(unzchars[p]) + tuple(uokzchars[p]))\n # pre-convert check dictionaries to tuples\n tchars[p] = tuple(chardict.items())\n # go for the recursion\n return check_rec([maxp, tchars, unzchars, uokzchars, uchars])", "def string_permutation(self, a,b):\n for c in a:\n if c not in b:\n return False\n return True", "def is_unique_n_bit_vector(string: str) -> bool:\n\n vector = 0\n for letter in string:\n if vector & 1 << ord(letter):\n return False\n vector |= 1 << ord(letter)\n\n return True", "def check_pass(password):\n # big_chain : length of longest chain of repeated symbols\n # c_start : index at which big_chain starts\n big_chain = 0\n cur_loc = 0\n for symb in password:\n if big_chain == 0:\n 
l_symb = symb\n cur_chain = 1\n big_chain = 1\n c_start = 0\n cur_c = cur_loc\n cur_loc += 1\n continue\n if symb == l_symb:\n cur_chain += 1\n else:\n cur_chain = 1\n cur_c = cur_loc\n if cur_chain > big_chain:\n big_chain = cur_chain\n c_start = cur_c\n cur_loc += 1\n l_symb = symb\n\n # return or repeat, need big_chain, c_start\n if big_chain < 2:\n return False\n if big_chain == 2:\n return True\n return (check_pass(password[:c_start])\n or check_pass(password[c_start+big_chain:]))", "def is_unique(str):\n\n # char_count = {}\n\n # for char in str:\n # count = char_count.get(char, 0)\n\n # if count == 1:\n # return False\n\n # else:\n # char_count[char] = 1\n\n # return True\n\n return len(str) == len(set(str))", "def _spaceEfficientHasRepeatCharacters(check_string):\n for i in range(len(check_string)):\n for j in range(len(check_string)):\n if check_string[i] == check_string[j] and i != j:\n return True\n return False", "def is_palindromic(phrase):\n\n val = str(phrase).lower().replace(\" \", \"\")\n if val == val[::-1]: # Reverse order\n return True\n else:\n return False", "def answer(codes):\n s = set()\n num_distinct_codes = 0\n for code in codes:\n if code in s:\n continue\n elif is_palindrome(code):\n s.add(code)\n else:\n s.add(code)\n s.add(code[::-1])\n num_distinct_codes += 1\n return num_distinct_codes", "def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True", "def fn(ss):\n i = cnt = 0\n for ch in s: \n if ss[i] == ch: \n i += 1\n if i == len(ss): \n if (cnt := cnt + 1) == k: return True \n i = 0\n return False", "def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n", "def is_palindrome_ingoring_case_and_non_letter_chars(text):", "def compare(theInput,dictionary):\n n=len(theInput)\n ret=0\n for word in dictionary:\n if theInput==word: return 2\n if theInput==word[:n]: ret=1\n return ret", "def naive_with_counts(p, t):\n occurences = []\n num_alignments = 0\n num_character_comparisons = 0\n for i in range(len(t) - len(p) + 1):\n match = True\n for j in range(len(p)):\n if t[i + j] != p[j]:\n match = False\n break\n if match:\n occurences.append(i)\n num_alignments += 1\n num_character_comparisons += (j + 1)\n return occurences, num_alignments, num_character_comparisons", "def num_palindrome():\n nums = map(str, range(1000000))\n odo = []\n for i in range(len(nums)):\n if len(nums[i]) < 6:\n odo.append('0'*(6-len(nums[i])) + nums[i])\n elif len(nums[i]) == 6:\n odo.append(nums[i])\n \n for i in range(len(odo)-3): \n first = odo[i][2:] == odo[i][:1:-1]\n second = odo[i+1][1:] == odo[i+1][:0:-1]\n third = odo[i+2][1:5] == odo[i+2][4:0:-1]\n fourth = odo[i+3][:] == odo[i+3][::-1]\n if first & second & third & fourth:\n print 'A possible odometer reading is '+odo[i]", "def is_unique1(test_str):\n\n letters = set()\n for char in test_str:\n if char in letters:\n return False\n else:\n letters.add(char)\n return True", "def is_palindrome(n):\n ns = str(n)\n for i in range(0, len(ns) // 2):\n if ns[i] != ns[len(ns) - 1 - i]: return False\n return True", "def check_anagram_with_dict(self, str1, str2):\n str1 = str1.lower()\n str2 = str2.lower()\n str1 = StripText.strip(self, str1)\n str2 = StripText.strip(self, str2)\n str1_count = {}\n str2_count = {}\n for each in str1:\n try:\n if str1_count[each]:\n 
str1_count[each] += 1\n except:\n str1_count[each] = 1\n for each in str2:\n try:\n if str2_count[each]:\n str2_count[each] += 1\n except:\n str2_count[each] = 1\n for each in str1_count:\n if str1_count[each] == str2_count[each]:\n pass\n else:\n return False\n return True" ]
[ "0.79791397", "0.7845905", "0.7619931", "0.7566796", "0.72823507", "0.7219113", "0.72079515", "0.71518576", "0.7108441", "0.6992611", "0.69697845", "0.6915593", "0.6882766", "0.6821809", "0.6709572", "0.6699704", "0.65028495", "0.6462751", "0.645578", "0.64369947", "0.6321017", "0.63135034", "0.6298444", "0.6291888", "0.6255911", "0.6252867", "0.6248278", "0.622805", "0.6211412", "0.6194518", "0.61705756", "0.61624557", "0.61577284", "0.61210227", "0.6107454", "0.6103002", "0.60826963", "0.60782146", "0.60348994", "0.6034215", "0.6020853", "0.601352", "0.59952855", "0.59837747", "0.59693485", "0.592882", "0.59082365", "0.58942676", "0.5892352", "0.5864853", "0.58372056", "0.582655", "0.582314", "0.58081704", "0.57942307", "0.57938224", "0.57872343", "0.57809144", "0.5777418", "0.576072", "0.5747478", "0.57448196", "0.5741929", "0.57218367", "0.57127035", "0.5711937", "0.57116103", "0.5706448", "0.57063377", "0.5702627", "0.5701626", "0.568693", "0.568541", "0.5676922", "0.56727", "0.56710815", "0.56533235", "0.5653204", "0.5651753", "0.56501704", "0.5641466", "0.5639126", "0.563412", "0.5632616", "0.56309026", "0.56307083", "0.5624366", "0.5607241", "0.5602958", "0.5594985", "0.55851734", "0.55802786", "0.55751187", "0.5568176", "0.5566183", "0.5562818", "0.556161", "0.55614233", "0.55494505", "0.55450964" ]
0.81786555
0
Add ops for dataset loaders to graph
def generate_dataset(self):
    if self.training:
        dataset = UnpairedDataset(self.opt, self.training)
        datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
        dataA_iter = datasetA.make_initializable_iterator()
        dataB_iter = datasetB.make_initializable_iterator()
        return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()
    else:
        # only need shadow dataset for testing
        dataset = SingleDataset(self.opt, self.training)
        datasetA = dataset.generate()
        dataA_iter = datasetA.make_initializable_iterator()
        return dataA_iter, dataA_iter.get_next()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)", "def __init__(self, dataset: List, ops: Callable):\n self.dataset = dataset\n self.ops = ops", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def train(self, train_loader):\n pass", "def add_data(self, op):\n self.__data += [AssemblerVariable(op)]\n self.refresh_name_label()\n self.refresh_name_end_label()", "def _add_train_op(self):\n with tf.device(\"/gpu:0\"):\n learning_rate_D = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_D,\n # 100000, 0.96, staircase=True)\n learning_rate_G = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_G,\n # 100000, 0.96, staircase=True)\n learning_rate_D_in = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_D,\n # 100000, 0.96, staircase=True)\n self._train_op_D = tf.train.AdamOptimizer(learning_rate_D,beta1=0.5).minimize(self._D_loss,\n global_step=self.global_step_D,\n var_list=self.discriminator._theta)\n self._train_op_D_in = tf.train.AdamOptimizer(learning_rate_D_in,beta1=0.5).minimize(self._D_in_loss,\n global_step=self.global_step_D_in,\n var_list=self.discriminator_inner._theta)\n\n self._train_op_G = tf.train.AdamOptimizer(learning_rate_G,beta1=0.5).minimize(self._G_loss,\n global_step=self.global_step_G,\n var_list=self.generator._theta)", "def add_training_fetches(self, fetches):\n fetches[self._name] = {\n 'loss': self._dual.get_op('loss'), # the calculation of loss\n 'training': self._dual.get_op('training'), # the optimisation\n 'output': self._dual.get_op('output'), # the output value\n # debugging\n 'target': self._dual.get_op('target'),\n 'degraded': self._dual.get_op('degraded')\n }\n\n if self._hparams.use_batch_transformer:\n fetches[self._name]['bt_input'] = self._dual.get_op('bt_input')\n fetches[self._name]['bt_output'] = self._dual.get_op('bt_output')\n\n if self._summary_op is not None:\n fetches[self._name]['summaries'] = self._summary_op", "def register_data(self, *, train_loader=None, validation_loader=None):\n raise NotImplementedError()", "def get_loaders(train_dataset, val_dataset, test_dataset, batch_size=128):\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n return train_loader, val_loader, test_loader", "def 
__init__(self, ops: Callable, batch_size: int = 4,\n num_workers: int = 8, path_to_data: str = './project/dataset/few_shot/'):\n super(FewShotDataModule, self).__init__()\n\n self.ops = ops\n self.path_to_data = path_to_data\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n self.splits = {} # Contains train and valid splits.\n self.datasets = {} # Contains instances of the Dataset class. One per data spit.\n self.class_map = dict(zip(CLASS_NAMES, range(len(CLASS_NAMES))))\n self.weights = [0] * len(CLASS_NAMES)", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def get_loaders(opt):\n train_samples, val_samples = get_train_val_metadata(opt.dataset_dir,\n opt.validation_cities,\n opt.patch_size,\n opt.stride)\n print('train samples : ', len(train_samples))\n print('val samples : ', len(val_samples))\n\n logging.info('STARTING Dataset Creation')\n\n full_load = full_onera_loader(opt.dataset_dir, opt)\n\n train_dataset = OneraPreloader(opt.dataset_dir,\n train_samples,\n full_load,\n opt.patch_size,\n opt.augmentation)\n 
val_dataset = OneraPreloader(opt.dataset_dir,\n val_samples,\n full_load,\n opt.patch_size,\n False)\n\n logging.info('STARTING Dataloading')\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers)\n return train_loader, val_loader", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def get_dataloaders(datasets, split, args, is_eval=False):\n dataloaders = []\n for task, dataset in datasets.items():\n if is_eval:\n num_rows = dataset.num_rows if args.eval_rows == -1 else args.eval_rows\n else:\n num_rows = dataset.num_rows if args.train_rows == -1 else args.train_rows\n all_input_ids = np.zeros([num_rows, args.max_length])\n all_attention_mask = np.zeros([num_rows, args.max_length])\n all_token_type_ids = np.zeros([num_rows, args.max_length])\n for i in range(num_rows):\n features = dataset[i]\n curr_len = len(features[\"attention_mask\"])\n all_input_ids[i,:curr_len] = features[\"input_ids\"]\n all_attention_mask[i,:curr_len] = features[\"attention_mask\"]\n all_token_type_ids[i,:curr_len] = features[\"token_type_ids\"]\n 
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)\n all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)\n all_label = torch.tensor(dataset[:num_rows][\"label\"], dtype=torch.long)\n if task == \"stsb\":\n all_label = all_label.float()\n \n data = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label)\n if split in [\"train\", \"support\"]:\n sampler = RandomSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.train_batch_size)\n else:\n sampler = SequentialSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.eval_batch_size)\n dataloaders.append(dataloader)\n return dataloaders", "def add_to_dataset(self, dataset: Dataset):\n pass", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def add_operation(self, op):\n\n self.operations[op.name] = op", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def build_train_op(self, loss, params):\n raise NotImplementedError()", "def addOp(self, op):\n self.operations << op", "def load(\n self,\n input_context: Optional[tf.distribute.InputContext] = None\n ) -> tf.data.Dataset:\n pass", "def create_loader(self):\n # load data to memory.\n if self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, 
y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def get_data_loaders(opt):\n return find_dataloader_using_name(opt.dataloader)(opt).load_data()", "def add_training_op(self, loss):\n ### YOUR CODE HERE\n train_op=tf.train.GradientDescentOptimizer(learning_rate=Config.lr).minimize(loss)\n ### END YOUR CODE\n return train_op", "def add_graph(writer: torch.utils.tensorboard.SummaryWriter = None,\n model: torch.nn.Module = None,\n data_loader: torch.utils.data.dataloader = None,\n device: torch.device = torch.device('cpu')):\n # get an example image for running through the network\n input_batch_dict = next(iter(data_loader))\n input_batch = input_batch_dict['input'].to(device)\n writer.add_graph(model, input_batch)", "def add_train_op(self, loss):\n optimizer = tf.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.minimize(loss)", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def get_dataloader(sets, root_dir, manifest_path, task, batch_size=1, return_pid = False):\n data_loaders = {}\n\n for 
set in ['train', 'valid', 'test', 'all_images']: # test doesn't apply to MRNet but will keep in\n if set in sets:\n if set == 'train':\n ds = Dataset(set='train', task = task, root_dir=root_dir, manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n #transforms.RandomHorizontalFlip(), # default is 50%\n #transforms.RandomAffine(25, # rotation\n # translate=(0.1, 0.1),\n # shear = (-15, 15)),\n transforms.ToTensor(),\n ]))\n loader = DataLoader(ds, batch_size=batch_size, shuffle=True)\n elif set == 'valid':\n ds = Dataset(set='valid', task = task, root_dir=root_dir,manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n ]))\n\n loader = DataLoader(ds, batch_size=batch_size, shuffle=False)\n elif set == 'all_images':\n ds = Dataset(set='all_images', task = task, root_dir=root_dir,manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n ]))\n loader = DataLoader(ds, batch_size=batch_size, shuffle=False)\n data_loaders[set] = loader\n return (data_loaders)", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def AddOperation(self, op):\n self._operations.append(op)", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, 
drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def add_op(self, op):\n self._operations.append(op)", "def prepare_data_loaders(num_split, batch_size=32, hier=False, elmo=False, elmo_pre=None, use_elmo_pre=False, deepmoji=False, dev_with_label=False, include_test=False):\n train_data_loaders = []\n val_data_loaders = []\n test_data_loaders = []\n\n vocab = generate_vocab(deepmoji)\n for i in range(num_split):\n train, val, test, _ = prepare_data(batch_size=batch_size, hier=hier, elmo=elmo, elmo_pre=elmo_pre, use_elmo_pre=use_elmo_pre, deepmoji=deepmoji, is_shuffle=True, random_state=i, vocab=vocab, dev_with_label=dev_with_label, include_test=include_test)\n train_data_loaders.append(train)\n val_data_loaders.append(val)\n test_data_loaders.append(test)\n\n return train_data_loaders, val_data_loaders, test_data_loaders, vocab", "def get_data(self, t_img_path, v_img_path, t_label_path, v_label_path):\n train_label_names = tf.constant(sorted(os.path.join(t_label_path, name) for name in os.listdir(t_label_path)))\n val_label_names = tf.constant(sorted(os.path.join(v_label_path, name) for name in os.listdir(v_label_path)))\n train_image_names = tf.constant(sorted(os.path.join(t_img_path, name) for name in os.listdir(t_img_path)))\n val_image_names = tf.constant(sorted(os.path.join(v_img_path, name) for name in os.listdir(v_img_path)))\n\n training_dataset = tf.data.Dataset.from_tensor_slices((train_image_names, train_label_names))\n training_dataset = training_dataset.shuffle(buffer_size=50000)\n training_dataset = training_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n training_dataset = training_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n training_dataset = training_dataset.prefetch(self.batch_size)\n training_dataset = training_dataset.batch(self.batch_size)\n training_dataset = training_dataset.repeat()\n\n val_dataset = tf.data.Dataset.from_tensor_slices((val_image_names, val_label_names))\n val_dataset = val_dataset.shuffle(buffer_size=5000)\n val_dataset = val_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n val_dataset = 
val_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n val_dataset = val_dataset.prefetch(self.batch_size)\n val_dataset = val_dataset.batch(self.batch_size)\n val_dataset = val_dataset.repeat()\n\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(handle, training_dataset.output_types, training_dataset.output_shapes)\n images, labels = iterator.get_next()\n\n training_iterator = training_dataset.make_one_shot_iterator()\n validation_iterator = val_dataset.make_one_shot_iterator()\n\n return handle, training_iterator, validation_iterator, images, labels", "def build_training_data_loader(self) -> DataLoader:\n pass", "def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in ['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}", "def create_dataset_pyfunc_multiproc(data_path, batch_size=32, num_op_parallel_workers=1, max_rowsize=16):\n\n # Define dataset with num_parallel_workers=8 for reasonable performance\n data1 = ds.MnistDataset(data_path, num_parallel_workers=8)\n\n data1 = data1.map(operations=[vision.ToType(np.int32)], input_columns=\"label\",\n num_parallel_workers=num_op_parallel_workers,\n python_multiprocessing=True, max_rowsize=max_rowsize)\n\n # Setup transforms list which include Python ops\n transforms_list = [\n lambda x: x,\n vision.HWC2CHW(),\n vision.RandomErasing(0.9, value='random'),\n lambda y: y\n ]\n compose_op = transforms.Compose(transforms_list)\n data1 = data1.map(operations=compose_op, input_columns=\"image\", num_parallel_workers=num_op_parallel_workers,\n python_multiprocessing=True, max_rowsize=max_rowsize)\n\n # Callable function to swap order of 2 columns\n def swap_columns(col1, col2, batch_info):\n return (col2, col1,)\n\n # Apply Dataset Ops\n data1 = data1.batch(batch_size, drop_remainder=True, per_batch_map=swap_columns,\n input_columns=['image', 'label'],\n output_columns=['mylabel', 'myimage'],\n 
num_parallel_workers=num_op_parallel_workers, python_multiprocessing=True)\n\n return data1", "def _load_data_graph(self):\n # input\n with tf.variable_scope(\"train_test\", reuse=True):\n # review input - Both original and reversed\n self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name=\"input%i\" % t)\n for t in range(self.seq_length)]\n self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name=\"input%i\" % t)\n for t in range(self.seq_length)]\n # desired output\n self.labels = [tf.placeholder(tf.int32, shape=(None,), name=\"labels%i\" % t)\n for t in range(self.seq_length)]\n # weight of the hidden layer\n self.weights = [tf.ones_like(labels_t, dtype=tf.float32)\n for labels_t in self.labels]\n\n # Decoder input: prepend some \"GO\" token and drop the final\n # token of the encoder input\n self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name=\"GO\")] + self.labels[:-1])", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def get_data_loaders(train_batch_size : int, val_batch_size : int) -> tuple[DataLoader, DataLoader]:\n \n data_transform = Compose([ToTensor(), ReshapeTransform((-1,))]) \n #torchvision provides datasets adapted for CNNs, so each MNIST sample is a tensor of shape (1, 28, 28) \n #representing respectively the number of channels (1, since they are greyscale images), the height in pixels (28)\n #and the width in pixels (28). But for a fully-connected network, we want to \"flatten\" this tensor into a 1D tensor \n #of size 28**2. This is accomplished by the ReshapeTransform.\n #Data offered by torchvision is already normalized in [0, 1], so no need to divide it by 255.\n \n train_loader = DataLoader(\n MNIST(download=True, root=\".\", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True\n )\n \n val_loader = DataLoader(\n MNIST(download=False, root=\".\", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False\n )\n \n return train_loader, val_loader", "def get_post_init_ops(self):\n return []", "def create_tf_datasets(self):\n images = []\n labels = []\n\n images = self.dataframe_labeled_samples.index.values\n\n labels.append(\n tuple(self.dataframe_labeled_samples['Intersection'].values.astype('uint8')))\n\n images = [\n os.path.join(\n os.path.dirname(\n self.summary_manager.current_labelgui_summary_filepath),\n img_name) for img_name in images]\n labels = list(chain.from_iterable(labels))\n\n\n if self.validation_split == 0:\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n labels = tf.data.Dataset.from_tensor_slices(labels)\n dataset = tf.data.Dataset.zip((images, labels))\n return dataset, None\n\n images, images_val, labels, labels_val = train_test_split(\n images, labels, test_size=self.validation_split, random_state=0)\n\n train_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_train_split.txt'\n ))\n print(f\"Saving train split files to: {train_split_filename}\")\n with open(train_split_filename, 'w+')\\\n as train_split_file:\n for img in images:\n train_split_file.write(img + '\\n')\n \n val_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_val_split.txt'\n ))\n print(f\"Saving train split files to: {val_split_filename}\")\n with open(val_split_filename, 'w+')\\\n as val_split_file:\n for img in images_val:\n 
val_split_file.write(img + '\\n')\n\n print(f\"Loading validation image paths ({len(images)}) with preprocessor\")\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n\n print(f\"Loading labels into tf tensor\")\n labels = tf.data.Dataset.from_tensor_slices(labels)\n print(f\"Creating zipped dataset with images and labels\")\n dataset = tf.data.Dataset.zip((images, labels))\n\n print(f\"Loading validation image paths ({len(images_val)}) with preprocessor\")\n images_val = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images_val)])\n #images_val = np.array([self.image_preprocessor(f) for f in tqdm(images_val)])\n images_val = tf.data.Dataset.from_tensor_slices(images_val)\n #images_val = tf.data.Dataset.list_files(images_val)\n #images_val = images_val.map(tf.io.read_file)\n print(f\"Loading validation labels into tf tensor\")\n labels_val = tf.data.Dataset.from_tensor_slices(labels_val)\n print(f\"Creating validation zipped dataset with images and labels\")\n dataset_val = tf.data.Dataset.zip((images_val, labels_val))\n\n return dataset, dataset_val", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def create_jsonl_loader(\n batch_size, data_path, encode_fn, pad_id):\n pad_id = tf.constant(pad_id, tf.int64)\n ignore_id = tf.constant(IGNORE_ID, tf.int64)\n\n def generate_converted():\n \"\"\"\n Generator for converted examples.\n \"\"\"\n for example in generate_examples(data_path):\n input_ids, label_ids = encode_fn(\n example['tokens'], example['labels'])\n\n yield {\n 'input_ids': input_ids,\n 'label_ids': label_ids\n }\n\n def prepare_inputs(example):\n \"\"\"\n Creates the attention mask tensor.\n \"\"\"\n return example['input_ids'], example['label_ids']\n\n dataset = tf.data.Dataset.from_generator(\n generate_converted,\n output_types={\n 'input_ids': 
tf.int64,\n 'label_ids': tf.int64\n },\n output_shapes={\n 'input_ids': tf.TensorShape([None]),\n 'label_ids': tf.TensorShape([None])\n })\n\n dataset = (\n dataset.padded_batch(\n batch_size=batch_size,\n padded_shapes={\n 'input_ids': tf.TensorShape([None]),\n 'label_ids': tf.TensorShape([None])\n },\n padding_values={\n 'input_ids': pad_id,\n 'label_ids': ignore_id\n })\n .map(prepare_inputs)\n )\n\n return dataset", "def register_additional_metric_ops(\n self, metric_ops: Dict[str, Tuple[tf.Tensor, tf.Tensor]]) -> None:\n for metric_name, (value_op, update_op) in metric_ops.items():\n if metric_name in self._metric_names:\n raise ValueError('tried to register new metric with name %s, but a '\n 'metric with that name already exists.' % metric_name)\n self._metric_names.append(metric_name)\n self._metric_value_ops.append(value_op)\n self._metric_update_ops.append(update_op)\n\n # Update metric variables incrementally with only the new elements in the\n # metric_variables collection.\n collection = self._graph.get_collection(\n tf.compat.v1.GraphKeys.METRIC_VARIABLES)\n collection = collection[len(self._metric_variable_nodes):]\n\n # Note that this is a node_list - it's not something that TFMA\n # configures, but something that TF.Learn configures.\n #\n # As such, we also use graph.get_tensor_by_name directly, instead of\n # TFMA's version which expects names encoded by TFMA.\n for node in collection:\n self._metric_variable_nodes.append(node)\n with self._graph.as_default():\n placeholder = tf.compat.v1.placeholder(\n dtype=node.dtype, shape=node.get_shape())\n self._metric_variable_placeholders.append(placeholder)\n self._metric_variable_assign_ops.append(\n tf.compat.v1.assign(node, placeholder))\n\n with self._graph.as_default():\n self._all_metric_variable_assign_ops = tf.group(\n *self._metric_variable_assign_ops)\n self._all_metric_update_ops = tf.group(*self._metric_update_ops)\n self._reset_variables_op = tf.compat.v1.local_variables_initializer()\n self._session.run(self._reset_variables_op)\n\n self._perform_metrics_update_fn = self._session.make_callable(\n fetches=self._all_metric_update_ops,\n feed_list=self._perform_metrics_update_fn_feed_list)", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def cmdload(dataset_id, v4):\n cmd_loader = CmdLoader(dataset_id, v4)\n cmd_loader.upload_data_to_florence()", "def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = 
self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_driving_data_loaders(batch_size, train_dataset, valid_dataset, test_dataset, num_workers=0): \n\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=True)\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n drop_last=True, \n shuffle=True)\n\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return train_loader, valid_loader, test_loader", "def _load_data(self, cfg):\r\n\r\n if self._split == \"train\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TRAIN_LISTS)\r\n elif self._split == \"val\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.VAL_LISTS)\r\n else:\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TEST_LISTS)", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, 
pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def get_dataset_loader(self, batch_size, workers, is_gpu):\n\n train_loader = torch.utils.data.DataLoader(\n self.trainset,\n batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=is_gpu, sampler=None)\n\n val_loader = torch.utils.data.DataLoader(\n self.valset,\n batch_size=batch_size, shuffle=False,\n num_workers=workers, pin_memory=is_gpu)\n\n return train_loader, val_loader", "def build_dataloaders(dataset, batch_size, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):\n # 데이터셋 길이\n dataset_len = len(dataset)\n\n # 학습, 평가 데이터 나누기\n eval_len = int(dataset_len * train_test_split)\n train_len = dataset_len - eval_len\n\n train_dataset, eval_dataset = random_split(dataset, (train_len, eval_len))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle)\n eval_loader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=eval_shuffle)\n\n\n logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}\n eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')\n\n return train_loader, eval_loader", "def get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def _process_dataset(all_train_img, all_train_label, all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices 
with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def get_loaders(img_size=CONFIG[\"matrix_size\"], batch_size=CONFIG[\"batch_size\"],\n used_keypoints=CONFIG[\"used_keypoints\"], interpolation_frames=CONFIG[\"interpolation_frames\"],\n noise_frames=CONFIG[\"noise_frames\"], all_data=None, all_labels=None):\n\n if all_data is None or all_labels is None:\n all_data, all_labels = load_video_data_labels(interpolation_frames, noise_frames, used_keypoints, img_size)\n\n p = np.random.permutation(len(all_data))\n train_len = int(len(p) / 80)\n others_len = int((len(p) - train_len) / 2)\n\n train_data, train_labels = all_data[p[:train_len]], all_labels[p[:train_len]]\n val_data = all_data[p[train_len:train_len + others_len]]\n val_labels = all_labels[p[train_len:train_len + others_len]]\n test_data, test_labels = all_data[p[-others_len:]], all_labels[p[-others_len:]]\n\n # Transform to tensor\n train_data_tensor, train_labels_tensor = torch.from_numpy(train_data), torch.from_numpy(train_labels)\n val_data_tensor, val_labels_tensor = torch.from_numpy(val_data), torch.from_numpy(val_labels)\n test_data_tensor, test_labels_tensor = torch.from_numpy(test_data), torch.from_numpy(test_labels)\n\n # Data Loader for easy mini-batch return in training, load the Dataset from the numpy arrays\n train_loader = DataLoader(TensorDataset(train_data_tensor, train_labels_tensor), batch_size=batch_size)\n val_loader = DataLoader(TensorDataset(val_data_tensor, val_labels_tensor), batch_size=batch_size)\n test_loader = DataLoader(TensorDataset(test_data_tensor, test_labels_tensor), batch_size=batch_size)\n\n data = {\"train_data\": train_data,\n \"train_labels\": train_labels,\n \"val_data\": val_data,\n \"val_labels\": val_labels,\n \"test_data\": test_data,\n \"test_labels\": test_labels,\n \"all_data\": all_data[p],\n \"all_labels\": all_labels[p]}\n\n return data, train_loader, val_loader, test_loader", "def get_train_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.DataLoader,\n torch.utils.data.DataLoader,\n argparse.Namespace,\n torch.nn.Module,\n torch.optim.Optimizer,\n torch.optim.Optimizer,\n Progress,\n TaskID,\n ],\n None,\n ]:\n pass", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = 
self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def merge(self, op):\n self.__desc = listify(self.__desc, op.__desc)\n self.__name = listify(self.__name, op.__name)\n self.__label_pre = listify(self.__label_pre, op.__label_pre)\n self.__label_post = listify(self.__label_post, op.__label_post)", "def train(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def _add_train_op(self):\n # Take gradients of the trainable variables w.r.t. the loss function to minimize\n loss_to_minimize = self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)\n\n # Clip the gradients\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n grads, global_norm = tf.clip_by_global_norm(gradients, config.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n optimizer = tf.train.AdagradOptimizer(config.lr, initial_accumulator_value=config.adagrad_init_acc)\n #optimizer = tf.train.MomentumOptimizer(config.lr, momentum=0.01)\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')", "def add_op2_data(cls, data, comment=''):\n sid = data[0]\n pressure = data[1]\n nodes = data[2:]\n if nodes[-1] == 0:\n nodes = list(nodes)\n nodes.pop()\n return PLOAD(sid, pressure, nodes, comment=comment)", "def _add_train_op(self):\n self._lr_rate = tf.maximum(\n self._hps.min_lr, # min_lr_rate.\n tf.train.exponential_decay(self._hps.lr, self.global_step, 30000, 0.98))\n \n \n # Take gradients of the trainable variables w.r.t. 
the loss function to minimize\n loss_to_minimize = self._total_loss if self._hps.coverage else self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n\n # Clip the gradients\n with tf.device(self._get_gpu(self._num_gpus-1)):\n grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n if self._hps.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)\n\n elif self._hps.optimizer == 'adam': \n # Adam\n optimizer = tf.train.AdamOptimizer()\n \n elif self._hps.optimizer == 'sgd':\n # SGD\n optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)\n tf.summary.scalar('learning rate', self._lr_rate)\n \n else:\n raise Exception('Invalid optimizer: ', self._hps.optimizer)\n\n with tf.device(self._get_gpu(self._num_gpus-1)):\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=self.global_step, name='train_step')", "def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):\n\n\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n\n # Checking the params values. If it's not defined in params of if params is None, the default values are described\n # below:\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n\n # However, if the params is defined, we used the values described on it:\n if (params is not None):\n if ('batch_size' in params.keys()):\n batch_size = params['batch_size']\n if ('shuf' in params.keys()):\n shuf = params['shuf']\n if ('num_workers' in params.keys()):\n num_workers = params['num_workers']\n if ('pin_memory' in params.keys()):\n pin_memory = params['pin_memory']\n\n # Calling the dataloader\n dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,\n pin_memory=pin_memory)\n\n return dl", "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def init_loaders_predict(self, data_dict, label_dict):\n return self.init_loaders(data_dict, label_dict)", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def get_dataloaders(logging, batch_size):\n # Load Data\n logging.info(\"Reading Train and Test data...\")\n train_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-tr.txt\", header=None)\n test_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-ts.txt\", header=None)\n\n # Fix column names\n col_names = ['col_' + str(j + 1) for j in range(train_df.shape[1] - 1)]\n indep_cols = col_names.copy()\n col_names.append('y')\n\n logging.debug(\"Assigning columns\")\n train_df.columns = 
col_names\n test_df.columns = col_names\n\n # Encode dependent variable column\n le = LabelEncoder()\n le.fit(train_df['y'])\n logging.debug(f\"Classes: {le.classes_}\")\n logging.debug(f\"Transformed Classes: {le.transform(le.classes_)}\")\n\n train_df['y_enc'] = le.transform(train_df['y'])\n test_df['y_enc'] = le.transform(test_df['y'])\n\n # train_df.head()\n logging.debug(f\"Shape of train data: {train_df.shape}\")\n logging.debug(f\"Shape of test data: {test_df.shape}\")\n\n # Create train and validation dataloaders\n train_ds = AvilaDataset(data_frame=train_df, indep_cols=indep_cols, dep_col='y_enc')\n valid_ds = AvilaDataset(data_frame=test_df, indep_cols=indep_cols, dep_col='y_enc')\n\n # Should be some exponent of 2 (128, 256)\n # batch_size = 256\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)\n\n return train_dl, valid_dl, le", "def with_cpu(ops, model):\n ...", "def _build_train_ops(train_params):\n global_step = tf.get_variable('global_step', shape=[], dtype='int32',\n initializer=tf.constant_initializer(0), trainable=False)\n #global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection(tf.GraphKeys.LOSSES)\n if len(loss) == 0:\n raise RuntimeError(\"No losses found in losses collection\")\n loss = tf.add_n(loss, name=\"loss\")\n\n if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:\n # Add any summaries client stored in SUMMARIES\n summary_tensor = tf.summary.merge([[tf.summary.tensor_summary(\"loss\", loss)] +\n tf.get_collection(tf.GraphKeys.SUMMARIES)])\n else:\n summary_tensor = tf.summary.tensor_summary(\"loss\", loss)\n\n train_objective = loss\n\n regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if len(regularizers) > 0:\n regularization_loss = tf.add_n(regularizers, name=\"regularization_loss\")\n if train_params.regularization_weight is not None:\n train_objective = train_objective + regularization_loss * train_params.regularization_weight\n else:\n train_objective = train_objective + regularization_loss\n else:\n regularization_loss = None\n\n opt = train_params.opt.get()\n opt = hvd.DistributedOptimizer(opt)\n #train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)\n train_opt = opt.minimize(train_objective, global_step=global_step)\n\n if train_params.ema is not None:\n ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)\n ema_op = ema.apply(tf.trainable_variables())\n with tf.control_dependencies([train_opt]):\n # Run the old training op, then update the averages.\n train_opt = tf.group(ema_op)\n else:\n ema = None\n\n # Any collections starting with \"monitor\" are also added as summaries\n to_monitor = {}\n for col in tf.get_default_graph().get_all_collection_keys():\n if col.startswith(\"monitor\"):\n v = tf.get_collection(col)\n if len(v) > 0:\n print(\"Monitoring: \" + col)\n v = tf.add_n(v)\n to_monitor[col] = v\n\n if len(to_monitor) > 0:\n monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name=\"MonitorEMA\",\n zero_debias=True)\n train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))\n summary_tensor = tf.summary.merge(\n [tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +\n [summary_tensor])\n\n # EMA for the loss and what we monitoring\n if train_params.loss_ema is not None:\n loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name=\"LossEMA\", 
zero_debias=True)\n\n if regularization_loss is None:\n ema_op = loss_ema.apply([loss])\n train_opt = tf.group(train_opt, ema_op)\n ema_var = loss_ema.average(loss)\n summary_tensor = tf.summary.merge([tf.summary.scalar(\"training-ema/loss\", ema_var), summary_tensor])\n else:\n to_track = [loss, train_objective, regularization_loss]\n ema_op = loss_ema.apply(to_track)\n train_opt = tf.group(train_opt, ema_op)\n tensor_vars = [\n tf.summary.scalar(\"training-ema/loss\", loss_ema.average(loss)),\n tf.summary.scalar(\"training-ema/objective\", loss_ema.average(train_objective)),\n tf.summary.scalar(\"training-ema/regularization-loss\",\n loss_ema.average(regularization_loss))\n ]\n summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])\n\n return loss, summary_tensor, train_opt, global_step, ema", "def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames=None, video2frames_target=None, visual_feats_target=None, caption_file_target=None, multi_flag=0):\n if video2frames_target!=None and visual_feats_target!=None:\n if multi_flag == 0:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target, visual_feat_source2=visual_feats['train2'], video2frames_source2=video2frames['train2'], caption_file_source2=cap_files['train2']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n\n\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],\n batch_size=batch_size,\n shuffle=(x=='train'),\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate_frame_gru_fn)\n for x in ['train', 'val', 'test']}\n return data_loaders", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. 
Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def prepare_data_loaders_without_shuffle(batch_size=32, hier=False, elmo=False, elmo_pre=None, use_elmo_pre=False, deepmoji=False, dev_with_label=False, include_test=False):\n train_data_loaders = []\n val_data_loaders = []\n test_data_loaders = []\n\n vocab = generate_vocab(deepmoji)\n train, val, test, _ = prepare_data(batch_size=batch_size, hier=hier, elmo=elmo, elmo_pre=elmo_pre, use_elmo_pre=use_elmo_pre, deepmoji=deepmoji, is_shuffle=False, vocab=vocab, dev_with_label=dev_with_label, include_test=include_test)\n\n return train, val, test, vocab", "def head_ops(self,\n features,\n labels,\n mode,\n train_op_fn,\n logits=None,\n logits_input=None,\n scope=None):\n raise NotImplementedError(\"Calling an abstract method.\")", "def register_dataset_split(dataset_name, split_dict):\n\n _DATASET_TYPE_LOAD_FUNC_MAP = {\n \"COCODataset\": _register_extended_coco,\n \"COCOText\": _register_coco_text,\n \"COCOTextDataset\": _register_coco_text,\n \"LVISDataset\": _register_extended_lvis,\n }\n\n factory = split_dict.get(\"DS_TYPE\", \"COCODataset\")\n _DATASET_TYPE_LOAD_FUNC_MAP[factory](\n dataset_name=dataset_name, split_dict=split_dict\n )", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def __init__(\n self,\n list_data,\n cfg,\n use_labels,\n mode=\"train\",\n language=\"english\",\n n_layers=2,\n normalize_adj=False,\n ):\n super(GraphDataset, self).__init__()\n\n self.mode = mode\n self.n_layers = n_layers\n self.normalize_adj = normalize_adj\n\n # Load node information\n node_info, node_dict = load_node_info(cfg[\"paths\"][\"node_infos\"])\n\n # Compute the TF-IDF vector of each paper\n abstract_features_tfidf, title_features_tfidf = self.tf_idf(\n node_info, cfg[\"language\"]\n )\n\n # Construct a one-hot 
embedding for each journal\n journal_names = {}\n journal_idx = 0\n for element in node_info:\n if element.journal not in journal_names:\n journal_names[element.journal] = journal_idx\n journal_idx += 1\n\n self.process_features(\n list_data,\n node_dict,\n abstract_features_tfidf,\n title_features_tfidf,\n journal_names,\n use_labels,\n language,\n mode,\n )\n\n ## Setting up the graph ##\n # Dictionary to \"translate\" node IDs in IDs used for\n # the adjacency matrix\n dict_node_ids = {}\n dict_node_idx = 0\n for edge in list_data:\n if edge.origin not in dict_node_ids:\n dict_node_ids[edge.origin] = dict_node_idx\n dict_node_idx += 1\n if edge.target not in dict_node_ids:\n dict_node_ids[edge.target] = dict_node_idx\n dict_node_idx += 1\n\n edges_t = np.zeros((len(list_data), 3), dtype=int)\n for i, edge in enumerate(list_data):\n edges_t[i, 0] = dict_node_ids[edge.origin]\n edges_t[i, 1] = dict_node_ids[edge.target]\n edges_t[i, 2] = edge.exists\n edges_s = np.unique(edges_t[:, :2], axis=0)\n self.n = len(dict_node_ids)\n self.m_s, self.m_t = edges_s.shape[0], edges_t.shape[0]\n\n adj = sp.coo_matrix(\n (np.ones(self.m_s), (edges_s[:, 0], edges_s[:, 1])),\n shape=(self.n, self.n),\n dtype=np.float32,\n )\n\n adj += adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n if normalize_adj:\n degrees = np.power(np.array(np.sum(adj, axis=1)), -0.5).flatten()\n degrees = sp.diags(degrees)\n adj = degrees.dot(adj.dot(degrees))\n\n self.adj = adj.tolil()\n self.edges_s = edges_s\n self.nbrs_s = self.adj.rows\n\n nbrs_t = [[] for _ in range(self.n)]\n for (u, v, t) in edges_t:\n nbrs_t[u].append((v, t))\n nbrs_t[v].append((u, t))\n self.nbrs_t = np.array(nbrs_t)\n\n last_time = np.max(edges_t[:, -1]) + 1\n timestamps = dict()\n for (u, v, t) in edges_t:\n if u not in timestamps.keys():\n timestamps[u] = dict()\n if v not in timestamps[u].keys():\n timestamps[u][v] = []\n timestamps[u][v].append(last_time - t)\n\n if v not in timestamps.keys():\n timestamps[v] = dict()\n if u not in timestamps[v].keys():\n timestamps[v][u] = []\n timestamps[v][u].append(last_time - t)\n\n for u in range(self.n):\n if u not in timestamps.keys():\n timestamps[u] = dict()\n timestamps[u][u] = [1]\n self.timestamps = timestamps\n ## Finished setting up graph", "def pass_through_json_split(desc_d, skip_ops_list):\n op_jsons = []\n\n # get some basic info to init subgraph\n composite_graph_id = desc_d['composite_graph']\n composite_id = desc_d['id']\n \n input_tensor_names = [tensor[0]['tensor_name'] for tensor in desc_d['input_desc']] if desc_d['input_desc'] else []\n output_tensor_names = [tensor['tensor_name'] for tensor in desc_d['output_desc']] if desc_d['output_desc'] else []\n\n input_tensor_shapes = [tensor[0]['shape'] for tensor in desc_d['input_desc']] if desc_d['input_desc'] else []\n output_tensor_shapes = [tensor['shape'] for tensor in desc_d['output_desc']] if desc_d['output_desc'] else []\n \n sub_graphs = []\n for i in range(len(desc_d['op_desc'])):\n op_info = desc_d['op_desc'][i]\n op_name = op_info['name']\n if(op_name in skip_ops_list):\n continue\n g = Graph()\n g.ops.append(op_info)\n g.input = [t for t in op_info['input_desc'] if 'value' not in t[0].keys()]\n # g.input = op_info['input_desc']\n g.output = op_info['output_desc']\n g.op_name = op_name\n \n sub_graphs.append(g)\n \n op_result = []\n for g in sub_graphs:\n op_json_str = {}\n op_json_str['composite'] = True\n op_json_str['composite_graph'] = composite_graph_id\n op_json_str['id'] = composite_id\n op_json_str['op'] = g.op_name\n 
op_json_str['input_desc'] = g.input\n op_json_str['op_desc'] = g.ops\n op_json_str['output_desc'] = g.output\n # op_json_str['core_num'] = g.core_num\n op_json_str['platform'] = \"AKG\"\n op_json_str['process'] = desc_d['process']\n\n op_result.append(op_json_str)\n \n # all sub json info saved in op_jsons list\n for _, single_op in enumerate(op_result):\n json_str = json.dumps(single_op, indent=4)\n op_jsons.append(json_str)\n\n return op_jsons, input_tensor_names, output_tensor_names, input_tensor_shapes, output_tensor_shapes", "def __init__(self, sample_df, data_path, load_semilabels=True, load_mask=True,\n output_size=512, data_augmentation=True):\n data.Dataset.__init__(self)\n self.sample_df = sample_df\n self.data_path = data_path\n self.load_semilabels = load_semilabels\n self.load_mask = load_mask\n if data_augmentation:\n self.transform = tf.Compose(tf.Grayscale(), \\\n tf.AutoContrast(cutoff=1), \\\n tf.RandomHorizontalFlip(p=0.5), \\\n tf.RandomVerticalFlip(p=0.5), \\\n tf.RandomBrightness(lower=0.8, upper=1.2), \\\n tf.RandomScaling(scale_range=(0.8,1.2)), \\\n tf.RandomRotation(degree_range=(-20,20)), \\\n tf.ResizeMax(output_size), \\\n tf.PadToSquare(), \\\n tf.MinMaxNormalization(), \\\n tf.ToTorchTensor())\n else:\n self.transform = tf.Compose(tf.Grayscale(), \\\n tf.AutoContrast(cutoff=1), \\\n tf.ResizeMax(output_size), \\\n tf.PadToSquare(), \\\n tf.MinMaxNormalization(), \\\n tf.ToTorchTensor())", "def provide_data(split_name, batch_size, dataset_dir, num_readers=1,\n num_threads=1):\n dataset = datasets.get_dataset('mnist', split_name, dataset_dir=dataset_dir)\n provider = tf.contrib.slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers=num_readers,\n common_queue_capacity=2 * batch_size,\n common_queue_min=batch_size,\n shuffle=(split_name == 'train'))\n [image, label] = provider.get(['image', 'label'])\n\n # Preprocess the images.\n image = (tf.to_float(image) - 128.0) / 128.0\n\n # Creates a QueueRunner for the pre-fetching operation.\n images, labels = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=5 * batch_size)\n\n one_hot_labels = tf.one_hot(labels, dataset.num_classes)\n return images, one_hot_labels, dataset.num_samples", "def run(layers):", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def get_data_loaders(data, batch_size, ratio=0.8, num_workers=1):\n train_size = int(len(data) * ratio)\n val_size = len(data) - train_size\n train_set, val_set = 
random_split(data, [train_size, val_size])\n data_train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n data_val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n return data_train_loader, data_val_loader", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def add(self, entity: AddableEntity):\n\n entity.client = self.client\n\n if isinstance(entity, Dataset):\n\n # results = self.client.execute(ADD_DATASET, params={\n # 'projectId': self.id, 'name': entity.name, 'description': entity.description\n # })\n # entity.__dict__.update(results.get('createDataset'))\n return\n\n if isinstance(entity, Label):\n self.create" ]
[ "0.65246606", "0.6266942", "0.59693766", "0.59283423", "0.5864471", "0.57984567", "0.57888275", "0.57675457", "0.5730815", "0.56816316", "0.56744856", "0.5652499", "0.5620212", "0.55867034", "0.5557611", "0.55549645", "0.54842335", "0.5481167", "0.5478355", "0.54730076", "0.546972", "0.5432318", "0.5427359", "0.5406471", "0.53868127", "0.5383954", "0.5381144", "0.53765184", "0.5375924", "0.5373492", "0.53725994", "0.5343864", "0.5336204", "0.533031", "0.531921", "0.5308063", "0.52972806", "0.52934134", "0.5293087", "0.5286722", "0.52719533", "0.5269441", "0.5256642", "0.5251305", "0.52490705", "0.5246117", "0.5236743", "0.523226", "0.5230971", "0.52262545", "0.5225605", "0.5222903", "0.5221538", "0.522026", "0.5218818", "0.5218053", "0.52180165", "0.52180165", "0.52180165", "0.52180165", "0.52180165", "0.52180165", "0.52180165", "0.52180165", "0.52180165", "0.52175975", "0.5216926", "0.52116877", "0.52091813", "0.520871", "0.5207858", "0.5206275", "0.52007216", "0.5197856", "0.51976323", "0.5182502", "0.5180706", "0.51779455", "0.5169865", "0.5168795", "0.5166058", "0.5163754", "0.51616246", "0.5161144", "0.5154615", "0.5147764", "0.5142174", "0.5138708", "0.5137429", "0.51312125", "0.51277", "0.5126182", "0.5122282", "0.5115715", "0.51109546", "0.51106685", "0.50997484", "0.5097738", "0.5094137", "0.5091084", "0.5087887" ]
0.0
-1
Build TensorFlow graph for MaskShadowGAN model.
def build(self):
        """Build the MaskShadowGAN TensorFlow graph.

        In training mode, adds both generators (G: A->B, and F: B->A, which also takes a mask
        input), both discriminators, the fake/reconstructed/identity image ops, image summaries,
        the loss ops and the optimizers, and returns
        (fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss).
        In test mode, only the A->B generator is built and fakeB is returned.
        """
        # add ops for generator (A->B) to graph
        self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,
                           init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                           training=self.training, name='G')

        if self.training:
            # add ops for other generator (B->A) and discriminators to graph
            self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,
                               init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                               training=self.training, name='F')
            self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf, norm_type=self.opt.layer_norm_type,
                                     init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                                     training=self.training, name='D_A')
            self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf, norm_type=self.opt.layer_norm_type,
                                     init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                                     training=self.training, name='D_B')

            # generate fake images
            fakeB = self.G(self.realA)
            fakeA = self.F(self.realB, self.rand_mask)

            # generate reconstructed images
            reconstructedA = self.F(fakeB, self.last_mask)
            reconstructedB = self.G(fakeA)

            # generate identity mapping images
            identA = self.G(self.realB)
            identB = self.F(self.realA, self.mask_non_shadow)

            tf.summary.image('A/original', batch_convert_2_int(self.realA))
            tf.summary.image('B/original', batch_convert_2_int(self.realB))
            tf.summary.image('A/generated', batch_convert_2_int(fakeA))
            tf.summary.image('B/generated', batch_convert_2_int(fakeB))
            tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))
            tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))

            # add loss ops to graph
            Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA, reconstructedB, identA, identB)

            # add optimizer ops to graph
            optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)

            return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
        else:
            # only need generator from A->B during testing
            fakeB = self.G(self.realA)

            return fakeB
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_graph(self, inputs, masks):\n with vs.variable_scope(\"SimpleSoftmaxLayer\"):\n\n # Linear downprojection layer\n logits = tf.contrib.layers.fully_connected(inputs, num_outputs=1, activation_fn=None) # shape (batch_size, seq_len, 1)\n logits = tf.squeeze(logits, axis=[2]) # shape (batch_size, seq_len)\n\n # Take softmax over sequence\n masked_logits, prob_dist = masked_softmax(logits, masks, 1)\n\n return masked_logits, prob_dist", "def build_graph(self, inputs, masks):\n with vs.variable_scope(\"SimpleSoftmaxLayer\"):\n\n # Linear downprojection layer\n logits = tf.contrib.layers.fully_connected(inputs, num_outputs=1, activation_fn=None) # shape (batch_size, seq_len, 1)\n logits = tf.squeeze(logits, axis=[2]) # shape (batch_size, seq_len)\n\n # Take softmax over sequence\n masked_logits, prob_dist = masked_softmax(logits, masks, 1)\n\n return masked_logits, prob_dist", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", [self.action_size], initializer = tf.constant_initializer(0.1))", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def _build_graph(self):\n self._setup_placeholders()\n self._embed()\n self.p_emb = tf.concat([self.p_emb, tf.expand_dims(self.em, -1)], -1)\n self._encode()\n self._match()\n self._fuse()\n\n with tf.variable_scope('boundary'):\n self._decode()\n with tf.variable_scope('content'):\n self._content()\n with tf.variable_scope('verif'):\n self._verify()\n\n self._compute_loss()", "def build_tf_graph(self):\n raise NotImplementedError", "def build_graph_conv_model(in_nv_dims, in_ne_dims, in_nhood_size):\n\n in_vertex1 = tf.placeholder(tf.float32, [None, in_nv_dims], \"vertex1\")\n in_vertex2 = tf.placeholder(tf.float32, [None, in_nv_dims], \"vertex2\")\n in_edge1 = tf.placeholder(tf.float32, [None, in_nhood_size, in_ne_dims], \"edge1\")\n in_edge2 = tf.placeholder(tf.float32, [None, in_nhood_size, in_ne_dims], \"edge2\")\n in_hood_indices1 = tf.placeholder(tf.int32, [None, in_nhood_size, 1], \"hood_indices1\")\n in_hood_indices2 = tf.placeholder(tf.int32, [None, in_nhood_size, 1], \"hood_indices2\")\n\n # Input from the ligand protein that is going to the left branch\n input1 = in_vertex1, in_edge1, in_hood_indices1\n # Input from the receptor protein that is going to the right branch\n input2 = in_vertex2, in_edge2, in_hood_indices2\n\n examples = tf.placeholder(tf.int32, [None, 2], \"examples\")\n labels = tf.placeholder(tf.float32, [None], \"labels\")\n dropout_keep_prob = tf.placeholder(tf.float32, shape=[], name=\"dropout_keep_prob\")\n\n layer_no = 1\n # First convolutional layer of the left branch\n name = \"left_branch_{}_{}\".format(\"node_average\", layer_no)\n with tf.name_scope(name):\n output, params = node_average_model(input1, None, filters=256, dropout_keep_prob=0.5)\n input1 = output, in_edge1, in_hood_indices1\n\n # Firs convolutional layer of the right branch\n name = 
\"right_branch_{}_{}\".format(\"node_average\", layer_no)\n with tf.name_scope(name):\n output, _ = node_average_model(input2, params, filters=256, dropout_keep_prob=0.5) #the weights (params) are the one of the left_branc\n input2 = output, in_edge2, in_hood_indices2\n\n layer_no = 2\n # Second convolutional layer of the left branch\n name = \"left_branch_{}_{}\".format(\"node_average\", layer_no)\n with tf.name_scope(name):\n output, params = node_average_model(input1, None, filters=256, dropout_keep_prob=0.5)\n input1 = output, in_edge1, in_hood_indices1\n\n # Second convolutional layer of the right branch\n name = \"right_branch_{}_{}\".format(\"node_average\", layer_no)\n with tf.name_scope(name):\n output, _ = node_average_model(input2, params, filters=256, dropout_keep_prob=0.5)#the weights (params) are the one of the\n input2 = output, in_edge2, in_hood_indices2\n\n # The output of the two branches are merged\n layer_no = 3\n name = \"{}_{}\".format(\"merge\", layer_no)\n input = input1[0], input2[0], examples\n with tf.name_scope(name):\n input = merge(input)\n\n # First dense layer\n layer_no = 4\n name = \"{}_{}\".format(\"dense\", layer_no)\n with tf.name_scope(name):\n input = dense(input, out_dims=512, dropout_keep_prob=0.5, nonlin=True, trainable=True)\n\n # Second dense layer\n layer_no = 5\n name = \"{}_{}\".format(\"dense\", layer_no)\n with tf.name_scope(name):\n input = dense(input, out_dims=1, dropout_keep_prob=0.5, nonlin=False, trainable=True)\n\n # Average layer\n layer_no = 6\n name = \"{}_{}\".format(\"average_predictions\", layer_no)\n with tf.name_scope(name):\n preds = average_predictions(input)\n\n return [in_vertex1, in_edge1, in_hood_indices1, in_vertex2, in_edge2, in_hood_indices2, examples, preds, labels,\n dropout_keep_prob]", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def build_graph():\n os.environ['CUDA_VISIBLE_DEVICES']= '0'\n\n # frozen_model = '/home/kevin/Codes/DeepNet/log/20180419_221132/frozen_model.pb'\n # frozen_model = '/home/kevin/Downloads/deeplabv3_cityscapes_train/frozen_inference_graph.pb'\n # frozen_model = '/home/kevin/Codes/EnvNet/RUNS/used3/frozen_model.pb'\n frozen_model = '/home/kevin/Codes/DeepNet/log/20180716_212035/frozen_model1.pb'\n graph = load_graph(frozen_model)\n\n for op in graph.get_operations():\n print(op.name)\n\n ## model_envnet/frozen_model.pb\n image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n ## model_deeplab/frozen_inference_graph.pb\n # image_pl = graph.get_tensor_by_name('ImageTensor:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n # ## model_deepnet/frozen_model.pb\n # image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n config = tf.ConfigProto() \n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(graph=graph,config=config)\n\n return image_pl, pred_seg, sess", "def _build_model(self):\n fc0_std = 0.2 # std of random initialization for fc0 matrix\n fc0_shape = [4, 4, 1024] # shape of the fc0 reshaped output (for batch size = 1)\n fc0_size = fc0_shape[0] * fc0_shape[1] * fc0_shape[2] # size of the fc0 output\n\n # if input is not a variable connecting the discriminator to another network (ex, 
generator output),\n # initialize a placeholder\n if self._input_data is None:\n # placeholder for input data\n self._input_data = tf.placeholder(tf.float32, [None, self.input_size], name=self.nid + \"_input\")\n\n # project and reshape the input array - basically an fc layer doing matrix multiplication and bias addition\n with tf.variable_scope(self.nid+\"_fc0\"):\n W = tf.get_variable(\"W\", shape=[self.input_size, fc0_size], dtype=tf.float32,\n initializer=tf.random_normal_initializer(stddev=fc0_std))\n b = tf.get_variable(\"b\", shape=[fc0_size], dtype=tf.float32,\n initializer=tf.random_normal_initializer(stddev=fc0_std))\n fc0_output = tf.reshape(tf.nn.bias_add(tf.matmul(self._input_data, W), b), [-1] + fc0_shape)\n\n fsconv_input = fc0_output # initial fsconv input is the output of the fc layer\n\n # filter's first 2 dimensions, the rest two are auto computed\n filter_shape = [5, 5]\n # generate output shapes for each fsconv layer\n output_shapes = [[int(2 ** x), int(2 ** x), int(fc0_shape[2] * 4 / (2 ** x))]\n for x in range(3, 10) if int(2**x) <= np.min(self.output_size[:2])]\n # set the last output shape to be 3-channeled (or as required by the model)\n output_shapes[-1][2] = self.output_size[2]\n\n fsconv_shapes = [fc0_output.get_shape().as_list()]\n\n batch_size = tf.shape(fsconv_input)[0] # workaround since conv2d_transpose explicitly needs batch size\n\n # create the intermediate fsconv layers\n for output_shape in output_shapes:\n with tf.variable_scope(self.nid+\"_fsconv-{}x{}\".format(output_shape[0], output_shape[1])):\n W_shape = filter_shape+[output_shape[2]]+[fsconv_input.get_shape().as_list()[-1]]\n W = tf.get_variable(\"W\", initializer=tf.truncated_normal(W_shape, stddev=0.1))\n b = tf.get_variable(\"b\", shape=output_shape[-1:], initializer=tf.constant_initializer(0.0))\n # fractionally-strided convolution network\n # conv2d_transpose does not accept variable batch sizes and batch size needs to be explicitly specified\n # a workaround is to compute it during run-time and pass it with the output_shape\n # https://github.com/tensorflow/tensorflow/issues/833\n fsconv = tf.nn.conv2d_transpose(fsconv_input, W,\n output_shape=[batch_size]+output_shape, strides=[1, 2, 2, 1])\n fsconv = tf.nn.bias_add(fsconv, b)\n fsconv = tf.nn.relu(fsconv) # apply relu layer\n # store the shape for verbose\n fsconv_shapes.append(fsconv.get_shape().as_list())\n fsconv_input = fsconv\n\n if self._verbose:\n print(\"FSConv layer output shapes - {}\".format(fsconv_shapes))\n\n self._model = fsconv", "def build_inference_graph(self):\n self.build_train_graph()", "def build_graph(self):\n # Print\n if self.verbose:\n print('Building Yolo Graph....')\n # Reset default graph\n tf.reset_default_graph()\n # Input placeholder\n self.x = tf.placeholder('float32', [None, 448, 448, 3])\n # conv1, pool1\n self.conv1 = self.conv_layer(1, self.x, 64, 7, 2)\n self.pool1 = self.maxpool_layer(2, self.conv1, 2, 2)\n # size reduced to 64x112x112\n # conv2, pool2\n self.conv2 = self.conv_layer(3, self.pool1, 192, 3, 1)\n self.pool2 = self.maxpool_layer(4, self.conv2, 2, 2)\n # size reduced to 192x56x56\n # conv3, conv4, conv5, conv6, pool3\n self.conv3 = self.conv_layer(5, self.pool2, 128, 1, 1)\n self.conv4 = self.conv_layer(6, self.conv3, 256, 3, 1)\n self.conv5 = self.conv_layer(7, self.conv4, 256, 1, 1)\n self.conv6 = self.conv_layer(8, self.conv5, 512, 3, 1)\n self.pool3 = self.maxpool_layer(9, self.conv6, 2, 2)\n # size reduced to 512x28x28\n # conv7 - conv16, pool4\n self.conv7 = self.conv_layer(10, 
self.pool3, 256, 1, 1)\n self.conv8 = self.conv_layer(11, self.conv7, 512, 3, 1)\n self.conv9 = self.conv_layer(12, self.conv8, 256, 1, 1)\n self.conv10 = self.conv_layer(13, self.conv9, 512, 3, 1)\n self.conv11 = self.conv_layer(14, self.conv10, 256, 1, 1)\n self.conv12 = self.conv_layer(15, self.conv11, 512, 3, 1)\n self.conv13 = self.conv_layer(16, self.conv12, 256, 1, 1)\n self.conv14 = self.conv_layer(17, self.conv13, 512, 3, 1)\n self.conv15 = self.conv_layer(18, self.conv14, 512, 1, 1)\n self.conv16 = self.conv_layer(19, self.conv15, 1024, 3, 1)\n self.pool4 = self.maxpool_layer(20, self.conv16, 2, 2)\n # size reduced to 1024x14x14\n # conv17 - conv24\n self.conv17 = self.conv_layer(21, self.pool4, 512, 1, 1)\n self.conv18 = self.conv_layer(22, self.conv17, 1024, 3, 1)\n self.conv19 = self.conv_layer(23, self.conv18, 512, 1, 1)\n self.conv20 = self.conv_layer(24, self.conv19, 1024, 3, 1)\n self.conv21 = self.conv_layer(25, self.conv20, 1024, 3, 1)\n self.conv22 = self.conv_layer(26, self.conv21, 1024, 3, 2)\n self.conv23 = self.conv_layer(27, self.conv22, 1024, 3, 1)\n self.conv24 = self.conv_layer(28, self.conv23, 1024, 3, 1)\n # size reduced to 1024x7x7\n # fc1, fc2, fc3\n self.fc1 = self.fc_layer(29, self.conv24, 512,\n flatten=True, linear=False)\n self.fc2 = self.fc_layer(\n 30, self.fc1, 4096, flatten=False, linear=False)\n self.fc3 = self.fc_layer(\n 31, self.fc2, 1470, flatten=False, linear=True)\n # Run session\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, self.weightFile)\n # Print\n print('Graph built.')", "def create(self):\n # Create a graph and add all layers\n self.graph = tf.Graph()\n with self.graph.as_default():\n # Define variable learning rate and dis_noise\n self.relative_lr = tf.placeholder_with_default([1.],[1],name=\"relative_lr\")\n self.relative_lr = self.relative_lr[0]\n \n self.rel_dis_noise = tf.placeholder_with_default([1.],[1],name=\"rel_dis_noise\")\n self.rel_dis_noise = self.rel_dis_noise[0]\n self.dis_noise = self.rel_dis_noise * self.dis_noise_0\n \n \n # Create the generator and discriminator\n if self.architecture == 'Res6':\n gen_dim = [64, 128,256, 256,256,256,256,256,256, 128,64 ]\n kernel_size =[7, 3,3, 3,3,3,3,3,3, 3,3, 7]\n elif self.architecture == 'Res9':\n gen_dim= [64, 128,256, 256,256,256,256,256,256,256,256,256, 128,64 ]\n kernel_size=[7, 3,3, 3,3,3,3,3,3,3,3,3, 3,3, 7]\n else:\n print('Unknown generator architecture')\n return None\n \n self.genA = Res_Gen.ResGen('BtoA',self.a_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n self.genB = Res_Gen.ResGen('AtoB',self.b_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n \n if self.patchgan == 'Patch34':\n self.disA = PatchGAN34.PatchGAN34('A',noise=self.dis_noise)\n self.disB = PatchGAN34.PatchGAN34('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch70':\n self.disA = PatchGAN70.PatchGAN70('A',noise=self.dis_noise)\n self.disB = PatchGAN70.PatchGAN70('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch142':\n self.disA = PatchGAN142.PatchGAN142('A',noise=self.dis_noise)\n self.disB = PatchGAN142.PatchGAN142('B',noise=self.dis_noise)\n elif self.patchgan == 'MultiPatch':\n self.disA = MultiPatch.MultiPatch('A',noise=self.dis_noise)\n self.disB = MultiPatch.MultiPatch('B',noise=self.dis_noise)\n else:\n print('Unknown Patch discriminator type')\n return None\n \n self.disA_His = 
HisDis.HisDis('A',noise=self.dis_noise,keep_prob=1.)\n self.disB_His = HisDis.HisDis('B',noise=self.dis_noise,keep_prob=1.)\n \n # Create a placeholder for the input data\n self.A = tf.placeholder(tf.float32,[None, None, None, self.a_chan],name=\"a\")\n self.B = tf.placeholder(tf.float32,[None, None, None, self.b_chan],name=\"b\")\n \n if self.verbose:\n print('Size A: ' +str(self.a_chan)) # Often 1 --> Real\n print('Size B: ' +str(self.b_chan)) # Often 3 --> Syn\n \n # Create cycleGAN \n \n self.fake_A = self.genA.create(self.B,False)\n self.fake_B = self.genB.create(self.A,False)\n \n \n \n # Define the histogram loss\n t_A = tf.transpose(tf.reshape(self.A,[-1, self.a_chan]),[1,0])\n t_B = tf.transpose(tf.reshape(self.B,[-1, self.b_chan]),[1,0])\n t_fake_A = tf.transpose(tf.reshape(self.fake_A,[-1, self.a_chan]),[1,0])\n t_fake_B = tf.transpose(tf.reshape(self.fake_B,[-1, self.b_chan]),[1,0])\n\n self.s_A,_ = tf.nn.top_k(t_A,tf.shape(t_A)[1])\n self.s_B,_ = tf.nn.top_k(t_B,tf.shape(t_B)[1])\n self.s_fake_A,_ = tf.nn.top_k(t_fake_A,tf.shape(t_fake_A)[1])\n self.s_fake_B,_ = tf.nn.top_k(t_fake_B,tf.shape(t_fake_B)[1])\n \n self.m_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n \n # Define generator loss functions\n self.lambda_c = tf.placeholder_with_default([self.lambda_c],[1],name=\"lambda_c\")\n self.lambda_c = self.lambda_c[0]\n self.lambda_h = tf.placeholder_with_default([self.lambda_h],[1],name=\"lambda_h\")\n self.lambda_h = self.lambda_h[0]\n \n self.dis_real_A = self.disA.create(self.A,False)\n self.dis_real_Ah = self.disA_His.create(self.m_A,False)\n self.dis_real_B = self.disB.create(self.B,False)\n self.dis_real_Bh = self.disB_His.create(self.m_B,False)\n self.dis_fake_A = self.disA.create(self.fake_A,True)\n self.dis_fake_Ah = self.disA_His.create(self.m_fake_A,True)\n self.dis_fake_B = self.disB.create(self.fake_B,True)\n self.dis_fake_Bh = self.disB_His.create(self.m_fake_B,True)\n \n self.cyc_A = self.genA.create(self.fake_B,True)\n self.cyc_B = self.genB.create(self.fake_A,True)\n \n \n # Define cycle loss (eq. 2)\n self.loss_cyc_A = tf.reduce_mean(tf.abs(self.cyc_A-self.A))\n self.loss_cyc_B = tf.reduce_mean(tf.abs(self.cyc_B-self.B))\n \n self.loss_cyc = self.loss_cyc_A + self.loss_cyc_B\n \n # Define discriminator losses (eq. 
1)\n self.loss_dis_A = (tf.reduce_mean(tf.square(self.dis_real_A)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_A)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Ah)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Ah)))*0.5*self.lambda_h\n \n \n self.loss_dis_B = (tf.reduce_mean(tf.square(self.dis_real_B)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_B)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Bh)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Bh)))*0.5*self.lambda_h\n \n self.loss_gen_A = tf.reduce_mean(tf.square(self.dis_fake_A)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Ah)) +\\\n self.lambda_c * self.loss_cyc/2.\n self.loss_gen_B = tf.reduce_mean(tf.square(self.dis_fake_B)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Bh)) +\\\n self.lambda_c * self.loss_cyc/2.\n \n # Create the different optimizer\n with self.graph.as_default():\n # Optimizer for Gen\n self.list_gen = []\n for var in tf.trainable_variables():\n if 'gen' in str(var):\n self.list_gen.append(var)\n optimizer_gen = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_gen = optimizer_gen.minimize(self.loss_gen_A+self.loss_gen_B,var_list=self.list_gen)\n \n # Optimizer for Dis\n self.list_dis = []\n for var in tf.trainable_variables():\n if 'dis' in str(var):\n self.list_dis.append(var)\n optimizer_dis = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_dis = optimizer_dis.minimize(self.loss_dis_A + self.loss_dis_B,var_list=self.list_dis)", "def build_graph(self, input_x, input_y):\n self.input_x = input_x\n self.input_y = input_y\n\n self.dropout_keep_prob_embedding_t = tf.constant(self.dropout_keep_prob_embedding)\n self.dropout_keep_prob_affine_t = tf.constant(self.dropout_keep_prob_affine)\n self.dropout_keep_prob_cell_input_t = tf.constant(self.dropout_keep_prob_cell_input)\n self.dropout_keep_prob_cell_output_t = tf.constant(self.dropout_keep_prob_cell_output)\n\n with tf.variable_scope(\"embedding\"), tf.device(\"/cpu:0\"):\n W = tf.get_variable(\n \"W\",\n [self.vocabulary_size, self.embedding_dim],\n initializer=tf.random_uniform_initializer(-1.0, 1.0))\n self.embedded_chars = tf.nn.embedding_lookup(W, input_x)\n self.embedded_chars_drop = tf.nn.dropout(self.embedded_chars, self.dropout_keep_prob_embedding_t)\n\n with tf.variable_scope(\"rnn\") as scope:\n # The RNN cell\n cell_class = self.cell_class_map.get(self.cell_class)\n one_cell = rnn_cell.DropoutWrapper(\n cell_class(self.hidden_dim),\n input_keep_prob=self.dropout_keep_prob_cell_input_t,\n output_keep_prob=self.dropout_keep_prob_cell_output_t)\n self.cell = rnn_cell.MultiRNNCell([one_cell] * self.num_layers)\n # Build the recurrence. 
We do this manually to use truncated backprop\n self.initial_state = tf.zeros([self.batch_size, self.cell.state_size])\n self.rnn_states = [self.initial_state]\n self.rnn_outputs = []\n for i in range(self.sequence_length):\n if i > 0:\n scope.reuse_variables()\n new_output, new_state = self.cell(self.embedded_chars_drop[:, i, :], self.rnn_states[-1])\n if i < max(0, self.sequence_length - self.backprop_truncate_after):\n new_state = tf.stop_gradient(new_state)\n self.rnn_outputs.append(new_output)\n self.rnn_states.append(new_state)\n self.final_state = self.rnn_states[-1]\n self.final_output = self.rnn_outputs[-1]\n\n with tf.variable_scope(\"affine\"):\n W = tf.get_variable(\n \"W\",\n [self.hidden_dim, self.affine_dim],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable(\n \"b\",\n [self.affine_dim],\n initializer=tf.constant_initializer(0.1))\n self.affine = tf.nn.tanh(tf.nn.xw_plus_b(self.final_output, W, b))\n self.affine_drop = tf.nn.dropout(self.affine, self.dropout_keep_prob_affine_t)\n\n with tf.variable_scope(\"output\"):\n W = tf.get_variable(\n \"W\",\n [self.affine_dim, self.num_classes],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable(\n \"b\",\n [self.num_classes],\n initializer=tf.constant_initializer(0.1))\n self.scores = tf.nn.xw_plus_b(self.affine_drop, W, b)\n self.predictions = tf.argmax(self.scores, 1)\n\n with tf.variable_scope(\"loss\"):\n self.losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, input_y, name=\"ce_losses\")\n self.total_loss = tf.reduce_sum(self.losses)\n self.mean_loss = tf.reduce_mean(self.losses)\n\n with tf.variable_scope(\"accuracy\"):\n self.correct_predictions = tf.equal(self.predictions, tf.argmax(input_y, 1))\n self.acc = tf.reduce_mean(tf.cast(self.correct_predictions, \"float\"), name=\"accuracy\")", "def build_graph(self):\n if self.model == 'dense':\n # ForecastNet with two densely connected hidden layers in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv':\n # ForecastNet with a convlutional neural network in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_conv_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'dense2':\n # ForecastNet with two densely connected hidden layers in a cell and linear outputs\n self.outputs, self.cost = forecastnet_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv2':\n # ForecastNet with a convolutional neural network in a cell and linear outputs\n self.outputs, self.cost = forecastnet_conv_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)", "def build_graph(self):\n\n self.graph = tf.Graph()\n self.graph.seed = self.sir_options['seed']\n # pylint: disable=E1129\n with self.graph.as_default():\n with tf.variable_scope(\"sir\"):\n\n self.frame = tf.get_variable(\n \"frame\",\n [\n self.video_options['height'],\n self.video_options['width']],\n dtype=tf.float32,\n initializer=tf.zeros_initializer)\n\n self.frame_input = tf.placeholder(\n dtype=tf.float32,\n shape=[self.video_options['height'],\n self.video_options['width']])\n self.set_frame = self.frame.assign(self.frame_input)\n\n self.template = tf.get_variable(\n 'template',\n [\n 
self.template_options['height'],\n self.template_options['width']],\n dtype=tf.float32,\n initializer=tf.zeros_initializer)\n\n with tf.variable_scope(\"grid_coordinates\"):\n rspace = tf.linspace(\n np.float32(-self.template_options['height']/2.0),\n np.float32(self.template_options['height']/2.0),\n self.template_options['height'], name='rspace')\n\n cspace = tf.linspace(\n np.float32(-self.template_options['width']/2.0),\n np.float32(self.template_options['width']/2.0),\n self.template_options['width'], name='cspace')\n\n tgrid = tf.meshgrid(rspace, cspace, indexing='ij')\n tgrid = tf.stack(tgrid, axis=2, name='tgrid')\n\n with tf.variable_scope(\"roi\"):\n self.roi_x = tf.get_variable(\n \"roi_X\",\n [4],\n dtype=tf.float32,\n initializer=tf.zeros_initializer)\n\n # build magnification and rotation transformation\n roi_c = tf.cos(self.roi_x[3])\n roi_s = tf.sin(self.roi_x[3])\n roi_rot = [[roi_c, -roi_s], [roi_s, roi_c]]\n roi_rot_mag = roi_rot*self.roi_x[2]\n\n # perform transformation contraction\n roi_grid = tf.einsum('mnz,zk->mnk', tgrid, roi_rot_mag)\n\n # translate\n roi_grid = roi_grid + self.roi_x[0:2]\n\n # shape tensors for _interpolate_bilinear batch\n roi_grid = tf.reshape(\n roi_grid,\n [1,\n (self.template_options['height'] *\n self.template_options['width']),\n 2])\n roi_frame = tf.reshape(\n self.frame,\n [1,\n self.video_options['height'],\n self.video_options['width'],\n 1])\n\n # bilinear interpolate and reshape\n roi_out = tf.squeeze(\n _interpolate_bilinear(roi_frame, roi_grid))\n roi_out = tf.reshape(\n roi_out,\n [self.template_options['height'],\n self.template_options['width']],\n name='roi_out')\n\n # system dynamics\n self.system_a = tf.constant(\n [[1, 0, 1, 0, 0, 0],\n [0, 1, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1]], dtype=tf.float32)\n\n self.system_u = tf.constant(\n [0, 0, 2, 2, 0.05, 0.02],\n shape=(6, 1), dtype=tf.float32)\n\n self.sir_p = tf.get_variable(\n \"P\",\n [self.sir_options['particle_count'], 6],\n dtype=tf.float32,\n initializer=tf.zeros_initializer)\n\n self.p_aux = tf.get_variable(\n \"P_aux\",\n [self.sir_options['particle_count'], 6],\n dtype=tf.float32,\n initializer=tf.zeros_initializer)\n\n self.p_seed = tf.placeholder(\n dtype=tf.float32,\n shape=self.sir_p.shape)\n self.seed_p = self.sir_p.assign(self.p_seed)\n\n sir_w = tf.get_variable(\"W\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n\n self.reset_w = sir_w.assign(\n tf.fill(\n tf.shape(sir_w),\n 1/self.sir_options['particle_count']))\n\n self.noise_p = tf.multiply(\n tf.random_normal(self.sir_p.shape),\n tf.reshape(self.system_u, [1, 6]), name='noise')\n prediction_p = tf.matmul(\n self.sir_p,\n self.system_a,\n transpose_b=True) + self.noise_p\n\n self.predict_from_p = self.sir_p.assign(prediction_p)\n\n with tf.variable_scope(\"transform\"):\n\n # build magnification and rotation transformations\n transform_c = tf.cos(self.sir_p[:, 5])\n transform_s = tf.sin(self.sir_p[:, 5])\n transform_rot = tf.stack(\n [tf.stack([transform_c, -transform_s], axis=1),\n tf.stack([transform_s, transform_c], axis=1)], axis=2)\n transform_rot_mag = transform_rot * \\\n tf.reshape(\n self.sir_p[:, 4],\n [self.sir_options['particle_count'], 1, 1])\n\n # perform transformation contraction\n transform_grid = tf.einsum(\n 'mnz,pzk->pmnk', tgrid, transform_rot_mag)\n\n # translate\n transform_grid = transform_grid + \\\n tf.reshape(\n self.sir_p[:, 0:2],\n 
[self.sir_options['particle_count'], 1, 1, 2])\n\n with tf.variable_scope(\"interpolations\"):\n score_grid = tf.reshape(transform_grid, [1, -1, 2])\n score_frame = tf.reshape(\n self.frame,\n [1,\n self.video_options['height'],\n self.video_options['width'],\n 1])\n interpolations = _interpolate_bilinear(\n score_frame, score_grid)\n interpolations = tf.reshape(\n interpolations,\n [self.sir_options['particle_count'],\n self.template_options['height'],\n self.template_options['width']])\n\n with tf.variable_scope(\"Score\"):\n template_mean = tf.reduce_mean(\n self.template, axis=[0, 1], keepdims=True)\n mean_shifted_template = self.template-template_mean\n e_template = tf.einsum(\n 'mn,mn->',\n mean_shifted_template,\n mean_shifted_template)\n\n ss_mean = tf.reduce_mean(\n interpolations,\n axis=[1, 2],\n keepdims=True)\n mean_shifted_ss = interpolations-ss_mean\n e_ss = tf.einsum(\n 'pmn,pmn->p', mean_shifted_ss, mean_shifted_ss)\n\n # spatial-support and template products\n e_template_ss = tf.einsum(\n 'pmn,mn->p', mean_shifted_ss, mean_shifted_template)\n\n def score_init_time_0():\n # template energy\n e0_template = tf.get_variable(\n \"e0_template\",\n [],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n store_e0_template = e0_template.assign(e_template)\n # spatial-support energy\n e0_ss = tf.get_variable(\n \"e0_ss\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n store_e0_ss = e0_ss.assign(e_ss)\n # template-spatial support cross energy\n e0_template_ss = tf.get_variable(\n \"e0_template_ss\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n store_e0_template_ss = \\\n e0_template_ss.assign(e_template_ss)\n return e0_template, store_e0_template, e0_ss, \\\n store_e0_ss, e0_template_ss, \\\n store_e0_template_ss\n\n def score_init_time_1():\n # template\n e1_template = tf.get_variable(\n \"e1_template\",\n [],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n shift_e1_template = e1_template.assign(e_template)\n # spatial-support energy\n e1_ss = tf.get_variable(\n \"e1_ss\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n shift_e1_ss = e1_ss.assign(e1_ss)\n # template-spatial support cross energy\n e1_template_ss = tf.get_variable(\n \"e1_template_ss\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n shift_e1_template_ss = e1_template_ss.assign(\n e1_template_ss)\n return e1_template, shift_e1_template, e1_ss, \\\n shift_e1_ss, e1_template_ss, shift_e1_template_ss\n\n def score_init_time_2():\n # template energy\n e2_template = tf.get_variable(\n \"e2_template\",\n [],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n shift_e2_template = e2_template.assign(e1_template)\n # spatial-support energy\n e2_ss = tf.get_variable(\n \"e2_ss\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n shift_e2_ss = e2_ss.assign(e1_ss)\n # template-spatial support cross energy\n e2_template_ss = tf.get_variable(\n \"e2_template_ss\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n shift_e2_template_ss = e2_template_ss.assign(\n e1_template_ss)\n return e2_template, shift_e2_template, e2_ss, \\\n shift_e2_ss, e2_template_ss, shift_e2_template_ss\n\n if self.sir_options['score_type'] == 'NCC':\n e0_template, \\\n self.store_e0_template, \\\n e0_ss, \\\n self.store_e0_ss, \\\n e0_template_ss, 
\\\n self.store_e0_template_ss = \\\n score_init_time_0()\n\n corr = (e0_template_ss /\n (tf.sqrt(e0_template)*tf.sqrt(e0_ss)))\n\n elif self.sir_options['score_type'] == 'ASV':\n e0_template, \\\n self.store_e0_template, \\\n e0_ss, \\\n self.store_e0_ss, \\\n e0_template_ss, \\\n self.store_e0_template_ss = \\\n score_init_time_0()\n e1_template, \\\n self.shift_e1_template, \\\n e1_ss, self.shift_e1_ss, \\\n e1_template_ss, \\\n self.shift_e1_template_ss = \\\n score_init_time_1()\n\n corr = ((e0_template_ss+e1_template_ss) /\n (tf.sqrt(e0_template+e1_template) *\n tf.sqrt(e0_ss+e1_ss)))\n\n elif self.sir_options['score_type'] == 'ASVHO':\n e0_template, \\\n self.store_e0_template, \\\n e0_ss, \\\n self.store_e0_ss, \\\n e0_template_ss, \\\n self.store_e0_template_ss = \\\n score_init_time_0()\n e1_template, \\\n self.shift_e1_template, \\\n e1_ss, self.shift_e1_ss, \\\n e1_template_ss, \\\n self.shift_e1_template_ss = \\\n score_init_time_1()\n e2_template, \\\n self.shift_e2_template, \\\n e2_ss, \\\n self.shift_e2_ss, \\\n e2_template_ss, \\\n self.shift_e2_template_ss = \\\n score_init_time_2()\n\n corr = ((e0_template_ss +\n e1_template_ss +\n e2_template_ss) /\n (tf.sqrt(\n e0_template +\n e1_template +\n e2_template) *\n tf.sqrt(\n e0_ss +\n e1_ss +\n e2_ss)))\n\n score = tf.exp(-100*(1.0-corr))\n score = score / (tf.reduce_sum(score))\n score_out = tf.get_variable(\n \"score_out\",\n [self.sir_options['particle_count']],\n dtype=tf.float32,\n initializer=tf.ones_initializer)\n self.store_score = score_out.assign(score)\n\n with tf.variable_scope(\"resampling\"):\n logprobs_w = tf.reshape(tf.log(sir_w), [1, -1])\n ridx_w = tf.squeeze(\n tf.multinomial(\n logprobs_w,\n num_samples=self.sir_options['particle_count']))\n\n resample_indices = tf.get_variable(\n \"resample_indices\",\n [self.sir_options['particle_count']],\n dtype=tf.int64,\n initializer=tf.zeros_initializer)\n self.store_ridx = resample_indices.assign(ridx_w)\n\n r_p = tf.gather(self.sir_p, resample_indices)\n self.resample_p = self.sir_p.assign(r_p)\n if self.sir_options['score_type'] == 'ASV':\n self.resample_e1_template_ss = e1_template_ss.assign(\n tf.gather(e1_template_ss, resample_indices))\n self.resample_e1_ss = e1_ss.assign(\n tf.gather(e1_ss, resample_indices))\n if self.sir_options['score_type'] == 'ASVHO':\n self.resample_e1_template_ss = e1_template_ss.assign(\n tf.gather(e1_template_ss, resample_indices))\n self.resample_e1_ss = e1_ss.assign(\n tf.gather(e1_ss, resample_indices))\n self.resample_e2_template_ss = e2_template_ss.assign(\n tf.gather(e2_template_ss, resample_indices))\n self.resample_e2_ss = e2_ss.assign(\n tf.gather(e2_ss, resample_indices))\n\n self.w_update = score_out * sir_w\n self.w_update = (self.w_update /\n tf.reduce_sum(self.w_update, axis=0))\n self.update_w = sir_w.assign(self.w_update)\n\n self.estimate = tf.reduce_sum(\n tf.reshape(\n self.w_update,\n [-1, 1])*self.sir_p, axis=0, name='estimate')\n\n with tf.variable_scope('template_update'):\n\n self.set_roi_to_template = self.template.assign(roi_out)\n\n # establish best template history if not using estimate\n if 'ESTIMATE' not in self.sir_options['update_method']:\n if 'WEIGHT' in self.sir_options['update_method']:\n max_source = sir_w\n elif 'SCORE' in self.sir_options['update_method']:\n max_source = score\n elif 'CORRELATION' in \\\n self.sir_options['update_method']:\n max_source = corr\n\n # max among current particles\n max_idx = tf.argmax(max_source)\n best_current_template = interpolations[max_idx]\n 
best_current_value = max_source[max_idx]\n self.template_history = TemplateHistory(\n self.graph,\n self.sir_options['historical_length'],\n best_current_template,\n best_current_value\n )\n\n if 'SVD' in self.sir_options['update_method']:\n best_historical_template = \\\n self.template_history.get_svd()\n else:\n best_historical_template = \\\n self.template_history.get_best()\n\n self.update_from_best = self.template.assign(\n best_historical_template)\n\n # useful ops\n self.store_aux_p = self.p_aux.assign(self.sir_p)\n self.restore_p_from_aux = self.sir_p.assign(self.p_aux)\n # number of effective particles calculation\n self.neff = 1.0/tf.einsum('p,p->', sir_w, sir_w)", "def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def build_graph(self):\n self.graph = tf.Graph()\n temp_connections = self.connections\n with self.graph.as_default():\n operations = {}\n\n # create Variables for input vertices\n for neuron_id in self.input_neurons:\n self.inputs[neuron_id] = tf.get_variable(name=str(neuron_id), shape=(),\n initializer=tf.zeros_initializer)\n deletion = []\n while len(temp_connections) > 0:\n for neuron_id in deletion:\n temp_connections.pop(neuron_id, None)\n deletion = []\n keys = list(temp_connections)\n random.shuffle(keys)\n # create input & output vertices\n for neuron_id in temp_connections:\n input_neuron_ids = temp_connections[neuron_id]\n if self.check(input_neuron_ids, operations):\n # weights\n v_weights = tf.constant(self.weights[neuron_id])\n # input vertices\n v_inputs = []\n\n for input_neuron_id in input_neuron_ids:\n if self.is_input_neuron(input_neuron_id):\n vertex = self.inputs[input_neuron_id]\n else:\n vertex = operations[input_neuron_id]\n\n v_inputs.append(vertex)\n\n deletion.append(neuron_id)\n\n # multiply weights and inputs\n mul = tf.multiply(v_inputs, v_weights, str(neuron_id))\n # sum multiplied values\n sum = tf.reduce_sum(mul, name='sum_' + str(neuron_id))\n # apply activation function\n if self.is_output_neuron(neuron_id):\n activation = tf.sigmoid(sum, name=\"output\")\n else:\n activation = tf.nn.leaky_relu(sum, alpha=0.2, name=\"relu_\" + str(neuron_id))\n\n operations[neuron_id] = activation\n if self.is_output_neuron(neuron_id):\n self.output = activation\n return self.graph, self.inputs, self.output", 
"def build_graph(self):\n assert self.n_features is not None, 'Number of features is unknown. It can be set explicitly by .core.set_num_features'\n self.graph = tf.Graph()\n self.graph.seed = self.seed\n with self.graph.as_default():\n with tf.name_scope('learnable_params') as scope:\n self.init_learnable_params()\n with tf.name_scope('input_block') as scope:\n self.init_placeholders()\n with tf.name_scope(\"cosine_similarity\"):\n self.init_similarity_computation()\n with tf.name_scope('main_block') as scope:\n self.init_main_block()\n with tf.name_scope('optimization_criterion') as scope:\n self.init_regularization()\n self.init_loss()\n self.init_target()\n self.trainer = self.optimizer.minimize(self.target)\n self.init_all_vars = tf.global_variables_initializer()\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver()", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W = tf.get_variable(\"W\", shape=[self.C, self.N], initializer=tf.truncated_normal_initializer)\n self.b = tf.get_variable(\"b\", shape=[self.C, 1], initializer=tf.zeros_initializer)\n\n self.z = tf.matmul(self.W, self.X) + self.b\n self.y_hat = tf.nn.softmax(self.z, dim=0)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z, dim=0))\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def _build_graph(self, seed):\n self.g = tf.Graph()\n with self.g.as_default():\n tf.set_random_seed(seed)\n self._placeholders()\n self._policy_nn()\n self._loss_train_op()\n self.init = tf.global_variables_initializer()", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(FLAGS.model_dir, r'/home/lg/Desktop/finetune/frozen_inception_v3_299.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def,return_elements=['InceptionV3/Predictions/Reshape_1:0'], name='lg')", "def build_graph(parameters):\n input_tensor = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n zeros = tf.zeros_like(input_tensor)\n # This maximum node is so that converter can perform the\n # constants-propagation through the above zeros_like, which it can't do if\n # the output of the zeros_like as an output of the whole graphs (graph\n # outputs can't be constants). 
If converter does not perform such\n # constants-propagation then the resulting tflite graph retains the\n # zeros_like as a Fill op, which is unsupported by TFLite, even as a custom\n # op.\n out = tf.maximum(zeros, input_tensor)\n return [input_tensor], [out]", "def _make_graph(self):\n # this resets the whole default graph for tensorflow\n tf.reset_default_graph()\n # inputs/outputs:\n # each input example will be two np.hstacked 3x3 matrices, flattened\n # (initial state s and final state s' after selecting action a)\n self.input = tf.placeholder(tf.float32, [None, 3 * 6])\n self.layers, self.weights, self.biases = \\\n make_fully_connected_network(\n input_layer=self.input,\n architecture=self.architecture,\n activation=self.activation\n )\n self.output = self.layers[-1]\n self.observed = tf.placeholder(tf.float32, shape=[None, 1])\n # MSE loss function\n self.loss = tf.reduce_sum(tf.square(self.output - self.observed))\n if self.penalty:\n penalty_tensor = tf.add_n([self.penalty_function(x) for x in self.weights])\n self.loss = self.loss + self.penalty * penalty_tensor\n self.optimizer = (self.optimizer_algo(learning_rate=self.learning_rate, **self.optimizer_params)\n .minimize(self.loss))", "def _build_bdrnn_graph(self, hparams):\n\n sample = self.iterator.get_next()\n\n inputs, tgt_outputs, seq_len = sample\n\n # linear projection to state size\n #with tf.variable_scope(\"bdrnn_in\", dtype=tf.float32):\n # inputs = tf.layers.dense(inputs=inputs,\n # units=hparams.input_proj_size,\n # kernel_initializer=tf.glorot_uniform_initializer())\n\n lm_fw_cell = []\n lm_bw_cell = []\n lm_init_state_fw = []\n lm_init_state_bw = []\n if hparams.pretrained:\n with tf.variable_scope(\"lm_rnn\", dtype=tf.float32):\n # create lm\n with tf.variable_scope(\"fw\", dtype=tf.float32):\n lm_fw_cell = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=1,\n mode=self.mode)\n # build the cell so it is in the correct scope\n # NOTE: this is hard coded\n lm_fw_cell[0].build([None, hparams.num_features])#hparams.input_proj_size])\n lm_init_state_fw = _get_initial_state([lm_fw_cell[0].state_size], tf.shape(inputs)[0], \"lm\")\n with tf.variable_scope(\"bw\", dtype=tf.float32):\n lm_bw_cell = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=1,\n mode=self.mode)\n # NOTE: this is hard coded\n lm_bw_cell[0].build([None, hparams.num_features])#hparams.input_proj_size])\n lm_init_state_bw = _get_initial_state([lm_bw_cell[0].state_size], tf.shape(inputs)[0], \"lm\")\n\n lm_outputs, lm_states = tf.nn.bidirectional_dynamic_rnn(lm_fw_cell[0],\n lm_bw_cell[0],\n inputs,\n sequence_length=seq_len,\n initial_state_fw=lm_init_state_fw[0],\n initial_state_bw=lm_init_state_bw[0],\n dtype=tf.float32)\n # optionally fix the LM weights\n if hparams.fixed_lm:\n print(\"Fixing pretrained language models.\")\n lm_outputs = tf.stop_gradient(lm_outputs)\n lm_outputs = tf.concat([lm_outputs[0], lm_outputs[1]], axis=-1)\n lm_outputs = tf.layers.dense(lm_outputs,\n 20,\n kernel_initializer=tf.glorot_uniform_initializer())\n lm_outputs = tf.concat([lm_outputs, inputs], axis=-1)\n\n\n #lm_outputs = tf.concat([lm_outputs[0], lm_outputs[1], inputs], axis=-1)\n else:\n lm_outputs = tf.concat(lm_outputs, axis=-1)\n\n\n\n with tf.variable_scope(\"bdrnn\", dtype=tf.float32) as bdrnn_scope:\n # create bdrnn\n with tf.variable_scope(\"fw\", dtype=tf.float32):\n fw_cells = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n mode=self.mode\n )\n init_state_fw = _get_initial_state([cell.state_size for 
cell in fw_cells],\n tf.shape(inputs)[0], \"initial_state_fw\")\n\n with tf.variable_scope(\"bw\", dtype=tf.float32):\n bw_cells = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n mode=self.mode,\n )\n\n init_state_bw = _get_initial_state([cell.state_size for cell in bw_cells],\n tf.shape(inputs)[0], \"initial_state_bw\")\n # NOTE: this is commented because the lm cells and states are separated now\n #fw_cells = lm_fw_cell + fw_cells\n #bw_cells = lm_bw_cell + bw_cells\n #init_state_fw = lm_init_state_fw + init_state_fw\n #init_state_bw = lm_init_state_bw + init_state_bw\n\n # run bdrnn\n combined_outputs, output_state_fw, output_state_bw = \\\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(cells_fw=fw_cells,\n cells_bw=bw_cells,\n inputs=lm_outputs,\n sequence_length=seq_len,\n initial_states_fw=init_state_fw,\n initial_states_bw=init_state_bw,\n dtype=tf.float32,\n scope=bdrnn_scope)\n # outputs is a tuple (output_fw, output_bw)\n # output_fw/output_bw are tensors [batch_size, max_time, cell.output_size]\n # outputs_states is a tuple (output_state_fw, output_state_bw) containing final states for\n # forward and backward rnn\n\n # concatenate the outputs of each direction\n #combined_outputs = tf.concat([outputs[0], outputs[1]], axis=-1)\n\n with tf.variable_scope(\"bdrnn_out\", dtype=tf.float32):\n # dense output layers\n dense1 = tf.layers.dense(inputs=combined_outputs,\n units=hparams.num_dense_units,\n kernel_initializer=tf.glorot_uniform_initializer(),\n activation=tf.nn.relu,\n use_bias=True)\n drop1 = tf.layers.dropout(inputs=dense1,\n rate=hparams.dropout,\n training=self.mode==tf.contrib.learn.ModeKeys.TRAIN)\n dense2 = tf.layers.dense(inputs=drop1,\n units=hparams.num_dense_units,\n kernel_initializer=tf.glorot_uniform_initializer(),\n activation=tf.nn.relu,\n use_bias=True)\n drop2 = tf.layers.dropout(inputs=dense2,\n rate=hparams.dropout,\n training=self.mode==tf.contrib.learn.ModeKeys.TRAIN)\n\n logits = tf.layers.dense(inputs=drop2,\n units=hparams.num_labels,\n use_bias=False)\n\n # mask out entries longer than target sequence length\n mask = tf.sequence_mask(seq_len, dtype=tf.float32)\n\n crossent = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,\n labels=tgt_outputs,\n name=\"crossent\")\n\n # divide loss by batch_size * mean(seq_len)\n loss = tf.reduce_sum(crossent*mask)/tf.cast(hparams.batch_size, tf.float32)\n\n metrics = []\n update_ops = []\n if self.mode == tf.contrib.learn.ModeKeys.EVAL:\n # mean eval loss\n loss, loss_update = tf.metrics.mean(values=loss)\n\n predictions = tf.argmax(input=logits, axis=-1)\n tgt_labels = tf.argmax(input=tgt_outputs, axis=-1)\n acc, acc_update = tf.metrics.accuracy(predictions=predictions,\n labels=tgt_labels,\n weights=mask)\n # confusion matrix\n targets_flat = tf.reshape(tgt_labels, [-1])\n predictions_flat = tf.reshape(predictions, [-1])\n mask_flat = tf.reshape(mask, [-1])\n cm, cm_update = streaming_confusion_matrix(labels=targets_flat,\n predictions=predictions_flat,\n num_classes=hparams.num_labels,\n weights=mask_flat)\n tf.add_to_collection(\"eval\", cm_summary(cm, hparams.num_labels))\n metrics = [acc, cm]\n update_ops = [loss_update, acc_update, cm_update]\n\n return logits, loss, metrics, update_ops", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def 
build_model():\n with tf.name_scope('placeholders'):\n real_data_int = tf.placeholder(tf.int32, shape=[None, picture_size])\n x_true = 2 * ((tf.cast(real_data_int, tf.float32) / 255.) - .5)\n z = tf.placeholder(tf.float32, [None, input_dim])\n if use_JL:\n JL = tf.placeholder(tf.float32, [d_last_layer_size, JL_dim])\n P_non_normalized = tf.placeholder(tf.float32, [JL_dim, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n else:\n JL = None\n P_non_normalized = tf.placeholder(tf.float32, [d_last_layer_size, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n\n x_generated = generator(z, n_features_first=n_features_first_g,\n n_features_reduction_factor=n_features_reduction_factor, min_features=64,\n BN=BN, power=power, extra_layer=extra_layer_g,\n init_method=init_method, n_features_image=n_features_image)\n\n d_pred_true, d_last_true = discriminator(x_true, reuse=False, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n d_pred_gen, d_last_gen = discriminator(x_generated, reuse=True, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n\n # define generator loss (big part taken from SWG)\n with tf.name_scope('g_loss'):\n # apply the Johnson-Lindenstrauss map, if wanted, to the flattened array\n if use_JL:\n JL_true = tf.matmul(d_last_true, JL)\n JL_gen = tf.matmul(d_last_gen, JL)\n else:\n JL_true = d_last_true\n JL_gen = d_last_gen\n\n # next project the samples (images). After being transposed, we have tensors\n # of the format: [[projected_image1_proj1, projected_image2_proj1, ...],\n # [projected_image1_proj2, projected_image2_proj2, ...],...]\n # Each row has the projections along one direction. This makes it easier for the sorting that follows.\n # first normalize the random normal vectors to lie in the sphere\n P = tf.nn.l2_normalize(P_non_normalized, axis=0)\n\n projected_true = tf.transpose(tf.matmul(JL_true, P))\n projected_fake = tf.transpose(tf.matmul(JL_gen, P))\n\n sorted_true, true_indices = tf.nn.top_k(input=projected_true, k=batch_size)\n sorted_fake, fake_indices = tf.nn.top_k(input=projected_fake, k=batch_size)\n\n # For faster gradient computation, we do not use sorted_fake to compute\n # loss. 
Instead we re-order the sorted_true so that the samples from the\n # true distribution go to the correct sample from the fake distribution.\n\n # It is less expensive (memory-wise) to rearrange arrays in TF.\n # Flatten the sorted_true from dim [n_projections, batch_size].\n flat_true = tf.reshape(sorted_true, [-1])\n\n # Modify the indices to reflect this transition to an array.\n # new index = row + index\n rows = np.asarray([batch_size * np.floor(i * 1.0 / batch_size) for i in range(n_projections * batch_size)])\n rows = rows.astype(np.int32)\n flat_idx = tf.reshape(fake_indices, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n # The scatter operation takes care of reshaping to the rearranged matrix\n shape = tf.constant([batch_size * n_projections])\n rearranged_true = tf.reshape(tf.scatter_nd(flat_idx, flat_true, shape), [n_projections, batch_size])\n\n generator_loss = tf.reduce_mean(tf.square(projected_fake - rearranged_true))\n\n # get the sliced Wasserstein distance (SWD) (since SWD and JLSWD are not comparable)\n with tf.name_scope('SWD'):\n P_SWD = tf.nn.l2_normalize(P_non_normalized_SWD, axis=0)\n\n projected_true_SWD = tf.transpose(tf.matmul(x_true, P_SWD))\n projected_fake_SWD = tf.transpose(tf.matmul(x_generated, P_SWD))\n\n sorted_true_SWD, true_indices_SWD = tf.nn.top_k(input=projected_true_SWD, k=batch_size)\n sorted_fake_SWD, fake_indices_SWD = tf.nn.top_k(input=projected_fake_SWD, k=batch_size)\n\n flat_true_SWD = tf.reshape(sorted_true_SWD, [-1])\n flat_idx_SWD = tf.reshape(fake_indices_SWD, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n rearranged_true_SWD = tf.reshape(tf.scatter_nd(flat_idx_SWD, flat_true_SWD, shape),\n [n_projections, batch_size])\n\n SWD = tf.reduce_mean(tf.square(projected_fake_SWD - rearranged_true_SWD))\n\n # define the discriminator loss\n with tf.name_scope('d_loss'):\n d_true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_pred_true), logits=d_pred_true)\n d_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_pred_gen), logits=d_pred_gen)\n discriminator_loss = tf.reduce_mean(d_true_loss + d_fake_loss)\n\n with tf.name_scope('g_optimizer'):\n generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n g_train = g_optimizer.minimize(generator_loss, var_list=generator_vars)\n\n with tf.name_scope('d_optimizer'):\n discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n d_train = d_optimizer.minimize(discriminator_loss, var_list=discriminator_vars)\n\n return real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train, d_train", "def _build_graph(self):\n\n self.graph = tf.Graph()\n\n # set self.graph as default graph\n with self.graph.as_default():\n # # clear old variables\n # tf.reset_default_graph()\n\n # set random seed\n if self.random_seed is not None:\n tf.set_random_seed(self.random_seed)\n\n self._create_placeholders()\n self._create_variables()\n\n self._create_prediction()\n\n self._create_loss()\n self._create_optimizer()\n\n self._init = tf.global_variables_initializer()\n\n self.saver = tf.train.Saver()\n\n # create session\n self.sess = tf.Session(graph=self.graph)", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def _build_model(self):\n if self.weight_function is None:\n self.weight_function = 
default_weight_function\n\n tf.reset_default_graph()\n\n # Placeholders for the inputs\n self.x0 = tf.placeholder(\n shape=[None, self.num_features],\n dtype=self.dtype,\n name=\"x0\"\n )\n self.x1 = tf.placeholder(\n shape=[None, self.num_features],\n dtype=self.dtype,\n name=\"x1\"\n )\n # Placeholder for the real classes\n self.y0 = tf.placeholder(\n shape=[None, 1],\n dtype=self.dtype,\n name=\"y0\"\n )\n # Placeholder for the weights\n self.w0 = tf.placeholder(\n shape=[None, ],\n dtype=self.dtype,\n name=\"w0\"\n )\n\n # Drop placeholder\n self.should_drop = tf.placeholder(tf.bool, name=\"drop\")\n\n # Regularization\n regularizer = tf.keras.regularizers.l2(self.weight_regularization)\n\n # Input_Dropout\n in0 = tf.layers.dropout(inputs=self.x0,\n rate=self.input_dropout,\n training=self.should_drop\n )\n\n in1 = tf.layers.dropout(inputs=self.x1,\n rate=self.input_dropout,\n training=self.should_drop\n )\n\n # Constructing the feature creation part of the net\n nn0 = tf.layers.dense(\n inputs=in0,\n units=self.hidden_layers[0],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_0\"\n )\n\n # By giving nn1 the same name as nn0 and using the flag reuse=True,\n # the weights and biases of all neurons in each branch are identical\n nn1 = tf.layers.dense(\n inputs=in1,\n units=self.hidden_layers[0],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_0\",\n reuse=True\n )\n\n # Layer Dropout\n nn0 = tf.layers.dropout(inputs=nn0,\n rate=self.dropout,\n training=self.should_drop\n )\n nn1 = tf.layers.dropout(inputs=nn1,\n rate=self.dropout,\n training=self.should_drop\n )\n\n for i in range(1, len(self.hidden_layers)):\n nn0 = tf.layers.dense(\n inputs=nn0,\n units=self.hidden_layers[i],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_\" + str(i)\n )\n nn1 = tf.layers.dense(\n inputs=nn1,\n units=self.hidden_layers[i],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_\" + str(i),\n reuse=True\n )\n\n # Layer Dropout\n nn0 = tf.layers.dropout(inputs=nn0,\n rate=self.dropout,\n training=self.should_drop\n )\n nn1 = tf.layers.dropout(inputs=nn1,\n rate=self.dropout,\n training=self.should_drop\n )\n\n # Creating antisymmetric features for the ranking\n self.nn = (nn0 - nn1) / 2.\n\n self.nn = tf.layers.dense(\n inputs=self.nn,\n units=1,\n activation=self.ranking_activation,\n use_bias=False,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_rank\"\n )\n\n self.nn_cls = tf.layers.dense(\n inputs=nn0 / 2.,\n units=1,\n activation=self.ranking_activation,\n use_bias=False,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_rank\",\n reuse=True\n )\n\n nn_out = tf.identity(\n input=self.nn,\n name=\"nn\"\n )", "def build_model(fix_first_layers_gen_b=True, fix_last_layer_gen_b=fix_last_layer_gen_new,\n fix_2last_layer_gen_b=fix_2last_layer_gen_new,\n fix_first_layers_disc_b=True, fix_last_layer_disc_b=fix_last_layer_disc_new,\n fix_2last_layer_disc_b=fix_2last_layer_disc_new\n ):\n\n with tf.name_scope('placeholders'):\n x_true = tf.placeholder(tf.float32, 
[None, 28, 28, 1])\n z = tf.placeholder(tf.float32, [None, input_dim])\n\n x_generated = generator(z, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_gen_b, fix_last_layer=fix_last_layer_gen_b,\n fix_2last_layer=fix_2last_layer_gen_b, architecture=architecture)\n\n if architecture == 'DCGAN':\n d_true1 = discriminator1(x_true, reuse=False, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n d_true = discriminator2(d_true1, reuse=False, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n d_generated1 = discriminator1(x_generated, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n d_generated = discriminator2(d_generated1, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n else: # WGAN-GP\n d_true = discriminator(x_true, reuse=False, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n d_generated = discriminator(x_generated, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n if architecture == 'DCGAN':\n with tf.name_scope('loss'):\n g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_generated,\n labels=tf.ones_like(d_generated)))\n d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_generated,\n labels=tf.zeros_like(d_generated))) +\\\n tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_true,\n labels=tf.ones_like(d_true)))\n d_loss = d_loss/2.\n\n with tf.name_scope('optimizer'):\n optimizer = tf.train.AdamOptimizer(learning_rate=2*learning_rate, beta1=0.5)\n\n g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_train = optimizer.minimize(g_loss, var_list=g_vars)\n d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_train = optimizer.minimize(d_loss, var_list=d_vars)\n\n else: # WGAN-GP\n with tf.name_scope('regularizer'):\n epsilon = tf.random_uniform([batch_size, 1, 1, 1], 0.0, 1.0)\n x_hat = epsilon * x_true + (1 - epsilon) * x_generated\n\n # without splitting the discriminator\n d_hat = discriminator(x_hat, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n gradients = 
tf.gradients(d_hat, x_hat)[0]\n ddx = tf.sqrt(tf.reduce_sum(gradients ** 2, axis=[1, 2]))\n d_regularizer = tf.reduce_mean((ddx - 1.0) ** 2)\n\n with tf.name_scope('loss'):\n g_loss = -tf.reduce_mean(d_generated)\n wasserstein_dist = tf.reduce_mean(d_true) - tf.reduce_mean(d_generated)\n d_loss = -wasserstein_dist + lambda_reg * d_regularizer\n\n with tf.name_scope('optimizer'):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0, beta2=0.9)\n # FK: TODO: beta1 = 0.5 in IWGAN, here 0 -> change? In experiments (only 1000 epochs) it seemed better with 0\n\n g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_train = optimizer.minimize(g_loss, var_list=g_vars)\n d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_train = optimizer.minimize(d_loss, var_list=d_vars)\n\n # initialize variables\n session.run(tf.global_variables_initializer())\n\n if architecture == 'DCGAN':\n return x_true, z, x_generated, d_true1, d_true, d_generated1, d_generated, g_loss, d_loss, optimizer, \\\n g_vars, g_train, d_vars, d_train\n else: # WGANGP\n return x_true, z, x_generated, d_true, d_generated, epsilon, x_hat, d_hat, gradients, ddx, d_regularizer, \\\n g_loss, wasserstein_dist, d_loss, optimizer, g_vars, g_train, d_vars, d_train", "def create_graph():\n # Creates graph from saved graph_def.pb.\n\n # with tf.gfile.FastGFile(os.path.join(\n # FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'output_graph.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def _build_graph(self):\n self.X = tf.placeholder(tf.float32, [self.N, None])\n self.y = tf.placeholder(tf.float32, [self.C, None])\n\n self.W1 = tf.get_variable(\"W1\", shape=[self.h, self.N], initializer=tf.truncated_normal_initializer)\n self.b1 = tf.get_variable(\"b1\", shape=[self.h, 1], initializer=tf.zeros_initializer)\n\n self.W2 = tf.get_variable(\"W2\", shape=[self.C, self.h], initializer=tf.truncated_normal_initializer)\n self.b2 = tf.get_variable(\"b2\", shape=[self.C, 1], initializer=tf.truncated_normal_initializer)\n\n self.z1 = tf.matmul(self.W1, self.X) + self.b1\n self.a1 = self.activation(self.z1)\n\n self.z2 = tf.matmul(self.W2, self.a1) + self.b2\n self.y_hat = tf.nn.softmax(self.z2, dim=0)\n\n self.l2_reg = tf.nn.l2_loss(self.W1) + tf.nn.l2_loss(self.W2)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.z2, dim=0)) \\\n + self.beta * self.l2_reg\n\n self.train = tf.train.AdamOptimizer().minimize(self.loss)\n\n self.correct_pred = tf.equal(tf.argmax(self.y, 0), tf.argmax(self.y_hat, 0))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n\n self.model = tf.global_variables_initializer()", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode_back()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = 
tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = 
tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def build_graph(self):\n persumed_state_shape = tuple([None, None] + self.state_shape)\n\n # placeholders\n self.states = tf.placeholder(tf.float32, persumed_state_shape)\n self.next_states = tf.placeholder(tf.float32, persumed_state_shape)\n self.rewards = tf.placeholder(tf.float32, (None,))\n self.transition_action_filters = tf.placeholder(tf.float32, (None, self.actions_count))\n self.next_legal_actions_filters = tf.placeholder(tf.float32, (None, self.actions_count))\n self.query_actions_filter = tf.placeholder(tf.float32, (None, self.actions_count))\n\n next_actions_scores = tf.stop_gradient(self.target_nn(self.next_states))\n target_values = self._reduce_max(next_actions_scores, reduction_indices=[1,], c=self.next_legal_actions_filters)\n future_estimate = self.rewards + self.discount * target_values\n\n actions_scores = tf.identity(self.prediction_nn(self.states))\n transition_action_score = actions_scores * self.transition_action_filters\n predicted_value = tf.reduce_sum(transition_action_score, reduction_indices=[1, ])\n\n self.loss = tf.reduce_mean(tf.square(future_estimate - predicted_value), name='loss')\n gradients = self.optimizer.compute_gradients(self.loss)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_value(grad, -1, 1), var)\n\n self.finalize = self.optimizer.apply_gradients(gradients)\n\n # node for actions query\n self.query_action = self._argmax(actions_scores, dimension=1, c=self.query_actions_filter)", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n \n print('#'*78,'\\nprinting model variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape().as_list()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim\n print('{:}: shape={:}, variable_parameters={:}'.format(\n variable.name, shape, variable_parameters))\n total_parameters 
+= variable_parameters\n print('total model parameters: {:}'.format(total_parameters))\n print('#'*78)", "def build_net(graph, training=True, validation=False):\n\n with graph.as_default(): \n x = tf.placeholder(tf.float32, [None] + resize_shape, 'x')\n # TODO: use len(labels_map)\n y = tf.placeholder(tf.int32, [None, 17], 'y')\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n keep_prob_fc1 = tf.placeholder(tf.float32, name='keep_prob_fc1')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n # Create Input Pipeline for Train, Validation and Test Sets\n if training:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[:index_split_train_val],\n labels=labels_onehot_list[:index_split_train_val],\n batch_size=batch_size,\n n_epochs=n_epochs,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training,\n randomize=True)\n elif validation:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[index_split_train_val:],\n labels=labels_onehot_list[index_split_train_val:],\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training) \n else:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=test_image_paths,\n labels=test_onehot_list,\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training)\n\n Ws = []\n \n current_input = x\n\n for layer_i, n_output in enumerate(n_filters):\n with tf.variable_scope('layer{}'.format(layer_i)):\n # 2D Convolutional Layer with batch normalization and relu\n h, W = utils.conv2d(x=current_input,\n n_output=n_output,\n k_h=filter_sizes[layer_i],\n k_w=filter_sizes[layer_i])\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, 'relu' + str(layer_i))\n\n # Apply Max Pooling Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.max_pool(value=h,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Apply Dropout Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.dropout(h, keep_prob)\n\n Ws.append(W)\n current_input = h\n\n h = utils.linear(current_input, fc_size, name='fc_t')[0]\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, name='fc_t/relu')\n h = tf.nn.dropout(h, keep_prob_fc1)\n\n logits = utils.linear(h, len(labels_map), name='fc_t2')[0]\n h = tf.nn.sigmoid(logits, 'fc_t2')\n\n # must be the same type as logits\n y_float = tf.cast(y, tf.float32)\n\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=y_float)\n loss = tf.reduce_mean(cross_entropy)\n\n if training:\n # update moving_mean and moving_variance so it will be available at inference time\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n else:\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n saver = tf.train.Saver()\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n return batch, batch_labels, batch_image_paths, init, x, y, phase_train, keep_prob, keep_prob_fc1, learning_rate, h, loss, optimizer, saver", "def build_graph(self):\n # place holders for inputs here \n HIDDEN_LAYER = 
self.FLAGS.feature_layer_size\n \n self.x_i = tf.placeholder(dtype=tf.float32, shape = (None, self.INPUT_DIM), name=\"x_i\")\n self.a_i = tf.placeholder(dtype=tf.float32, shape = (None, self.ACTION_DIM), name = \"a_i\")\n self.q_opc = tf.placeholder(dtype=tf.float32, shape = (None, 1), name = \"q_opc\")\n self.q_ret = tf.placeholder(dtype=tf.float32, shape = (None, 1), name = \"q_ret\" )\n self.c = self.FLAGS.c # truncation threshold constant\n \n self.actor_net = PolicyNet(HIDDEN_LAYER, self.ACTION_DIM, name= self.name + \"_actor\", co_var = self.co_var)\n self.critic_net = AdvantageValueNet(HIDDEN_LAYER , name= self.name + \"_critic\")\n \n self.policy_xi_stats, self.policy_xi_dist = self.actor_net(self.x_i)\n \n self.val_xi, self.adv_xi_ai = self.critic_net(self.x_i, self.a_i, self.policy_xi_dist)\n \n #sample a' now\n self.a_i_ = tf.reshape(self.policy_xi_dist.sample(1), shape=[-1,self.ACTION_DIM])\n self.a_i_ = tf.clip_by_value(self.a_i_, self.env.action_space.low[0], self.env.action_space.high[0]) #20190828 add clipping\n \n _, self.adv_xi_ai_ = self.critic_net(self.x_i, self.a_i_, self.policy_xi_dist) # val will be the same for \n \n _, self.average_policy_xi_dist = self.average_actor_net(self.x_i) # can this be done better ?\n \n self.prob_a_i = tf.reshape(self.policy_xi_dist.prob(self.a_i),shape=[-1,1]) + 1e-8\n self.prob_a_i_ = tf.reshape(self.policy_xi_dist.prob(self.a_i_),shape=[-1,1]) + 1e-8\n \n self.log_prob_a_i = tf.log(self.prob_a_i)\n self.log_prob_a_i_ = tf.log(self.prob_a_i_)\n \n # for predicting 1-step a_i', p_i, p_i',\n self.u_i = tf.placeholder(dtype=tf.float32, shape = (None, 2*self.ACTION_DIM))\n \n #self.u_i_dist = tf.contrib.distributions.MultivariateNormalDiag(loc= self.u_i, \n # scale_diag = tf.ones_like(self.u_i) * self.co_var)\n self.u_i_dist = tf.contrib.distributions.MultivariateNormalDiag(loc= self.u_i[:,0], scale_diag=self.u_i[:,1])\n \n self.u_i_prob_a_i = tf.reshape(self.u_i_dist.prob(self.a_i),shape=[-1,1]) + 1e-8\n self.u_i_prob_a_i_ = tf.reshape(self.u_i_dist.prob(self.a_i_),shape=[-1,1]) + 1e-8\n \n self.p_i = tf.divide(self.prob_a_i, self.u_i_prob_a_i)\n self.p_i_ = tf.divide(self.prob_a_i_ , self.u_i_prob_a_i_)\n \n\n # take care of NaNs here, for importance sampling weights (might be an extra step)\n self.p_i = tf.where(tf.is_nan(self.p_i), tf.zeros_like(self.p_i), self.p_i)\n self.p_i_ = tf.where(tf.is_nan(self.p_i_), tf.zeros_like(self.p_i_), self.p_i_)\n\n self.c_i = tf.minimum(1. 
, tf.pow(self.p_i, 1.0/self.ACTION_DIM))\n \n \n # for verification about checking if params are getting synched\n self.local_actor_vars = self.actor_net.local_params()\n self.global_actor_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global_actor')\n \n self.local_critic_vars = self.critic_net.local_params()\n self.global_critic_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global_critic')\n \n \n # Sync ops from global\n self.sync_local_actor_op = self.actor_net.update_local_params_op('global_actor') # global actor\n self.sync_local_critic_op = self.critic_net.update_local_params_op('global_critic')\n \n # soft update the average network\n self.soft_update_average_actor_op = self.average_actor_net.soft_update_from_target_params('global_actor',\n self.FLAGS.tau)\n \n #Get gradients from local network using local losses\n \n g1 = tf.reshape(tf.gradients((self.log_prob_a_i * (self.q_opc - self.val_xi)),self.policy_xi_stats,\n name=self.name+\"g1_grads\", unconnected_gradients='zero'), shape = [-1,2*self.ACTION_DIM])\n g2 = (self.adv_xi_ai_ - self.val_xi) * tf.reshape(tf.gradients((self.log_prob_a_i_), \n self.policy_xi_stats, name=self.name+\"g2_grads\", unconnected_gradients='zero'), shape = [-1,2*self.ACTION_DIM])\n \n \n self.g = tf.minimum(self.c, self.p_i) * g1 + tf.nn.relu(1 - tf.divide(self.c , self.p_i_)) * g2\n \n \n self.k = tf.reshape(tf.gradients(tf.contrib.distributions.kl_divergence(self.average_policy_xi_dist, \n self.policy_xi_dist), self.policy_xi_stats, unconnected_gradients='zero'), shape = [-1,2*self.ACTION_DIM])\n \n \n \n self.kg = tf.reduce_sum( tf.multiply(self.g, self.k), 1, keep_dims=True)\n \n #print \"kg\", self.kg\n \n self.k2 = tf.reduce_sum( tf.multiply(self.k, self.k), 1, keep_dims=True)\n \n self.reg_g = self.g - tf.maximum(tf.zeros_like(self.g), tf.divide((self.kg - self.FLAGS.delta), self.k2) ) * self.k\n \n # take gradients wrt to the local params\n self.actor_grads = tf.gradients(self.policy_xi_stats, self.local_actor_vars, \n grad_ys= -self.reg_g, name=\"actor_grads\", unconnected_gradients='zero')\n \n \n #for ti,tj in zip(self.actor_grads, self.global_actor_vars):\n # print ti, \"\\n\", tj , \"\\n\", \"===========\"\n \n # apply local gradients to the global network\n self.actor_train_op = self.optimizer.apply_gradients(zip(self.actor_grads, self.global_actor_vars),\n global_step=tf.train.get_global_step())\n \n \n # critic loss function and updates\n \n # take gradient wrt to local variables\n self.critic_loss_1 = ((self.q_ret - self.adv_xi_ai) ** 2.0) / 2.0\n \n # for predicting 1-step a_i', p_i, p_i',\n self.v_target = tf.placeholder(dtype=tf.float32, shape = (None, 1))\n \n #self.v_trunc = tf.minimum(self.p_i, 1.0) * (self.q_ret - self.adv_xi_ai) + self.val_xi\n self.critic_loss_2 = ((self.v_target - self.val_xi) ** 2.0) / 2.0\n \n self.critic_loss = self.critic_loss_1 + self.critic_loss_2\n \n #Apply local gradients to global network\n \n self.critic_grads = tf.gradients(self.critic_loss, self.local_critic_vars)\n \n self.critic_train_op = self.optimizer.apply_gradients(zip(self.critic_grads, self.global_critic_vars),\n global_step=tf.train.get_global_step())\n \n # critic_summaries op\n critic_grads_summary = []\n print('-----------------------------------------------------------------')\n print('Create critic_grads_summary histogram ')\n for grad,var in zip(self.critic_grads, self.local_critic_vars):\n print('{} - {}'.format(var.name, grad))\n critic_grads_summary.append(tf.summary.histogram(var.name + '/gradient', 
grad))\n critic_grads_summary.append(tf.summary.histogram(var.name + '/weight', var))\n \n self.critic_summary_op = tf.summary.merge([\n tf.summary.scalar(self.name + \"_critc_mean_loss_Q\", tf.reduce_mean(self.critic_loss_1)),\n tf.summary.scalar(self.name + \"_critc_mean_loss_V\", tf.reduce_mean(self.critic_loss_2)),\n tf.summary.scalar(self.name + \"_critc_sum_loss_Q\", tf.reduce_sum(self.critic_loss_1)),\n tf.summary.scalar(self.name + \"_critc_sum_loss_V\", tf.reduce_sum(self.critic_loss_2)),\n tf.summary.scalar(self.name + \"_critc_mean_loss\", tf.reduce_mean(self.critic_loss)),\n tf.summary.scalar(self.name + \"_critc_sum_loss\", tf.reduce_sum(self.critic_loss)),\n tf.summary.histogram(self.name + \"_val_target\", self.v_target),\n tf.summary.histogram(self.name + \"_val_pred\", self.val_xi),\n tf.summary.histogram(self.name + \"_Q_pred\", self.adv_xi_ai),\n tf.summary.histogram(self.name + \"_Q_ret\", self.q_ret),\n tf.summary.histogram(self.name + \"_Q_opc\", self.q_opc),\n ] + critic_grads_summary)\n \n \n # actor summaries op\n\n actor_grads_summary = []\n print('-----------------------------------------------------------------')\n print('Create actor_grads_summary histogram ')\n for grad,var in zip(self.actor_grads, self.local_actor_vars):\n print('{} - {}'.format(var.name, grad))\n actor_grads_summary.append(tf.summary.histogram(var.name + '/gradient', grad))\n actor_grads_summary.append(tf.summary.histogram(var.name + '/weight', var))\n \n\n self.actor_summary_op = tf.summary.merge([\n tf.summary.scalar(self.name + \"_actor_mean_loss_reg_g\", tf.reduce_mean(self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_neg_mean_loss_reg_g\", tf.reduce_mean(-self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_sum_loss_reg_g\", tf.reduce_sum(self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_neg_sum_reg_g\", tf.reduce_sum(-self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_sum_g\", tf.reduce_sum(self.g)),\n tf.summary.scalar(self.name + \"_actor_neg_sum_g\", tf.reduce_sum(-self.g)),\n tf.summary.scalar(self.name + \"_actor_mean_kl\", tf.reduce_mean(self.k)),\n tf.summary.scalar(self.name + \"_actor_sum_kl\", tf.reduce_sum(self.k)),\n tf.summary.histogram(self.name + \"_policy_stats\", self.policy_xi_stats),\n ] + actor_grads_summary )", "def _build_graph(self,\n question_word,\n question_word_mask,\n question_subword,\n question_subword_mask,\n question_char,\n question_char_mask,\n context_word,\n context_word_mask,\n context_subword,\n context_subword_mask,\n context_char,\n context_char_mask):\n with tf.variable_scope(\"graph\", reuse=tf.AUTO_REUSE):\n \"\"\"build representation layer for qanet model\"\"\"\n (question_feat, question_feat_mask, context_feat,\n context_feat_mask) = self._build_representation_layer(question_word, question_word_mask,\n question_subword, question_subword_mask, question_char, question_char_mask, context_word,\n context_word_mask, context_subword, context_subword_mask, context_char, context_char_mask)\n \n \"\"\"build understanding layer for qanet model\"\"\"\n (question_understanding, context_understanding, question_understanding_mask,\n context_understanding_mask) = self._build_understanding_layer(question_feat,\n context_feat, question_feat_mask, context_feat_mask)\n \n \"\"\"build interaction layer for qanet model\"\"\"\n answer_interaction, answer_interaction_mask = self._build_interaction_layer(question_understanding,\n context_understanding, question_understanding_mask, context_understanding_mask)\n \n \"\"\"build modeling layer for qanet 
model\"\"\"\n answer_modeling, answer_modeling_mask = self._build_modeling_layer(answer_interaction, answer_interaction_mask)\n \n \"\"\"build output layer for qanet model\"\"\"\n answer_output_list, answer_output_mask_list = self._build_output_layer(answer_modeling, answer_modeling_mask)\n answer_start_output = answer_output_list[0]\n answer_end_output = answer_output_list[1]\n answer_start_output_mask = answer_output_mask_list[0]\n answer_end_output_mask = answer_output_mask_list[1]\n \n return answer_start_output, answer_end_output, answer_start_output_mask, answer_end_output_mask", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def build_graph(self):\n with vs.variable_scope(\"context\"):\n context_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n context_hiddens = context_encoder.build_graph(self.context_embs,\n self.context_mask) # (batch_size, context_len, hidden_size*2)\n\n with vs.variable_scope(\"question\"):\n question_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n question_hiddens = question_encoder.build_graph(self.qn_embs,\n self.qn_mask) # (batch_size, question_len, hidden_size*2)\n question_last_hidden = tf.reshape(question_hiddens[:, -1, :], (-1, 2 * self.FLAGS.hidden_size))\n question_last_hidden = tf.contrib.layers.fully_connected(question_last_hidden,\n num_outputs=self.FLAGS.hidden_size)\n # Use context hidden states to attend to question hidden states\n\n # attn_output is shape (batch_size, context_len, hidden_size*2)\n # The following is BiDAF attention\n if self.FLAGS.use_bidaf:\n attn_layer = BiDAF(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens,\n self.context_mask) # (batch_size, context_len, hidden_size * 6)\n else: # otherwise, basic attention\n attn_layer = BasicAttn(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n _, attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens)\n # Concat attn_output to context_hiddens to get blended_reps\n blended_reps = tf.concat([context_hiddens, attn_output], axis=2) # (batch_size, context_len, hidden_size*4)\n\n blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size)\n\n decoder = RNNDecoder(self.FLAGS.batch_size, self.FLAGS.hidden_size, self.ans_vocab_size, self.FLAGS.answer_len,\n self.ans_embedding_matrix, self.keep_prob, sampling_prob=self.sampling_prob,\n schedule_embed=self.FLAGS.schedule_embed, pred_method=self.FLAGS.pred_method)\n (self.train_logits, self.train_translations, _), \\\n (self.dev_logits, self.dev_translations, self.attention_results) = decoder.build_graph(blended_reps_final, question_last_hidden,\n self.ans_embs, self.ans_mask, self.ans_ids,\n self.context_mask)", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._passage_rank()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = 
sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = total_params(tf.trainable_variables())\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def build_graph(self):\n\n ##### Build Graph #####\n baseModel.build_graph(self)\n\n ##### Create Optimization #####\n with tf.variable_scope(\"optimize\"):\n self.add_loss()\n self.add_accuracy()\n self.initialize_learning_rate()\n self.initialize_optimization()\n\n ##### History and Checkpoints #####\n self.hasTrained = False\n self._lastSaved = collections.defaultdict(None)\n self.history = collections.defaultdict(list)\n self.saver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestLossSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestAccSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n\n logging.basicConfig(level=logging.INFO)\n log_handler = logging.FileHandler(\"log.txt\")\n logging.getLogger().addHandler(log_handler)\n\n self.summaries = tf.summary.merge_all()", "def create_graph():\n with tf.gfile.FastGFile(os.path.join(\n config['inference']['model_dir'], 'output_graph.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def define_graph(self):\n with tf.name_scope('discriminator'):\n ##\n # Setup scale networks. Each will make the predictions for images at a given scale.\n ##\n\n self.scale_nets = []\n for scale_num in xrange(self.num_scale_nets):\n with tf.name_scope('scale_net_' + str(scale_num)):\n scale_factor = 1. 
/ 2 ** ((self.num_scale_nets - 1) - scale_num)\n self.scale_nets.append(DScaleModel(scale_num,\n int(self.height * scale_factor),\n int(self.width * scale_factor),\n self.scale_conv_layer_fms[scale_num],\n self.scale_kernel_sizes[scale_num],\n self.scale_fc_layer_sizes[scale_num]))\n\n # A list of the prediction tensors for each scale network\n self.scale_preds = []\n for scale_num in xrange(self.num_scale_nets):\n self.scale_preds.append(self.scale_nets[scale_num].preds)\n\n ##\n # Data\n ##\n\n self.labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')\n\n ##\n # Training\n ##\n\n with tf.name_scope('training'):\n # global loss is the combined loss from every scale network\n self.global_loss = adv_loss(self.scale_preds, self.labels)\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.optimizer = tf.train.GradientDescentOptimizer(c.LRATE_D, name='optimizer')\n self.train_op = self.optimizer.minimize(self.global_loss,\n global_step=self.global_step,\n name='train_op')\n\n # add summaries to visualize in TensorBoard\n loss_summary = tf.summary.scalar('loss_D', self.global_loss)\n self.summaries = tf.summary.merge([loss_summary])", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def prepare_processing_graph(self, model_settings):\n desired_samples = model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n self.foreground_volume_placeholder_)\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])\n self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])\n padded_foreground = tf.pad(\n scaled_foreground,\n self.time_shift_padding_placeholder_,\n mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground,\n self.time_shift_offset_placeholder_,\n [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(tf.float32,\n [desired_samples, 1])\n self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])\n background_mul = tf.multiply(self.background_data_placeholder_,\n self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n spectrogram = contrib_audio.audio_spectrogram(\n background_clamp,\n window_size=model_settings['window_size_samples'],\n stride=model_settings['window_stride_samples'],\n magnitude_squared=True)\n self.mfcc_ = contrib_audio.mfcc(\n spectrogram,\n wav_decoder.sample_rate,\n dct_coefficient_count=model_settings['dct_coefficient_count'])", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = 
tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def create_graph(self, all_samples, all_labels, nodes_per_layer, dropout_layers, dropout_rate, act_func,\n learning_rate, batch_size):\n\n with tf.variable_scope(\"Dataset\"):\n # Get shuffled batch samples and labels\n request_batch_samples, request_batch_labels = self.get_batch(all_samples, all_labels, batch_size)\n\n with tf.variable_scope(\"Input\"):\n # Create placeholders with default to use batch samples and labels\n batch_samples = tf.placeholder_with_default(request_batch_samples, [None, self.n_total_features],\n name=\"BatchSamples\")\n batch_labels = tf.placeholder_with_default(request_batch_labels, [None, self.label_dimension],\n name=\"BatchLabels\")\n\n with tf.variable_scope(\"ConvolutionalLayer\"):\n # Reshape the data to match the convolutional layer format of [height x width x channel]\n # The tensor become a 4D of [batch size, height, width, channel]\n reshaped_batch_samples = tf.reshape(batch_samples, shape=[-1, self.nt_features, self.nt_positions, 1])\n\n # Keep track of the created layers\n layers = [reshaped_batch_samples] # The batch samples will be used as input for the first hidden layer\n\n # Create the convolutional layer\n conv_layer = tf.layers.conv2d(inputs=layers[-1], filters=1, kernel_size=[1, self.nt_features],\n padding=\"same\", activation=act_func, name=\"conv-layer\")\n layers.append(conv_layer)\n # Flatten the convolutional data in a 1D vector for the fully connected layer\n flatten_conv_layer = tf.contrib.layers.flatten(layers[-1])\n layers.append(flatten_conv_layer)\n\n with tf.variable_scope(\"HiddenLayers\"):\n # The 'index_offset indicates which layer number it is; the offset increase if dropout layers are included\n index_offset = 1\n # Loop through the hidden layers nodes to define the hidden layers (including dropout layers)\n for index, n_units in enumerate(nodes_per_layer):\n # Create for every defined hidden node a fully connected layer\n name_layer = 'fc-layer{}'.format(index + index_offset)\n hidden_layer = tf.layers.dense(inputs=layers[-1], units=n_units, activation=act_func, name=name_layer)\n # Add the defined hidden layer to the list with all layers\n layers.append(hidden_layer)\n # Check if a fully connected layer is followed by a dropout layer\n if dropout_layers[index]:\n index_offset += 1\n name_layer = 'dr-layer{}'.format(index + index_offset)\n hidden_layer = tf.layers.dropout(inputs=layers[-1], rate=dropout_rate, name=name_layer)\n layers.append(hidden_layer)\n\n with tf.variable_scope(\"PredictionLayer\"):\n # Define a prediction layer which has the class dimension as output\n pred = tf.layers.dense(layers[-1], self.label_dimension, activation=None)\n\n with tf.variable_scope(\"Cost\"):\n # Sigmoid cross entropy can be used because the data allows binary classification\n self.cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=batch_labels)\n # Set the activaion function for prediction labels to evaluate the 
trained network\n self.prediction = tf.nn.sigmoid(pred)\n\n with tf.variable_scope(\"Optimizer\"):\n # Define an optimize function to decrease the loss function\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n\n # Create a graph to visualize the architecture in TensorBoard\n # working_dir = os.path.dirname(os.path.abspath(__file__))\n # writer = tf.summary.FileWriter(working_dir + \"/graphs/tensorboard\", graph=self.session.graph)\n # writer.close()", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def build_graph(self, values, values_mask, keys_mask, keys, use_mask=True, sentinel=True):\n\n with vs.variable_scope(\"encoder_initialization\"):\n\n print('value_vec_size is: ', self.value_vec_size)\n print('num_values size is: ', values.shape[1])\n print('num_keys size is: ', keys.shape[1])\n print('value_vec_size is (key):', keys.shape[2])\n # Declare variable \n # Compute projected question hidden states\n W = tf.get_variable(\"W\", shape = (self.value_vec_size, self.value_vec_size), \\\n initializer = tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", shape = (values.shape[1], self.value_vec_size), initializer = tf.constant_initializer(0))\n Q = tf.tanh(tf.tensordot(values, W, 1) + tf.expand_dims(b, axis=0)) # (batch_size, num_values, value_vec_size)\n D = keys # (batch_size, num_keys, value_vec_size)\n Q_length = values.shape[1]\n D_length = keys.shape[1]\n if sentinel:\n Q = concat_sentinel('question_sentinel', Q, self.value_vec_size) # (batch_size, num_values, value_vec_size)\n D = concat_sentinel('document_sentinel', D, self.value_vec_size)\n Q_length += 1\n D_length += 1\n\n with vs.variable_scope(\"coattention_layer_1\"):\n S_D_1, S_Q_1, C_D_1 = coattention(\\\n Q, Q_length, D, D_length, values_mask, keys_mask, use_mask)\n\n with vs.variable_scope('encode_summaries_from_coattention_layer_1'):\n\n print('Q Length is: ', Q_length)\n print('D length is: ', D_length)\n\n size = int(self.value_vec_size)\n\n if self.device == 'gpu':\n bidirection_rnn = tf.contrib.cudnn_rnn.CudnnLSTM(1, size, 2*size, direction=cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION, dtype=tf.float32)\n S_Q_1 = tf.transpose(S_Q_1, perm=[1, 0, 2])\n print 'S_Q_1 shape', S_Q_1.shape\n input_h = tf.zeros([2, tf.shape(values)[0], size])\n input_c = tf.zeros([2, tf.shape(values)[0], size])\n params = tf.get_variable(\"RNN\", shape=(estimate_cudnn_parameter_size(self.value_vec_size, size, 2)),\n initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n E_Q_2 , _, _ = bidirection_rnn(S_Q_1, input_h, input_c, params)\n print 'E_Q_2 shape:', E_Q_2 .shape\n E_Q_2 = tf.transpose(E_Q_2 , perm=[1, 0, 2])\n E_Q_2 = tf.nn.dropout(E_Q_2, self.keep_prob)\n\n else:\n cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n cell = DropoutWrapper(cell, input_keep_prob=self.keep_prob)\n Q_fw_bw_encodings, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell,\n cell_bw = cell,\n dtype = tf.float32,\n inputs = S_Q_1,\n # sequence_length = Q_length\n )\n E_Q_2 = tf.concat(Q_fw_bw_encodings, 2)\n\n\t# add gpu lstm\n D_fw_bw_encodings, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell,\n cell_bw = cell,\n dtype = tf.float32,\n inputs = S_D_1,\n # sequence_length = D_length\n ) \n E_D_2 = tf.concat(D_fw_bw_encodings, 2)\n\n with 
vs.variable_scope('coattention_layer_2'):\n S_D_2, S_Q_2, C_D_2 = coattention(\\\n E_Q_2, Q_length, E_D_2, D_length, values_mask, keys_mask, use_mask)\n\n with vs.variable_scope('final_encoder'):\n document_representations = tf.concat(\\\n [D, E_D_2, S_D_1, S_D_2, C_D_1, C_D_2], 2)#(N, D, 2H)\n\n size = int(self.value_vec_size)\n\n\t# add gpu lstm\n cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell,\n cell_bw = cell,\n dtype = tf.float32,\n inputs = document_representations,\n # sequence_length = D_length,\n )\n encoding = tf.concat(outputs, 2)\n\n encoding = encoding[:, :-1, :]\n return encoding, None,None", "def build(self):\n\n # bgr_ = bgr*255.0\n bgr_= self.X\n start_time = time.time()\n print(\"build model started\")\n\n # blue ,green, red = tf.split(axis=3, num_or_size_splits=3, value= bgr)\n red ,green, blue, = tf.split(axis=3, num_or_size_splits=3, value= bgr_)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n # blue - VGG_MEAN[0],\n # green - VGG_MEAN[1],\n # red - VGG_MEAN[2],\n\n red - VGG_MEAN[0],\n green - VGG_MEAN[1],\n blue - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n\n\n print(bgr.shape)\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n\n\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n\n\n\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.fc9 = self.fc_layer(self.fc8,'fc9')\n # self.relu9 = tf.nn.relu(self.fc9)\n\n\n\n\n relu8 = tf.nn.relu(self.fc8)\n fc9 = self.fc_layer(relu8, 'fc9')\n print((\"build model finished: %ds\" % (time.time() - start_time)))\n return fc9\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = 
tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def prepare_processing_graph(self, model_settings):\n desired_samples = model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n self.foreground_volume_placeholder_)\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])\n self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])\n padded_foreground = tf.pad(\n scaled_foreground,\n self.time_shift_padding_placeholder_,\n mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground,\n self.time_shift_offset_placeholder_,\n [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(tf.float32,\n [desired_samples, 1])\n self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])\n background_mul = tf.multiply(self.background_data_placeholder_,\n self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n spectrogram = contrib_audio.audio_spectrogram(\n background_clamp,\n window_size=model_settings['window_size_samples'],\n stride=model_settings['window_stride_samples'],\n magnitude_squared=True)\n if model_settings['use_mfcc'] == True:\n self.mfcc_ = contrib_audio.mfcc(\n spectrogram,\n wav_decoder.sample_rate,\n dct_coefficient_count=model_settings['dct_coefficient_count'])\n else:\n linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(\n num_mel_bins=model_settings['dct_coefficient_count'], num_spectrogram_bins=spectrogram.shape[-1].value,\n sample_rate=model_settings['sample_rate'], upper_edge_hertz=7600.0, lower_edge_hertz=80.0)\n self.mfcc_ = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)\n self.mfcc_.set_shape(spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))", "def create_graph(self, modelDir):\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n modelDir, 'classify_image_graph_def.pb'), 'rb') as f:\n \tgraph_def = tf.GraphDef()\n \tgraph_def.ParseFromString(f.read())\n \t_ = tf.import_graph_def(graph_def, name='')", "def build_graph(self, name):\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n # change shape of 
input for when adding score\n self.input_positions = tf.placeholder(tf.float32, shape=(None, 1, 2,6), name='inputs')\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32, name='target')\n net = self.input_positions\n\n net = tf.layers.conv2d(inputs=net, filters=128, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=128, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=64, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n\n net = tf.layers.flatten(net)\n\n net = self.add_dense_layer(net, 12, tf.nn.relu)\n\n self.value = self.add_dense_layer(net, 1, name='state_q_value')\n self.advantage = self.add_dense_layer(net, 12, name='action_advantage')\n\n self.q_values = tf.add(self.value, tf.subtract(self.advantage,\n tf.reduce_mean(self.advantage, axis=1, keepdims=True)),\n name=\"action_q_values\")\n\n self.probabilities = tf.nn.softmax(self.q_values, name='probabilities')\n\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name='actions')\n self.actions_onehot = tf.one_hot(self.actions, 12, dtype=tf.float32)\n self.q = tf.reduce_sum(tf.multiply(self.q_values, self.actions_onehot), axis=1, name=\"selected_action_q\")\n\n tf.summary.histogram(\"Action_Q_values\", self.q)\n\n self.td_error = tf.square(self.target_q - self.q)\n self.loss = tf.reduce_mean(self.td_error, name=\"q_loss\")\n\n tf.summary.scalar(\"Q_Loss\", self.loss)\n self.reg_losses = tf.identity(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=name),\n name=\"reg_losses\")\n\n reg_loss = self.beta * tf.reduce_mean(self.reg_losses)\n tf.summary.scalar(\"Regularization_loss\", reg_loss)\n\n self.merge = tf.summary.merge_all()\n\n self.total_loss = tf.add(self.loss, reg_loss, name=\"total_loss\")\n self.train_step = tf.train.GradientDescentOptimizer(learning_rate=self.learningRate). 
\\\n minimize(self.total_loss, name='train')", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n # with tf.device(\"/gpu:0\"):\n self._build_GAN()\n\n self.global_step_D = tf.Variable(0, name='global_step_D', trainable=False)\n self.global_step_D_in = tf.Variable(0, name='global_step_D_in',\n trainable=False)\n self.global_step_G = tf.Variable(0, name='global_step_G', trainable=False)\n self.global_step = tf.add(tf.add(self.global_step_G, self.global_step_D),\n self.global_step_D_in, name='global_step')\n\n tf.summary.scalar('global_step_D', self.global_step_D, collections=['All'])\n tf.summary.scalar('global_step_D_in', self.global_step_D_in,\n collections=['All'])\n tf.summary.scalar('global_step_G', self.global_step_G, collections=['All'])\n self._add_train_op()\n self._summaries_D = tf.summary.merge_all(key='Dis')\n self._summaries_D_in = tf.summary.merge_all(key='Dis_in')\n self._summaries_G = tf.summary.merge_all(key='Gen')\n self._summaries_All = tf.summary.merge_all(key='All')\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n self.loss = self.loss + self.gamma*self.cluster_layer(self.walker_layer)\n self.loss = self.loss + self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def _build_graph(self):\n with tf.variable_scope(name_or_scope = self.name, reuse = self.reuse):\n if self.strides == 1:\n data_proj, data_path = ly.channel_shuffle(self.input)\n # deal data_path with branch_main\n with tf.variable_scope(name_or_scope = 'branch_main_s1', reuse = self.reuse):\n data_path = ly.conv_bn_activation(data_path, self.mid_channels, 1, self.strides, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n data_path = ly.depthwise_conv_layer(data_path, self.mid_channels, self.ksize, [1, self.strides, self.strides, 1], data_format = self.data_format)\n data_path = ly._bn(data_path, 
self.data_format, self.is_training)\n data_path = ly.conv_bn_activation(data_path, self.outputs, 1, self.strides, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n return tf.concat((data_proj, data_path), axis = -1)\n else:\n data_proj = self.input\n data_path = self.input\n with tf.variable_scope(name_or_scope = 'branch_main_s2', reuse = self.reuse):\n data_path = ly.conv_bn_activation(data_path, self.mid_channels, 1, 1, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n data_path = ly.depthwise_conv_layer(data_path, self.mid_channels, self.ksize, [1, self.strides, self.strides, 1], data_format = self.data_format)\n data_path = ly._bn(data_path, self.data_format, self.is_training)\n data_path = ly.conv_bn_activation(data_path, self.outputs, 1, 1, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n with tf.variable_scope(name_or_scope = 'branch_proj_s2', reuse = self.reuse):\n data_proj = ly.depthwise_conv_layer(data_proj, self.inp, self.ksize, [1, self.strides, self.strides, 1], data_format = self.data_format)\n data_proj = ly._bn(data_proj, self.data_format, self.is_training)\n data_proj = ly.conv_bn_activation(data_proj, self.inp, 1, 1, data_format = self.data_format, is_training = self.is_training, _use_bias = False)\n return tf.concat((data_proj, data_path), axis = -1)", "def __init__(self,\n sess,\n output_shape,\n processing_dtype=tf.float32,\n conditional_input_shapes=None,\n noise_shape=(100,),\n generator_network_fn=gen_lib.mnist_generator_gan,\n discriminator_network_fn=gen_lib.mnist_discriminator_gan,\n tf_device='/cpu:*',\n max_tf_checkpoints_to_keep=4,\n g_optimizer=tf.train.AdamOptimizer(),\n d_optimizer=tf.train.AdamOptimizer(),\n k=1,\n weights_clip=0.01,\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n assert weights_clip > 0\n self.weights_clip = weights_clip\n gan.VanillaGAN.__init__(self,\n sess,\n output_shape,\n processing_dtype=processing_dtype,\n conditional_input_shapes=conditional_input_shapes,\n noise_shape=noise_shape,\n generator_network_fn=generator_network_fn,\n discriminator_network_fn=discriminator_network_fn,\n tf_device=tf_device,\n max_tf_checkpoints_to_keep=max_tf_checkpoints_to_keep,\n g_optimizer=g_optimizer,\n d_optimizer=d_optimizer,\n k=k,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency,\n allow_partial_reload=allow_partial_reload)\n tf.logging.info('\\t weights_clip: %d', weights_clip)", "def build_graph(self, values, values_mask, keys_mask, keys, use_mask=True):\n\n with vs.variable_scope(\"CoAttn\"):\n\n print('value_vec_size is: ', self.value_vec_size)\n print('num_values size is: ', values.shape[1])\n print('num_keys size is: ', keys.shape[1])\n print('value_vec_size is (key):', keys.shape[2])\n # Declare variable \n W = tf.get_variable(\"W\", shape = (self.value_vec_size, self.value_vec_size), \\\n initializer = tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", shape = (self.value_vec_size), initializer = tf.constant_initializer(0))\n\n # Compute projected question hidden states\n\n Q = tf.tanh(tf.tensordot(values, W, 1) + tf.reshape(b, [1, 1, self.value_vec_size])) # (batch_size, num_values, value_vec_size)\n\n\n Q = concat_sentinel('question_sentinel', Q, self.value_vec_size) # (batch_size, num_values, value_vec_size)\n # Q = tf.nn.dropout(Q, self.keep_prob)\n print('Q shape is: ', Q.shape)\n # sentinel = 
tf.get_variable(name='question_sentinel', shape=tf.shape(Q)[2], \\\n # initializer=tf.contrib.layers.xavier_initializer(), dtype = tf.float32)\n # sentinel = tf.tile(sentinel, [tf.shape(original_tensor)[0], 1, 1])\n # concat_tensor = tf.concat([original_tensor, sentinel], 2)\n\n print('Q shape is: ', Q.shape)\n D = keys # (batch_size, num_keys, value_vec_size)\n D = concat_sentinel('document_sentinel', D, self.value_vec_size)\n # D = tf.nn.dropout(D, self.keep_prob)\n\n # key = document, value = question here\n ### End your code here to implement 'Sentinel Vector'\n # Compute affinity matrix L\n L = tf.matmul(D, tf.transpose(Q, perm=[0, 2, 1])) # shape (batch_size, num_keys, num_values)\n\n # Compute Context-to-Question (C2Q) Attention, we obtain C2Q attention outputs\n if use_mask:\n print('tf.shape(values)[0] is: ', tf.shape(values)[0])\n print('tf.ones([tf.shape(values)[0], 1] is ', tf.ones([tf.shape(values)[0], 1], dtype=tf.int32))\n values_mask = tf.expand_dims(tf.concat([values_mask, tf.ones([tf.shape(values)[0], 1], dtype=tf.int32)], axis=1), 1)\n print \"value_mask shape:\", values_mask.shape\n print \"L shape:\", L.shape\n _, A_D = masked_softmax(L, mask=values_mask, dim=2) #(batch_size, num_keys, num_values)\n else:\n A_D = tf.nn.softmax(L, dim=-1)\n\n C2Q_Attn = tf.matmul(A_D, Q) # (batch_size, num_keys, value_vec_size)\n print('C2Q_Attn shapeis ', C2Q_Attn.shape)\n\n # Compute Question-to-Context (Q2C) Attention, we obtain Q2C attention outputs\n if use_mask:\n keys_mask = tf.expand_dims(tf.concat([keys_mask, tf.ones([tf.shape(keys)[0], 1], dtype=tf.int32)], axis=1), 1)\n print \"key_mask shape:\", keys_mask.shape\n print \"L shape:\", L.shape\n _, A_Q = masked_softmax(tf.transpose(L, perm=[0, 2, 1]), mask=keys_mask, dim=-1) # (batch_size, num_values, num_keys)\n else:\n A_Q = tf.nn.softmax(tf.transpose(L, perm=[0, 2, 1]), dim=2)\n\n Q2C_Attn = tf.matmul(A_Q, D) # (batch_size, num_values, key_vec_size)\n print('Q2C_Attn shapeis ', Q2C_Attn.shape)\n\n # Compute second-level attention outputs S\n S = tf.matmul(A_D, Q2C_Attn) # (batch_size, num_keys, value_vec_size)\n print('S size is: ', S.shape)\n\n # Concatenate C2Q_Attn and S:\n C_D = tf.concat([D, C2Q_Attn, S], 2) # (batch_size, num_keys, 3 * value_vec_size)\n # C_D = tf.nn.dropout(C_D, self.keep_prob)\n print('co_context size is: ', C_D.shape)\n\n # co_input = tf.concat([tf.transpose(D, perm = [0, 2, 1]), C_D], 1)\n # print('co_input size is: ', co_input.shape)\n size = int(self.value_vec_size)\n \n if self.device == 'gpu':\n bidirection_rnn = tf.contrib.cudnn_rnn.CudnnLSTM(1, size, 3*size, dropout=0.2, direction=cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION, dtype=tf.float32)\n C_D = tf.transpose(C_D, perm=[1, 0, 2])\n print 'C_D shape', C_D.shape\n input_h = tf.zeros([2, tf.shape(values)[0], size])\n input_c = tf.zeros([2, tf.shape(values)[0], size])\n params = tf.get_variable(\"RNN\", shape=(estimate_cudnn_parameter_size(3*self.value_vec_size, size, 2)),\n initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n \n U, _, _ = bidirection_rnn(C_D, input_h, input_c, params)\n#\n print 'U shape:', U.shape\n U = tf.transpose(U, perm=[1, 0, 2])\n\n else:\n (u_fw_out, u_bw_out), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=DropoutWrapper(rnn_cell.BasicLSTMCell(size),input_keep_prob=self.keep_prob), cell_bw=DropoutWrapper(rnn_cell.BasicLSTMCell(size),input_keep_prob=self.keep_prob), \n inputs=C_D, dtype = tf.float32)\n U = tf.concat([u_fw_out, u_bw_out], 2)\n\n U = tf.nn.dropout(U[:,:-1, :], self.keep_prob)\n # U = 
tf.nn.dropout(U, self.keep_prob)\n print('U shape is: ', U.shape)\n \n return U,A_D,A_Q", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def build_graph(self):\n train_graph = tf.Graph()\n opts = self._options\n with train_graph.as_default():\n self.__inputs, self.__doc_inputs, self.__labels, self.__lr = self._get_inputs()\n embed, word_embeddings, combined_embed_vector_length = self._get_embedding_layer(\n self.__inputs, self.__doc_inputs)\n\n norm_w = tf.sqrt(tf.reduce_sum(tf.square(word_embeddings), 1, keep_dims=True))\n self.__normalized_word_embeddings = word_embeddings / norm_w\n\n weights = tf.Variable(\n tf.truncated_normal((self.vocab_size, combined_embed_vector_length),\n stddev=1.0 / math.sqrt(combined_embed_vector_length))\n )\n biases = tf.Variable(tf.zeros(self.vocab_size))\n\n if opts.loss == 'softmax':\n loss = tf.nn.sampled_softmax_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"Softmax loss\", loss)\n else:\n loss = tf.nn.nce_loss(weights=weights,\n biases=biases,\n labels=self.__labels,\n inputs=embed,\n num_sampled=opts.negative_sample_size,\n num_classes=opts.vocab_size)\n tf.summary.scalar(\"NCE loss\", loss)\n\n self.__cost = tf.reduce_mean(loss)\n\n if opts.train_method == 'Adam':\n self.__optimizer = tf.train.AdamOptimizer(self.__lr).minimize(self.__cost)\n else:\n self.__optimizer = tf.train.GradientDescentOptimizer(self.__lr).minimize(self.__cost)\n\n self.__summary = tf.summary.merge_all()\n\n self._session = tf.Session(graph=train_graph)\n self.saver = tf.train.Saver()\n return self", "def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.n_classes_))", "def create_graph(self):\n self.graph = tf.Graph()\n model_type = self.options['model_type']\n optimiser_selected = self.options['optimizer']\n\n with self.graph.as_default():\n self.tf_dataset = tf.placeholder(tf.float32,\n shape=(None, self.options['num_steps'], self.input_dimensions))\n self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))\n self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')\n\n # Forward pass\n if model_type == 'rnn':\n self.predict = self.rnn_model(self.tf_dataset)\n elif model_type == 'lstm':\n self.predict = self.lstm_model(self.tf_dataset)\n else:\n raise NotImplementedError(\"Unimplemented RNN model keyword\")\n\n self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))\n\n if self.options['regularisation_coeff'] > 0.:\n # Add in L2 penalty for regularisation if required\n penalty = 
self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)\n for var in tf.trainable_variables())\n self.loss += penalty\n\n if self.options['use_customised_optimizer'] is False:\n if optimiser_selected == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif optimiser_selected == 'grad':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n elif optimiser_selected == 'ada':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif optimiser_selected == 'rms':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n else:\n raise NotImplementedError(\"Unimplemented built-in optimiser keyword.\")\n else:\n self.optimizer = self.options['customized_optimizer']\n self.minimise = self.optimizer.minimize(self.loss)", "def _create_network(self):\n self.z_mean, self.z_log_sigma_sq = self._recognition_network()\n tf.add_to_collection(\"outputs\", self.z_mean)\n\n # Draw one sample z from Gaussian distribution\n eps = tf.random_normal((self.batch_size, self.output_size), 0, 1, dtype=tf.float32)\n # z = mu + sigma*epsilon\n self.z_latent = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))\n tf.add_to_collection(\"latents\", self.z_latent)\n\n # Use generator to determine mean of\n # Bernoulli distribution of reconstructed input\n self.x_decoded = self._generator_network()\n tf.add_to_collection(\"generators\", self.x_decoded)\n tf.add_to_collection(\"targets\", tf.zeros([self.batch_size], dtype=tf.int32))", "def create_collapsed_computation_graph(self, x, reuse=False):\n\t\tprint('TRAIN: implicitly flipping individual bits with probability {}'.format(self.noise))\n\t\tdset_name = self.datasource.target_dataset\n\t\tif dset_name in ['mnist', 'BinaryMNIST', 'omniglot', 'random']:\n\t\t\tmean = self.encoder(x, reuse=reuse)\n\t\telif dset_name == 'cifar10':\n\t\t\tmean = self.cifar10_convolutional_encoder(x, reuse=reuse)\n\t\telif dset_name == 'svhn':\n\t\t\tmean = self.convolutional_32_encoder(x, reuse=reuse)\n\t\telif dset_name == 'celebA':\n\t\t\tmean = self.complex_encoder(x, reuse=reuse)\n\t\telse:\n\t\t\tprint('dataset {} is not implemented'.format(dset_name))\n\t\t\traise NotImplementedError\n\n\t\t# for downstream classification\n\t\tclassif_q = Bernoulli(logits=mean)\n\t\tclassif_y = tf.cast(classif_q.sample(), tf.float32)\n\t\t\n\t\t# if self.noise == 0, then you have to feed in logits for the Bernoulli to avoid NaNs\n\t\tif self.noise != 0:\n\t\t\ty_hat_prob = tf.nn.sigmoid(mean)\n\t\t\ttotal_prob = y_hat_prob - (2 * y_hat_prob * self.noise) + self.noise\n\t\t\tq = Bernoulli(probs=total_prob)\n\t\telse:\n\t\t\tprint('no additional channel noise; feeding in logits for latent q_phi(z|x) to avoid numerical issues')\n\t\t\ttotal_prob = tf.nn.sigmoid(mean)\n\t\t\tq = Bernoulli(logits=mean)\t\n\n\t\t# use VIMCO if self.vimco_samples > 1, else just one sample\n\t\ty = tf.cast(q.sample(self.vimco_samples), tf.float32)\n\t\tif dset_name in ['mnist', 'BinaryMNIST', 'omniglot', 'random']:\n\t\t\tx_reconstr_logits = self.decoder(y, reuse=reuse)\n\t\telif dset_name == 'cifar10':\n\t\t\tx_reconstr_logits = self.cifar10_convolutional_decoder(y, reuse=reuse)\n\t\telif dset_name == 'svhn':\n\t\t\tx_reconstr_logits = self.convolutional_32_decoder(y, reuse=reuse)\n\t\telif dset_name == 'celebA':\n\t\t\tx_reconstr_logits = self.complex_decoder(y, reuse=reuse)\n\t\telse:\n\t\t\tprint('dataset {} is not implemented'.format(dset_name))\n\t\t\traise NotImplementedError\n\n\t\treturn total_prob, y, classif_y, q, 
x_reconstr_logits", "def _build_model(self, name, hidden_layers, nodes):\n with tf.variable_scope(name):\n self.inputs_ = tf.placeholder(tf.float32, [None, self.state_size], name='inputs')\n self.actions_ = tf.placeholder(tf.int32, [None], name='actions')\n one_hot_actions = tf.one_hot(self.actions_, self.action_size)\n self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')\n self.layers = list()\n self.layers.append(fully_connected(\"hidden1\", self.inputs_, nodes))\n for layer in range(hidden_layers):\n self.layers.append(fully_connected(f\"hidden{layer+2}\", self.layers[layer], nodes))\n self.output = fully_connected(\"output\", self.layers[-1], self.action_size, activation=None)\n self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)\n self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))\n self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def create_graph():\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, FLAGS.model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def prepare_processing_graph(self):\n desired_samples = self.model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])\n\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio, self.foreground_volume_placeholder_)\n\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])\n self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])\n padded_foreground = tf.pad(scaled_foreground, self.time_shift_padding_placeholder_, mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground, self.time_shift_offset_placeholder_, [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(tf.float32, [desired_samples, 1])\n self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])\n background_mul = tf.multiply(self.background_data_placeholder_, self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n spectrogram = contrib_audio.audio_spectrogram(background_clamp,\n window_size=self.model_settings['window_size_samples'],\n stride=self.model_settings['window_stride_samples'],\n magnitude_squared=True)\n self.mfcc_ = contrib_audio.mfcc(spectrogram, wav_decoder.sample_rate,\n dct_coefficient_count=self.model_settings['dct_coefficient_count'])\n num_spectrogram_bins = spectrogram.shape[-1].value\n lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, self.model_settings['dct_coefficient_count']\n # linear_to_mel_weight_matrix is just filter-bank\n linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(\n num_mel_bins, num_spectrogram_bins, self.model_settings['sample_rate'], lower_edge_hertz,\n upper_edge_hertz)\n mel_spectrograms = tf.tensordot(\n spectrogram, linear_to_mel_weight_matrix, 1)\n mel_spectrograms.set_shape(spectrogram.shape[:-1].concatenate(\n 
linear_to_mel_weight_matrix.shape[-1:]))\n self.mel_ = mel_spectrograms\n self.log_mel_ = tf.log(mel_spectrograms + 1e-6)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def Graph_Predictor(self):\r\n with tf.variable_scope(self.graph_scope):\r\n graph_inputs = tf.expand_dims(self.inputs, axis=1)\r\n assert tf.shape(graph_inputs)[0] > 0, 'Inputs must have at least 1 batch size'\r\n assert tf.shape(graph_inputs)[2] > 0, 'Inputs must have a length greater than 1'\r\n assert tf.shape(graph_inputs)[3] > 0, 'Inputs must have at least 1 channels'\r\n\r\n assert self.graph_layers == 'every' if isinstance(self.graph_layers, str) else \\\r\n len(self.graph_layers) > 0 if isinstance(self.graph_layers, list) else False, \\\r\n 'Graph layers must be \\'every\\' or a list of int numbers.'\r\n\r\n key_cnn_groups = []\r\n query_cnn_groups = []\r\n key_temp = self.inputs\r\n query_temp = self.inputs\r\n for i in range(self.key_cnn_layers):\r\n key_temp = Conv2D(\r\n key_temp, self.embedding_dims, (1, self.key_cnn_conv_length),\r\n 1, 'SAME', 1, False, 'relu', False, 'KeyCNN_{}'.format(i))\r\n key_cnn_groups.append(key_temp)\r\n\r\n query_temp = Conv2D(\r\n query_temp, self.embedding_dims, (1, self.query_cnn_conv_length),\r\n 1, 'SAME', 1, False, 'relu', False, 'QueryCNN_{}'.format(i))\r\n query_cnn_groups.append(query_temp)\r\n\r\n graphs = []\r\n if self.graph_layers == 'every':\r\n for i in range(len(key_cnn_groups)):\r\n graphs.append(self.Calculate_Graph(key_cnn_groups[i], query_cnn_groups[i], i))\r\n\r\n else:\r\n for i in self.graph_layers:\r\n if i >= 1:\r\n graphs.append(self.Calculate_Graph(key_cnn_groups[i - 1], query_cnn_groups[i - 1], i))\r\n\r\n self.graphs = graphs", "def model(inputs):\n outputs = {}\n\n # First, build the encoder\n encoder_fn = _get_network(params.encoder_name)\n with tf.variable_scope('encoder', reuse=reuse):\n # Produces id/pose units\n enc_outputs = encoder_fn(inputs['images_1'], params, is_training)\n outputs['ids_1'] = enc_outputs['ids']\n\n # Second, build the decoder and projector\n decoder_fn = _get_network(params.decoder_name)\n with tf.variable_scope('decoder', reuse=reuse):\n outputs['voxels_1'] = decoder_fn(outputs['ids_1'], params, is_training)\n if run_projection:\n projector_fn = _get_network(params.projector_name)\n with tf.variable_scope('projector', reuse=reuse):\n outputs['projs_1'] = projector_fn(\n outputs['voxels_1'], inputs['matrix_1'], params, is_training)\n # Infer the ground-truth mask\n with tf.variable_scope('oracle', reuse=reuse):\n outputs['masks_1'] = projector_fn(inputs['voxels'], inputs['matrix_1'],\n params, False)\n\n # Third, build the entire graph (bundled strategy described in PTN paper)\n for k in range(1, params.step_size):\n with 
tf.variable_scope('projector', reuse=True):\n outputs['projs_%d' % (k + 1)] = projector_fn(\n outputs['voxels_1'], inputs['matrix_%d' %\n (k + 1)], params, is_training)\n with tf.variable_scope('oracle', reuse=True):\n outputs['masks_%d' % (k + 1)] = projector_fn(\n inputs['voxels'], inputs['matrix_%d' % (k + 1)], params, False)\n\n return outputs", "def _build_graph(self, inputs):\n\n # inputs contains a list of input variables defined above\n input_from_sensor1, input_from_sensor2, label = inputs\n print \"ok\"\n print input_from_sensor1\n # In tensorflow, inputs to convolution function are assumed to be\n # NHWC. Add a single channel here.\n #image = tf.expand_dims(image, 3)\n\n #image = image * 2 - 1 # center the pixels values at zero\n # The context manager `argscope` sets the default option for all the layers under\n # this context. Here we use 32 channel convolution with shape 3x3\n\n sensor1 = Sequential('sensor1', input_from_sensor1) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)() \n\n print sensor1\n\n sensor2 = Sequential('sensor2', input_from_sensor2) \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n output = Connect('cloud', [sensor1, sensor2], \"inner_product\") \\\n .FullyConnected('fc0', 512, activation=tf.nn.relu) \\\n .FullyConnected('fc1', 10, activation=tf.identity)()\n\n tf.nn.softmax(output, name='prob') # a Bx10 with probabilities\n\n #g = tf.get_default_graph()\n #for v in g.as_graph_def().node:\n # print v.name\n\n # a vector of length B with loss of each sample\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss\n\n correct = tf.cast(tf.nn.in_top_k(output, label, 1), tf.float32, name='correct')\n accuracy = tf.reduce_mean(correct, name='accuracy')\n\n # This will monitor training error (in a moving_average fashion):\n # 1. write the value to tensosrboard\n # 2. write the value to stat.json\n # 3. print the value after each epoch\n train_error = tf.reduce_mean(1 - correct, name='train_error')\n summary.add_moving_summary(train_error, accuracy)\n\n # Use a regex to find parameters to apply weight decay.\n # Here we apply a weight decay on all W (weight matrix) of all fc layers\n wd_cost = tf.multiply(1e-5,\n regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n\n self.cost = tf.add_n([wd_cost, cost], name='total_cost')\n\n summary.add_moving_summary(cost, wd_cost, self.cost)\n\n # monitor histogram of all weight (of conv and fc layers) in tensorboard\n summary.add_param_summary(('.*/W', ['histogram', 'rms']))", "def build_graph(self, values, values_mask, keys, keys_mask):\n with vs.variable_scope(\"CrossAttn\"):\n\n # Calculate attention distribution\n values_t = tf.transpose(values, perm=[0, 2, 1]) # (batch_size, value_vec_size, num_values)\n attn_matrix = tf.matmul(keys, values_t) # shape (batch_size, num_keys, num_values)\n\n values_mask_matrix = tf.expand_dims(values_mask, 1) # shape (batch_size, 1, num_values)\n keys_mask_matrix = tf.expand_dims(keys_mask, 2) # shape (batch_size, num_keys, 1)\n\n _, attn_dist_values = masked_softmax(attn_matrix, values_mask_matrix, 2) # shape (batch_size, num_keys, num_values). take softmax over values\n _, attn_dist_keys = masked_softmax(attn_matrix, keys_mask_matrix, 1) # shape (batch_size, num_keys, num_values). 
take softmax over keys\n \n attn_dist_keys = tf.transpose(attn_dist_keys, perm=[0, 2, 1]) # shape (batch_size, num_values, num_keys)\n\n att_vec_for_keys = tf.matmul(attn_dist_values, values) # shape (batch_size, num_keys, value_vec_size)\n att_vec_for_values = tf.matmul(attn_dist_keys, keys) # shape (batch_size, num_values, value_vec_size)\n\n # Apply dropout\n att_vec_for_keys = tf.nn.dropout(att_vec_for_keys, self.keep_prob)\n att_vec_for_values = tf.nn.dropout(att_vec_for_values, self.keep_prob)\n\n return att_vec_for_keys, att_vec_for_values", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n\n self._add_placeholders()\n\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._add_seq2seq()\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n\n\n if self._mode == 'train':\n self._add_train_op()\n\n self._summaries = tf.summary.merge_all()\n\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.gamma*self.cluster_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n '/home/ubuntu/hdd/tensorFlowDic/', 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n 
graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def _build(self):\n self.autoencoder = autoencoder(\n images = self.images,\n name = 'autoencoder')\n # self.hash = self.autoencoder.hash\n self.z = tf.placeholder(tf.float32, shape = [None, CODEWORD_LENGTH], \n name = 'z-layer') \n with tf.variable_scope('latent_space') as scope:\n for i in range(CODEWORD_LENGTH):\n tf.summary.histogram('z_' + str(i) ,self.z[:,i]) \n tf.summary.histogram('codeword_' + str(i), self.autoencoder.codeword[:,i])\n self.generator = gan_generator( z = self.z,\n input_params = self.autoencoder.params,\n name = 'gan_generator')\n self.discriminator = gan_discriminator ( images = self.images,\n generation = self.generator.generation,\n name = 'gan_discriminator' )\n self.generator.cook(fake = self.discriminator.fake)\n mean_z, var_z = tf.nn.moments(self.z, axes=[0])\n mean_codeword, var_codeword = tf.nn.moments(self.autoencoder.codeword, axes = [0])\n\n with tf.variable_scope ('divergence') as scope:\n tf.summary.scalar( tensor = tf.nn.l2_loss(mean_z-mean_codeword), name = 'mean divergence')\n tf.summary.scalar( tensor = tf.nn.l2_loss(var_z-var_codeword), name = 'variance divergence')\n # divergence = self.hash ", "def _build_graph(self):\n self.X_tr = tf.placeholder(tf.float32, [self.N, None])\n self.Y_tr = tf.placeholder(tf.float32, [self.C, None])\n self.X_te = tf.placeholder(tf.float32, [self.N, 1])\n\n self.distance = tf.negative(tf.sqrt(tf.reduce_sum(tf.square(self.X_tr - self.X_te), reduction_indices=0)))\n self.values, self.indices = tf.nn.top_k(self.distance, k=self.K, sorted=False)\n\n self.nearest_neighbors = []\n for i in range(self.K):\n self.nearest_neighbors.append(tf.argmax(self.Y_tr[:, self.indices[i]]))\n\n self.neighbours_tensor = tf.stack(self.nearest_neighbors)\n\n self.y, self.idx, self.count = tf.unique_with_counts(self.neighbours_tensor)\n self.pred = tf.slice(self.y, begin=[tf.argmax(self.count, 0)], size=tf.constant([1], dtype=tf.int64))[0]", "def _run_internal_graph(self, inputs, training=None, mask=None):\n # Note: masking support is relevant mainly for Keras.\n # It cannot be factored out without having the fully reimplement the network\n # calling logic on the Keras side. We choose to incorporate it in\n # Network because 1) it may be useful to fully support in tf.layers in\n # the future and 2) Keras is a major user of Network. 
If you don't\n # use masking, it does not interfere with regular behavior at all and you\n # can ignore it.\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = mask\n\n # Dictionary mapping reference tensors to tuples\n # (computed tensor, compute mask)\n # we assume a 1:1 mapping from tensor to mask\n # TODO(fchollet): raise exception when a `.compute_mask()` call\n # does not return a list the same size as `call`\n tensor_map = {}\n for x, y, mask in zip(self.inputs, inputs, masks):\n tensor_map[str(id(x))] = (y, mask)\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if str(id(x)) in tensor_map:\n computed_data.append(tensor_map[str(id(x))])\n\n if len(computed_data) == len(reference_input_tensors):\n # Call layer (reapplying ops to new inputs).\n with ops.name_scope(layer.name):\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n # Ensure mask propagation if applicable.\n if 'mask' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('mask', computed_mask)\n if 'training' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('training', training)\n\n output_tensors = nest.flatten(\n layer.call(computed_tensor, **kwargs))\n if hasattr(layer, 'compute_mask'):\n output_masks = nest.flatten(\n layer.compute_mask(computed_tensor, computed_mask))\n else:\n output_masks = [None for _ in range(len(output_tensors))]\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if 'mask' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('mask', computed_masks)\n if 'training' in tf_inspect.getargspec(layer.call).args:\n kwargs.setdefault('training', training)\n\n output_tensors = nest.flatten(\n layer.call(computed_tensors, **kwargs))\n if hasattr(layer, 'compute_mask'):\n output_masks = nest.flatten(\n layer.compute_mask(computed_tensors, computed_masks))\n else:\n output_masks = [None for _ in range(len(output_tensors))]\n\n if context.in_graph_mode():\n if layer.activity_regularizer is not None:\n regularization_losses = [\n layer.activity_regularizer(x) for x in output_tensors\n ]\n # Apply activity regularizer if any:\n layer.add_loss(regularization_losses, computed_tensors)\n\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors, output_tensors,\n output_masks):\n tensor_map[str(id(x))] = (y, mask)\n\n output_tensors = []\n output_masks = []\n output_shapes = []\n for x in self.outputs:\n assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)\n tensor, mask = tensor_map[str(id(x))]\n output_shapes.append(tf_layers_util.static_shape(x))\n output_tensors.append(tensor)\n output_masks.append(mask)\n\n if len(output_tensors) == 1:\n output_tensors = output_tensors[0]\n if output_shapes is not None:\n output_shapes = output_shapes[0]\n if output_masks is not None:\n output_masks = output_masks[0]\n\n 
if context.in_graph_mode():\n # Update cache;\n # keys are based on ids on input tensors and inputs masks.\n cache_key = (tf_layers_util.object_list_uid(inputs)\n + '_' + tf_layers_util.object_list_uid(masks))\n self._output_tensor_cache[cache_key] = output_tensors\n self._output_mask_cache[cache_key] = output_masks\n\n if output_shapes is not None:\n input_shapes = [tf_layers_util.static_shape(x) for x in inputs]\n cache_key = tf_layers_util.object_list_uid(input_shapes)\n self._output_shape_cache[cache_key] = output_shapes\n\n return output_tensors, output_masks", "def buildFlattener():\n with IsolatedSession() as issn:\n mat_input = tf.placeholder(tf.float32, [None, None])\n mat_output = tf.identity(tf.reshape(mat_input, shape=[-1]), name='output')\n gfn = issn.asGraphFunction([mat_input], [mat_output])\n\n return gfn", "def GNN(V_init, E_init, sizes, iterations=3, edge_layers = 2,\n edge_hidden = 100, node_layers = 2, node_hidden = 100, act=tf.nn.relu):\n V, E = V_init, E_init\n\n # Get dimensions\n N_v = int(V.get_shape()[1])\n C_v = int(V.get_shape()[2])\n C_e = int(E.get_shape()[3])\n\n with tf.variable_scope(\"GraphNeuralNet\"):\n with tf.variable_scope(\"Masks\"):\n mask = tf.sequence_mask(\n sizes, maxlen=N_v, dtype=tf.float32, name=\"Mask1D\"\n )\n mask_V = tf.expand_dims(mask, 2)\n mask_E = tf.expand_dims(mask_V,1) * tf.expand_dims(mask_V,2)\n \n # Initialize hidden state\n with tf.variable_scope(\"NodeInit\"):\n V = mask_V * MLP(V, node_layers, node_hidden)\n with tf.variable_scope(\"EdgeInit\"):\n E = mask_E * MLP(E, edge_layers, edge_hidden)\n tf.summary.image(\"Edge\", E[:,:,:,:3])\n\n for i in range(iterations):\n # with tf.variable_scope(\"Iteration{}\".format(i)):\n # reuse = None\n with tf.name_scope(\"Iteration{}\".format(i)):\n reuse = True if i > 0 else None\n with tf.variable_scope(\"EdgeUpdate\", reuse=reuse):\n # Update edges given {V,E}\n f = PairFeatures(\n V, E, edge_hidden, reuse=reuse, name=\"EdgeFeatures\", activation=act\n )\n dE = MLP(\n f, edge_layers, edge_hidden, name=\"EdgeMLP\", activation=act, reuse=reuse # changed\n )\n # dE = tf.layers.dropout(dE, dropout, training=bool(dropout))\n E = E + mask_E * dE\n with tf.variable_scope(\"NodeUpdate\", reuse=reuse):\n # Update nodes given {V,E'}\n # f = PairFeatures(\n # V, E, node_hidden, reuse=reuse, name=\"NodeFeatures\", activation=act\n # )\n tf.summary.image(\"EdgeOut\", E[:,:,:,:3])\n dV = MLP(\n E, node_layers, node_hidden, name = \"NodeMessages\", activation=act, reuse=reuse\n )\n dV = tf.reduce_sum(dV, 2)\n dV = MLP(\n dV, node_layers, node_hidden, name = \"NodeMLP\", activation=act, reuse=reuse # changed\n )\n # dV = tf.layers.dropout(dV, dropout, training=bool(dropout))\n V = V + mask_V * dV\n return V, E, mask_V, mask_E", "def generator_model(self):\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n # Image input\n input_img = tf.keras.Input(shape=(512, 512, 1))\n\n # Define encoder layers\n encoder_1 = self.encoder(input_img, num_filters=64, batchnorm=False) # 256 x 256 x 64\n encoder_2 = self.encoder(encoder_1, num_filters=128) # 128 x 128 x128\n encoder_3 = self.encoder(encoder_2, num_filters=256) # 64 x 64 x 256\n encoder_4 = self.encoder(encoder_3, num_filters=256) # 32 x 32 x 256\n encoder_5 = self.encoder(encoder_4, num_filters=512) # 16 x 16 x 512\n encoder_6 = self.encoder(encoder_5, num_filters=512) # 8 x 8 x 512\n encoder_7 = self.encoder(encoder_6, num_filters=512) # 4 x 4 x 512\n encoder_8 = self.encoder(encoder_7, num_filters=512) # 2 x 2 x 512\n\n # 
Bottleneck, no batch norm and relu\n bottleneck = Conv2D(filters=512, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init,\n use_bias=False)(encoder_8) # 1 x 1 x 512\n bottleneck = Activation('relu')(bottleneck)\n\n # Decoder model\n decoder_1 = self.decoder(bottleneck, encoder_8, num_filters=512) # 2 x 2 x 512\n decoder_2 = self.decoder(decoder_1, encoder_7, num_filters=512) # 4 x 4 x 512\n decoder_3 = self.decoder(decoder_2, encoder_6, num_filters=512) # 8 x 8 x 512\n decoder_4 = self.decoder(decoder_3, encoder_5, num_filters=512) # 16 x 16 x 512 # Original dropout is False\n decoder_5 = self.decoder(decoder_4, encoder_4, num_filters=256, dropout=False) # 32 x 32 x 256\n decoder_6 = self.decoder(decoder_5, encoder_3, num_filters=256, dropout=False) # 64 x 64 x 256\n decoder_7 = self.decoder(decoder_6, encoder_2, num_filters=128, dropout=False) # 128 x 128 x 128\n decoder_8 = self.decoder(decoder_7, encoder_1, num_filters=64, dropout=False) # 256 x 256 x 64\n\n # Output\n output = Conv2DTranspose(filters=1, kernel_size=(4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(decoder_8)\n output_image = Activation('tanh')(output)\n\n # Define model\n generator_model = tf.keras.Model(input_img, output_image)\n return generator_model", "def _create_model(x: tf.Tensor, drop: float, is_training: bool) -> tf.Tensor:\n x = tf.reshape(x, [-1, 28, 28, 1])\n _layers = [1, 1]\n _filters = [32, 64]\n\n # create the residual blocks\n for i, l in enumerate(_layers):\n x = _conv_block(x, l, _filters[i], is_training)\n\n x = tf.layers.Flatten()(x)\n _fc_size = [1024]\n\n # create the fully connected blocks\n for s in _fc_size:\n x = _fc_block(x, s, is_training, drop)\n # add an output layer (10 classes, one output for each)\n return tf.layers.Dense(10)(x)", "def create_graph(self, feature, **kwargs):\n self.input_size = feature.shape[1:3]\n\n net = PSPNet101({'data': feature}, is_training=True, num_classes=self.class_num)\n self.pred = net.layers['conv6']\n pred = tf.image.resize_bilinear(self.pred, self.input_size)\n self.output_size = pred.shape[1:3]\n self.output = tf.nn.softmax(pred)", "def build_graph(self, save_model_path):\n if os.path.exists(\"{}.meta\".format(save_model_path)):\n logger.info(\"Graph existed, ready to be reloaded...\")\n else:\n logger.info(\"No graph can be loaded, so create a new graph...\")\n tf.reset_default_graph()\n # placeholders\n x = self.neural_net_image_input((32, 32, 3))\n y = self.neural_net_label_input(10)\n keep_prob = self.neural_net_keep_prob_input()\n\n # model\n logits_out = self.conv_net(x, keep_prob)\n\n # Name logits_out\n logits_out = tf.identity(logits_out, name='logits')\n\n # loss and optimizer\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_out, labels=y), name='cost')\n optimzer = tf.train.AdamOptimizer(name='optimizer').minimize(loss)\n\n # Accuracy\n correct_pred = tf.equal(tf.argmax(y, axis=1), tf.argmax(logits_out, axis=1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\n # print(type(tf.Variable(1)))\n saver = tf.train.Saver()\n if not os.path.exists('./savedModel'):\n os.mkdir('./savedModel')\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.save(sess, './savedModel/cnn-model')", "def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = 
self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths", "def build_graph(self, values, values_mask, keys, keys_mask):\n with vs.variable_scope(\"BidirectionalAttn\"):\n # Divide the weight matrix in 3 parts\n weights_sim1 = tf.get_variable(name = \"weights_sim1\", shape = [self.key_vec_size, 1], dtype = tf.float32, initializer = tf.random_normal_initializer())\n weights_sim2 = tf.get_variable(name = \"weights_sim2\", shape = [self.value_vec_size, 1], dtype = tf.float32, initializer = tf.random_normal_initializer())\n weights_sim3 = tf.get_variable(name = \"weights_sim3\", shape = [self.key_vec_size], dtype = tf.float32, initializer = tf.random_normal_initializer())\n\n # Obtain Similarity Matrix sim_matrix/S where S_ij = w.T*[c;q;c o q]\n # c: context/keys, q: question/values, w = [w1,w2,w3]\n # values shape: (batch_size, num_values, value_vec_size)\n # keys shape: (batch_size, num_keys, value_vec_size)\n batch_size = tf.shape(values)[0] # batch_size\n num_values = tf.shape(values)[1] # question_len\n num_keys = tf.shape(keys)[1] # context_len\n values_t = tf.transpose(values, perm=[0, 2, 1]) # (batch_size, value_vec_size, num_values)\n\n # Part 1\n S1 = tf.reshape(tf.matmul(tf.reshape(keys, [-1, self.key_vec_size]), weights_sim1),[-1, num_keys, 1]) # shape : (batch_size, num_keys, 1)\n \n # Part 2\n S2 = tf.reshape(tf.matmul(tf.reshape(values, [-1, self.value_vec_size]), weights_sim2),[-1, num_values, 1]) # shape : (batch_size, num_values, 1)\n S2 = tf.transpose(S2, perm=[0, 2, 1]) # shape : (batch_size, 1, num_values). Transposed for Broadcasting\n\n # Part 3: GPU efficient version\n weights_sim3 = tf.expand_dims(tf.expand_dims(weights_sim3,0),1) # make it (1, 1, key_vec_size)\n ctile = tf.multiply(keys, weights_sim3) #(batch_size, num_keys, value_vec_size)\n S3 = tf.matmul(ctile, values_t)\n\n # Final sim_matrix/S obtained via broadcasting and adding 3 terms: S_shape->(batch_size, num_keys, num_values)\n sim_matrix = S1+S2+S3\n\n # Calculate mask same shape as S based on keys_mask & values_mask\n sim_mask = tf.expand_dims(keys_mask, 2)*tf.expand_dims(values_mask, 1) # (batch_size, num_keys, num_values)\n\n # Context-to-Question (C2Q) Attention\n # Calculate attention distribution\n _, attn_dist = masked_softmax(sim_matrix, sim_mask, 2)\n\n # Use attention distribution to take weighted sum of values: a is the attended query vector\n att_vec = tf.matmul(attn_dist, values) # shape (batch_size, num_keys, value_vec_size)\n\n # Question-to-Context (Q2C) Attention\n m = tf.reduce_max(sim_matrix, 2) # shape(batch_size, num_keys)\n _, beta_dist = masked_softmax(m, keys_mask, 1) # shape (batch_size, num_keys)\n beta_dist = tf.expand_dims(beta_dist, 1) # shape (batch_size, 1, num_keys)\n # Use attention distribution to take weighted sum of values: c_prime is Q2C attention vector\n c_prime = tf.matmul(beta_dist, keys) # shape (batch_size, 1, key_vec_size)\n\n # Apply dropout\n att_vec = tf.nn.dropout(att_vec, self.keep_prob)\n c_prime = tf.nn.dropout(c_prime, self.keep_prob)\n\n return att_vec, c_prime", "def create_graph():\n\t# Creates graph from saved graph_def.pb.\n\twith tf.gfile.FastGFile(modelFullPath, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\t_ = tf.import_graph_def(graph_def, name='')", "def define_graph():\n \n num_cells = 65\n num_class = 2\n 
input_data = tf.placeholder(tf.float32,[None, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE],name=\"input_data\")\n labels = tf.placeholder(tf.float32,[None,num_class], name=\"labels\")\n dropout_keep_prob = tf.placeholder_with_default(0.6,shape=())\n lstm_cell_1 = tf.contrib.rnn.GRUCell(num_cells)\n lstm_cell_2 = tf.contrib.rnn.GRUCell(num_cells)\n lstm_cell_1 = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell_1, output_keep_prob=dropout_keep_prob)\n lstm_cell_2 = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell_2, output_keep_prob=dropout_keep_prob)\n (value_1,value_2),_ = tf.nn.bidirectional_dynamic_rnn(cell_fw =lstm_cell_1, cell_bw = lstm_cell_2, dtype = tf.float32, inputs = input_data)\n final_value = tf.concat((value_1, value_2),2)\n final_output = final_value[:,-1,:]\n logits = tf.layers.dense(final_output,2)\n pred = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n Accuracy = tf.reduce_mean(tf.cast(pred, tf.float32), name = 'accuracy')\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels), name = 'loss')\n optimizer = tf.train.AdamOptimizer(learning_rate = 0.0001).minimize(loss)\n return input_data, labels, dropout_keep_prob, optimizer, Accuracy, loss", "def build(self, graph, name_scopes, training):\n n_features = self.n_features\n placeholder_scope = TensorflowGraph.get_placeholder_scope(\n graph, name_scopes)\n with graph.as_default():\n with placeholder_scope:\n self.mol_features = tf.placeholder(\n tf.float32, shape=[None, n_features], name='mol_features')\n\n layer_sizes = self.layer_sizes\n weight_init_stddevs = self.weight_init_stddevs\n bias_init_consts = self.bias_init_consts\n dropouts = self.dropouts\n lengths_set = {\n len(layer_sizes),\n len(weight_init_stddevs),\n len(bias_init_consts),\n len(dropouts),\n }\n assert len(lengths_set) == 1, 'All layer params must have same length.'\n n_layers = lengths_set.pop()\n assert n_layers > 0, 'Must have some layers defined.'\n\n prev_layer = self.mol_features\n prev_layer_size = n_features\n for i in range(n_layers):\n layer = tf.nn.relu(\n model_ops.fully_connected_layer(\n tensor=prev_layer,\n size=layer_sizes[i],\n weight_init=tf.truncated_normal(\n shape=[prev_layer_size, layer_sizes[i]],\n stddev=weight_init_stddevs[i]),\n bias_init=tf.constant(\n value=bias_init_consts[i], shape=[layer_sizes[i]])))\n layer = model_ops.dropout(layer, dropouts[i], training)\n prev_layer = layer\n prev_layer_size = layer_sizes[i]\n\n output = []\n for task in range(self.n_tasks):\n output.append(\n tf.squeeze(\n model_ops.fully_connected_layer(\n tensor=prev_layer,\n size=layer_sizes[i],\n weight_init=tf.truncated_normal(\n shape=[prev_layer_size, 1],\n stddev=weight_init_stddevs[i]),\n bias_init=tf.constant(value=bias_init_consts[i],\n shape=[1]))))\n return output", "def _build_network(self):\n self.new_trainable_variable(\"w0_sin\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_sin\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n self.new_trainable_variable(\"w0_cos\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_cos\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), 
dtype=np.float64))\n self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n def action_infer(state: np.array) -> np.array:\n \"\"\"\n Get state and return feedback.\n\n state: [f_0, f_1, ..., phi_0, phi_1, ..., t_0, t_1, ...]\n return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n \"\"\"\n forces = state[:config.somites]\n phis = state[config.somites:config.somites + config.oscillators]\n tensions = state[config.somites + config.oscillators:]\n\n f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n discrepancies = -0.5 * config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer" ]
[ "0.69234383", "0.69234383", "0.6907245", "0.6791445", "0.6789565", "0.6765206", "0.6709734", "0.66579914", "0.6548826", "0.65472156", "0.6503266", "0.6484194", "0.6441474", "0.6392635", "0.6377581", "0.6376619", "0.6360305", "0.63222873", "0.6321313", "0.63196373", "0.6286161", "0.6281461", "0.62745786", "0.6257801", "0.6243213", "0.624155", "0.6219567", "0.62148637", "0.6211076", "0.6195835", "0.617546", "0.6169689", "0.6162213", "0.61551714", "0.61543316", "0.61532164", "0.6139259", "0.6119007", "0.6114992", "0.6113225", "0.6098507", "0.6097977", "0.6097508", "0.6092228", "0.6090705", "0.60884047", "0.6073244", "0.60710835", "0.6068641", "0.6058936", "0.60586685", "0.6057813", "0.60572183", "0.6055127", "0.60482675", "0.6044015", "0.60292304", "0.6024144", "0.60230345", "0.6022529", "0.6014716", "0.6010331", "0.6002969", "0.59971833", "0.599265", "0.59920895", "0.5992019", "0.59757245", "0.5967638", "0.596463", "0.5964511", "0.5958841", "0.5955903", "0.59383553", "0.5921647", "0.5917471", "0.59167534", "0.59096867", "0.5909304", "0.5903346", "0.58994985", "0.58972996", "0.58857733", "0.5870885", "0.5852595", "0.5851481", "0.5845619", "0.5840587", "0.5836277", "0.58325934", "0.5826356", "0.58201367", "0.5805294", "0.5786129", "0.57810724", "0.5780404", "0.57802755", "0.57793856", "0.5778924", "0.5774491" ]
0.6129226
37
Compute the losses for the generators and discriminators.
def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):
        # compute the generators' loss
        G_loss = self.__G_loss(self.D_B, fakeB)
        F_loss = self.__G_loss(self.D_A, fakeA)
        cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
        ident_loss = self.__identity_loss(identA, identB)
        Gen_loss = G_loss + F_loss + cc_loss + ident_loss

        # Compute the discriminators' loss. Use fake images from the image pool to improve stability
        D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
        D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)

        return Gen_loss, D_A_loss, D_B_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_losses(self):\n cycle_consistency_loss_a = \\\n self._lambda_a * losses.cycle_consistency_loss(\n real_images=self.input_a, generated_images=self.cycle_images_a,\n )\n cycle_consistency_loss_b = \\\n self._lambda_b * losses.cycle_consistency_loss(\n real_images=self.input_b, generated_images=self.cycle_images_b,\n )\n\n lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)\n lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)\n\n g_loss_A = \\\n cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b\n g_loss_B = \\\n cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a\n\n d_loss_A = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_a_is_real,\n prob_fake_is_real=self.prob_fake_pool_a_is_real,\n )\n d_loss_B = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_b_is_real,\n prob_fake_is_real=self.prob_fake_pool_b_is_real,\n )\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)\n\n self.model_vars = tf.trainable_variables()\n\n d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]\n g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]\n d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]\n g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]\n\n self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)\n self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)\n self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)\n self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)\n\n for var in self.model_vars:\n print(var.name)\n\n # Summary variables for tensorboard\n self.g_A_loss_summ = tf.summary.scalar(\"g_A_loss\", g_loss_A)\n self.g_B_loss_summ = tf.summary.scalar(\"g_B_loss\", g_loss_B)\n self.d_A_loss_summ = tf.summary.scalar(\"d_A_loss\", d_loss_A)\n self.d_B_loss_summ = tf.summary.scalar(\"d_B_loss\", d_loss_B)", "def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. 
regression loss for bbox \n\n return classification_loss, bbox_reg_loss", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def losses(self):\n pass", "def _compute_losses(discriminator, d_real, d_fake, interpolated_x, interpolated_c):\n wasserstein_distance = tf.reduce_mean(d_real) - tf.reduce_mean(d_fake)\n\n gradient_penalty_x = wgan.compute_gradient_penalty(\n lambda interpolated: discriminator(interpolated, interpolated_c),\n interpolated_x\n )\n \n gradient_penalty_c = wgan.compute_gradient_penalty(\n lambda interpolated: discriminator(interpolated_x, interpolated),\n interpolated_c\n )\n\n g_loss = tf.reduce_mean(d_fake)\n d_loss = wasserstein_distance + (\n wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_x +\n wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_c\n )\n\n return g_loss, d_loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def losses(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], Loss):\n yield name", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def get_loss_funcs():\n\n def _eucl_loss(x, y):\n return K.sum(K.square(x - y)) / batch_size / 2\n\n losses = {}\n losses[\"weight_stage1_L1\"] = _eucl_loss\n losses[\"weight_stage1_L2\"] = _eucl_loss\n losses[\"weight_stage2_L1\"] = _eucl_loss\n losses[\"weight_stage2_L2\"] = _eucl_loss\n losses[\"weight_stage3_L1\"] = _eucl_loss\n losses[\"weight_stage3_L2\"] = _eucl_loss\n losses[\"weight_stage4_L1\"] = _eucl_loss\n losses[\"weight_stage4_L2\"] = _eucl_loss\n losses[\"weight_stage5_L1\"] = _eucl_loss\n losses[\"weight_stage5_L2\"] = _eucl_loss\n losses[\"weight_stage6_L1\"] = _eucl_loss\n losses[\"weight_stage6_L2\"] = _eucl_loss\n\n return losses", "def print_losses(epoch_gen_adv_loss, epoch_gen_l1_loss, epoch_disc_real_loss, epoch_disc_fake_loss,\n epoch_disc_real_acc, epoch_disc_fake_acc, data_loader_len, l1_weight):\n print(' Generator: adversarial loss = {:.4f}, L1 loss = {:.4f}, full loss = {:.4f}'.format(\n epoch_gen_adv_loss / data_loader_len,\n epoch_gen_l1_loss / data_loader_len,\n (epoch_gen_adv_loss / data_loader_len)*(1.0-l1_weight) + (epoch_gen_l1_loss / data_loader_len)*l1_weight\n ))\n\n print(' Discriminator: loss = {:.4f}'.format(\n (epoch_disc_real_loss + epoch_disc_fake_loss) / (data_loader_len*2)\n ))\n\n print(' acc. = {:.4f} (real acc. = {:.4f}, fake acc. 
= {:.4f})'.format(\n (epoch_disc_real_acc + epoch_disc_fake_acc) / (data_loader_len*2),\n epoch_disc_real_acc / data_loader_len,\n epoch_disc_fake_acc / data_loader_len\n ))", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def _build_loss(self):\n self._build_loss_D()\n self._build_loss_G()\n tf.add_to_collection('losses', self.loss_D)\n tf.add_to_collection('losses', self.loss_G)", "def generator_loss(gen_images):\n output = disc_net(gen_images)\n cats = output.new_full(output.shape, real_label)\n return gen_loss_criterion(output, cats)", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def __call__(self):\n #Losses and optimizers\n for epoch in range(self.nb_epochs): # loop over the dataset multiple times\n self.train_loss = 0.0\n self.gan_loss = 0.0\n self.loss_discrim = 0.0\n val_loss = 0.0\n nb_data = 0.\n nb_data_val = 0.\n for i, data in enumerate(self.trainloader, 0):\n # get the batch; data is a list of [inputs, labels]\n inputs, real = data\n inputs, real = inputs.to(device), real.to(device)\n if i%self.discrimTrainPeriod==0:\n self.trainDiscrim(inputs, real)\n else:\n self.trainGen(inputs, real)\n nb_data += 1.\n #occasionnally save an example target/generated\n if i%self.displayPeriod==0:\n self.gen.eval()\n real = self.unNormalize(real[0,:,:,:].detach().cpu())\n self.transformToImage(real).save(self.targetFile)\n fake = self.gen(inputs)\n fake = self.unNormalize(fake[0,:,:,:].detach().cpu())\n self.transformToImage(fake).save(self.generatedFile)\n\n self.gen.eval()\n for i, data in enumerate(self.valloader, 0):\n with torch.no_grad():\n # get the inputs; data is a list of [inputs, labels]\n inputs, real = data\n inputs, real = inputs.to(device), real.to(device)\n #compute L1 loss\n fake = self.gen(inputs)\n lossGenL1 = self.criterionL1(fake, real)\n #statistics\n val_loss += lossGenL1.item()\n nb_data_val += 1.\n self.gan_loss = self.gan_loss / nb_data\n self.train_loss = self.train_loss / nb_data\n self.loss_discrim = self.loss_discrim / nb_data\n val_loss = val_loss / nb_data_val\n self.gan_loss_list.append(self.gan_loss)\n self.train_loss_list.append(self.train_loss)\n self.val_loss_list.append(val_loss)\n print(\"Epoch \", epoch, \"; train loss 
= \", self.train_loss,\n \"; val loss = \", val_loss, \"; gan loss = \", self.gan_loss,\n \"; loss discrim = \", self.loss_discrim)\n\n plt.plot(range(len(self.train_loss_list)), self.train_loss_list,\n self.val_loss_list, self.gan_loss_list)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Generator Loss\")\n plt.savefig(self.graphFile)\n #save the weights\n torch.save(self.gen.state_dict(), self.savefileGen)\n torch.save(self.discrim.state_dict(), self.savefileDiscrim)", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def build_losses(loss_config):\n \n classification_loss = _build_classification_loss(\n loss_config.classification_loss)\n localization_loss = _build_localization_loss(\n loss_config.localization_loss)\n classification_weight = loss_config.classification_weight\n localization_weight = loss_config.localization_weight\n hard_example_miner = None\n\n return (classification_loss, localization_loss,\n classification_weight,\n localization_weight, hard_example_miner)", "def gen_loss_orig(self, noise_samples):\n generator_samples = self.gen_model(noise_samples)\n logits_gen = self.disc_model(generator_samples)\n # loss = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros(logits_gen.shape), logits=logits_gen))\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones(logits_gen.shape), logits=logits_gen))\n return loss", "def generator_loss(self, disc_generated_output, gen_output, target):\n # Compute the loss function\n loss_function = self.loss_func()\n\n # Generated GAN loss\n gan_loss = loss_function(tf.ones_like(disc_generated_output), disc_generated_output)\n\n # L1 loss\n l1_loss = tf.reduce_mean(tf.abs(target - gen_output))\n\n # Total generator loss\n total_gen_loss = gan_loss + (self.lambd * l1_loss)\n return total_gen_loss, gan_loss, l1_loss", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def losses(self):\n losses = []\n for layer in self.layers:\n losses += layer.losses\n if context.in_eager_mode():\n return losses\n\n relevant_inputs = self.inputs or []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,\n losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n 
l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def compute_loss(self):", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def compute_loss(self, targets, logits, seq_length):\n\n\t\twith tf.name_scope('evaluate_loss'):\n\t\t\tloss, norm = self.loss_computer(targets, logits, seq_length)\n\t\t\t\n\t\treturn loss, norm", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def compute_loss_test_dqn(test_set_generators, list_players, value_net=None, list_experts=None, id_to_test=None):\n\n list_losses = []\n with torch.no_grad():\n if id_to_test is None:\n iterator = range(len(test_set_generators))\n else:\n iterator = [id_to_test]\n for k in iterator:\n target = []\n val_approx = []\n player = list_players[k]\n if list_experts is not None:\n try:\n target_net = list_experts[k]\n except IndexError:\n target_net = None\n elif value_net is not None:\n target_net = value_net\n if target_net is None:\n 
list_losses.append(0)\n else:\n for i_batch, batch_instances in enumerate(test_set_generators[k]):\n batch = batch_instances.G_torch.batch\n mask_values = batch_instances.J.eq(0)[:, 0]\n action_values = target_net(batch_instances.G_torch,\n batch_instances.n_nodes,\n batch_instances.Omegas,\n batch_instances.Phis,\n batch_instances.Lambdas,\n batch_instances.Omegas_norm,\n batch_instances.Phis_norm,\n batch_instances.Lambdas_norm,\n batch_instances.J,\n )\n action_values = action_values[mask_values]\n batch = batch[mask_values]\n # if it's the turn of the attacker\n if player == 1:\n # we take the argmin\n values, actions = scatter_min(action_values, batch, dim=0)\n else:\n # we take the argmax\n values, actions = scatter_max(action_values, batch, dim=0)\n val_approx.append(values)\n target.append(batch_instances.target)\n # Compute the loss\n target = torch.cat(target)\n val_approx = torch.cat(val_approx)\n loss_target_net = float(torch.sqrt(torch.mean((val_approx[:, 0] - target[:, 0]) ** 2)))\n list_losses.append(loss_target_net)\n\n return list_losses", "def compute_loss(self, obs, returns):", "def multi_discriminator_loss(disc_real_source, disc_real_target, disc_gen):\n\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n real_source_loss = loss_obj(tf.ones_like(disc_real_source), disc_real_source)\n real_target_loss = loss_obj(tf.ones_like(disc_real_target), disc_real_target)\n gen_loss = loss_obj(tf.zeros_like(disc_gen), disc_gen)\n return real_source_loss + real_target_loss + gen_loss", "def generator_loss(self, logits):\n if self.lossfunc == 'use_lsgan':\n # use mean squared error\n loss = tf.reduce_mean(tf.squared_difference(logits, REAL_LABEL))\n elif self.lossfunc == 'sigmoid_cross_entropy_with_logits':\n # heuristic, non-saturating loss\n # loss = tf.sigmoid(D(fake_y))\n # loss = -tf.reduce_mean(ops.safe_log(loss)) / 2\n \n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=tf.ones_like(logits))\n loss = tf.reduce_mean(loss)\n elif self.lossfunc == 'wgan':\n loss = -tf.reduce_mean(logits)\n return loss", "def vae_loss(gen_images, input_images, mu_sigmas):\n # List to aggregate binary cross-entropy reconstruction losses\n # from all of the image outputs:\n BCEs = []\n # List to aggregate KL divergence losses from each of the mu/sigma\n # projections:\n KLDs = []\n\n # TODO Your code goes here.\n\n return BCEs, KLDs", "def get_adv_losses(discriminator_real_outputs, discriminator_fake_outputs,\n kind):\n if kind == 'classic':\n loss_fn = classic_gan_losses\n elif kind == 'nonsaturating':\n loss_fn = nonsaturating_gan_losses\n elif kind == 'wasserstein':\n loss_fn = wasserstein_gan_losses\n elif kind == 'hinge':\n loss_fn = hinge_gan_losses\n return loss_fn(discriminator_real_outputs, discriminator_fake_outputs)", "def top_losses(self, n=4, val_data=None, preproc=None):\n\n # check validation data and arguments\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n if val is None:\n raise Exception(\"val_data must be supplied to get_learner or top_losses\")\n if type(n) == type(42):\n n = (0, n)\n\n # multilabel = True if U.is_multilabel(val) else False\n classification, multilabel = U.is_classifier(self.model)\n\n # get predicictions and ground truth\n y_pred = self.predict(val_data=val)\n y_true = self.ground_truth(val_data=val)\n y_true = y_true.astype(\"float32\")\n\n # adjust y_true for regression problems\n if (\n not classification\n and len(y_true.shape) == 1\n and (len(y_pred.shape) == 2 and y_pred.shape[1] == 1)\n ):\n 
y_true = np.expand_dims(y_true, -1)\n\n # compute loss\n # this doesn't work in tf.keras 1.14\n # losses = self.model.loss_functions[0](tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred))\n # if U.is_tf_keras():\n # L = self.model.loss_functions[0].fn\n # else:\n # L = self.model.loss_functions[0]\n L = U.loss_fn_from_model(self.model)\n losses = L(tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred))\n if DISABLE_V2_BEHAVIOR:\n losses = tf.Session().run(losses)\n else:\n losses = losses.numpy()\n\n class_names = [] if preproc is None else preproc.get_classes()\n if preproc is None:\n class_fcn = lambda x: \"%s\" % (x)\n else:\n class_fcn = lambda x: class_names[x]\n\n # regression output modifications\n if not classification:\n if len(y_pred.shape) == 2 and y_pred.shape[1] == 1:\n y_pred = np.squeeze(y_pred)\n y_pred = np.around(y_pred, 2)\n if len(y_true.shape) == 2 and y_true.shape[1] == 1:\n y_true = np.squeeze(y_true)\n y_true = np.around(y_true, 2)\n\n # sort by loss and prune correct classifications, if necessary\n if classification and not multilabel:\n y_pred = np.squeeze(y_pred)\n y_true = np.squeeze(y_true)\n if len(y_pred.shape) == 1:\n y_p = np.where(y_pred > 0.5, 1, 0)\n y_t = np.where(y_true > 0.5, 1, 0)\n else:\n y_p = np.argmax(y_pred, axis=1)\n y_t = np.argmax(y_true, axis=1)\n tups = [\n (i, x, class_fcn(y_t[i]), class_fcn(y_p[i]))\n for i, x in enumerate(losses)\n if y_p[i] != y_t[i]\n ]\n else:\n tups = [\n (i, x, y_true[i], np.around(y_pred[i], 2)) for i, x in enumerate(losses)\n ]\n tups.sort(key=operator.itemgetter(1), reverse=True)\n\n # prune by given range\n tups = tups[n[0] : n[1]] if n is not None else tups\n return tups", "def pseudo_loss(self, params, batches):\n loss = 0\n for batch in batches:\n states = batch[\"states\"]\n actions = batch[\"actions\"]\n returns = batch[\"returns\"]\n\n preds = self.predict_jax(params, states)\n\n baseline = jnp.mean(returns, axis=0)\n preds_select = jnp.take_along_axis(preds, jnp.expand_dims(actions, axis=2), axis=2).squeeze()\n loss += (-jnp.mean(jnp.sum(preds_select * (returns - baseline))))\n\n return loss + self.l2_regularizer(params, 0.001) # try to divide by len(batches)?", "def wasserstein_gan_losses(discriminator_real_outputs,\n discriminator_fake_outputs):\n generator_loss = -tf.reduce_mean(discriminator_fake_outputs)\n discriminator_loss = -generator_loss - tf.reduce_mean(\n discriminator_real_outputs)\n return generator_loss, discriminator_loss", "def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)", "def _compute_loss(self, loss_weights, init_image, gram_style_features,\n content_features):\n style_weight, content_weight, ta_weight = loss_weights\n\n # Feed our init image through our model. 
This will give us the content and\n # style representations at our desired layers.\n model_outputs = self.model(init_image)\n\n style_output_features = model_outputs[:self.num_style_layers]\n content_output_features = model_outputs[self.num_style_layers:]\n\n total_style_score = 0\n total_content_score = 0\n total_ta_score = 0\n # Accumulate style losses from all layers\n # Here, we equally weight each contribution of each loss layer\n averge_style_weight = 1.0 / float(self.num_style_layers)\n for target_style, comb_style in zip(gram_style_features,\n style_output_features):\n total_style_score += averge_style_weight * \\\n self._get_style_loss(comb_style[0], target_style)\n\n # Accumulate content losses from all layers\n average_content_weight = 1.0 / float(self.num_content_layers)\n for target_content, comb_content in zip(content_features,\n content_output_features):\n total_content_score += average_content_weight * \\\n self._get_content_loss(comb_content[0], target_content)\n # Get Variation loss of the image\n total_ta_score = self._get_total_variational_loss(\n init_image) * ta_weight\n total_style_score *= style_weight\n total_content_score *= content_weight\n\n # Get total loss\n total_loss = total_style_score + total_content_score + total_ta_score\n return total_loss, total_style_score, total_content_score", "def get_network_losses_variables_and_gradients(self, real_images):\n with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:\n # Get fake logits from generator.\n (fake_images,\n fake_logits,\n generator_loss) = self.generator_loss_phase(training=True)\n\n # Get discriminator loss.\n _, discriminator_loss = self.discriminator_loss_phase(\n fake_images, real_images, fake_logits, training=True\n )\n\n # Create empty dicts to hold loss, variables, gradients.\n loss_dict = {}\n vars_dict = {}\n grads_dict = {}\n\n # Loop over generator and discriminator.\n for (loss, gradient_tape, scope_name) in zip(\n [generator_loss, discriminator_loss],\n [gen_tape, dis_tape],\n [\"generator\", \"discriminator\"]\n ):\n # Get variables and gradients from generator wrt. 
loss.\n variables, gradients = self.get_variables_and_gradients(\n loss, gradient_tape, scope_name\n )\n\n # Add loss, variables, and gradients to dictionaries.\n loss_dict[scope_name] = loss\n vars_dict[scope_name] = variables\n grads_dict[scope_name] = gradients\n\n # Create variable and gradient histogram summaries.\n self.create_variable_and_gradient_histogram_summaries(\n variables, gradients, scope_name\n )\n\n return loss_dict, vars_dict, grads_dict", "def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss", "def compute_losses(self, predictions, targets):\n smpl_weight = targets['target_smpl_weight']\n\n losses = {}\n if self.loss_beta is not None:\n losses['loss_beta'] = self.loss_beta(\n predictions['pred_shape'] * smpl_weight,\n targets['target_beta'] * smpl_weight)\n if self.loss_theta is not None:\n pred_pose = rotmat_to_quat(predictions['pred_pose']).reshape(\n -1, 96)\n losses['loss_theta'] = self.loss_theta(\n pred_pose * smpl_weight * targets['target_theta_weight'],\n targets['target_theta'] * smpl_weight *\n targets['target_theta_weight'])\n if self.loss_twist is not None:\n losses['loss_twist'] = self.loss_twist(\n predictions['pred_phi'] * targets['target_twist_weight'],\n targets['target_twist'] * targets['target_twist_weight'])\n if self.loss_uvd is not None:\n pred_uvd = predictions['pred_uvd_jts']\n target_uvd = targets['target_uvd_29'][:, :pred_uvd.shape[1]]\n target_uvd_weight = targets['target_weight_29'][:, :pred_uvd.\n shape[1]]\n losses['loss_uvd'] = self.loss_uvd(\n 64 * predictions['pred_uvd_jts'],\n 64 * target_uvd,\n target_uvd_weight,\n avg_factor=target_uvd_weight.sum())\n\n return losses", "def get_loss(self, outputs, labels):\n try:\n assert self._loss in ['mse','mae','l1','l2','huber','logcosh','bce','contrastive'], 'Specify correct loss function'\n except AssertionError as msg:\n sys.exit(msg)\n \n if self._loss == 'mse' or self._loss == 'l2':\n # L2 loss function\n self.criterion = lambda x,y: torch.pow(x - y,2)\n loss = self.criterion(outputs, labels)\n \n # Adding up the losses (L1 loss) or meaning the losses (MAE loss)\n # of all batch instances\n if self._loss == 'mse':\n loss = torch.mean(loss)\n elif self._loss == 'l2':\n loss = torch.sum(loss)\n \n elif self._loss == 'mae' or self._loss == 'l1':\n # L1 loss function\n self.criterion = lambda x,y: torch.abs(x - y)\n loss = self.criterion(outputs, labels)\n \n # Adding up the losses (L1 loss) or meaning the losses (MAE loss)\n # of all batch instances\n if self._loss == 'mae':\n loss = torch.mean(loss)\n elif self._loss == 'l1':\n loss = torch.sum(loss)\n \n elif self._loss == 'huber':\n # Huber loss function\n self.criterion = torch.nn.SmoothL1Loss()\n loss = self.criterion(outputs.float(), labels.float())\n \n # Adding up the losses of all batch instances\n loss = torch.mean(loss)\n \n elif self._loss == 'logcosh':\n # Log-cosh loss function\n loss = torch.log(torch.cosh(outputs.float() - labels.float()))\n \n # Adding up the losses of all batch instances\n loss = torch.sum(loss) \n \n elif self._loss == 'bce':\n if self._dist_fn == 'cos':\n self.criterion = nn.BCEWithLogitsLoss()\n else:\n self.criterion = nn.BCELoss()\n loss = self.criterion(outputs.float(), labels.float())\n \n elif self._loss == 'contrastive':\n margin = 1\n loss = torch.sum((1-labels) * 
torch.pow(outputs,2)+ labels * torch.pow(torch.clamp(margin - outputs, min = 0.0),2))\n\n return loss", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def _build_loss(self, results, features, labels):\n losses, loss = getters.get_loss(\n self.loss.IDENTIFIER, results, labels, **self.loss.to_dict())\n self._loss = loss\n self._losses = losses\n\n other_losses = get_tracked(tf.GraphKeys.REGULARIZATION_LOSSES)\n if other_losses:\n loss = [loss] + other_losses\n loss = tf.add_n(loss, name=\"TotalLoss\")\n self._total_loss = loss\n return losses, loss", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def _loss(W):\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss", "def classic_gan_losses(discriminator_real_outputs, discriminator_fake_outputs):\n discriminator_loss_real = tf.losses.sigmoid_cross_entropy(\n tf.ones_like(discriminator_real_outputs), discriminator_real_outputs)\n discriminator_loss_fake = tf.losses.sigmoid_cross_entropy(\n tf.zeros_like(discriminator_fake_outputs), discriminator_fake_outputs)\n discriminator_loss = discriminator_loss_real + discriminator_loss_fake\n generator_loss = -discriminator_loss_fake\n return generator_loss, discriminator_loss", "def mse_loss(angles_gt, angles_gen):\n loss = (angles_gt - angles_gen)**2\n # loss = torch.sum(loss, dim=-1) # sum loss over dimensions\n loss = torch.mean(loss, dim=-1) # mean loss over images per task\n loss = torch.mean(loss, dim=-1) # mean loss over tasks\n return loss", "def compute_depth_losses(self, inputs, outputs, losses):\n outputs_l, outputs_r = outputs\n\n depth_pred_l = outputs_l[\"depth\"]\n depth_pred_r = outputs_r[\"depth\"]\n\n for metric in self.depth_metric_names:\n losses[metric] = 0.\n\n for i, depth_pred in enumerate([depth_pred_l, depth_pred_r]):\n depth_pred = torch.clamp(F.interpolate(\n depth_pred, [375, 1242], mode=\"bilinear\", align_corners=False), 1e-3, 80)\n depth_pred = depth_pred.detach()\n\n s = \"l\" if i == 0 else \"r\"\n depth_gt = 
inputs[\"depth_gt_\" + s]\n mask = depth_gt > 0\n\n # garg/eigen crop\n crop_mask = torch.zeros_like(mask)\n crop_mask[:, :, 153:371, 44:1197] = 1\n mask = mask * crop_mask\n\n depth_gt = depth_gt[mask]\n depth_pred = depth_pred[mask]\n depth_pred *= torch.median(depth_gt) / torch.median(depth_pred)\n\n depth_pred = torch.clamp(depth_pred, min=1e-3, max=80)\n\n depth_errors = compute_depth_errors(depth_gt, depth_pred)\n\n for i, metric in enumerate(self.depth_metric_names):\n losses[metric] += np.array(depth_errors[i].cpu())/2", "def create_loss_and_optimizer(net, learning_rate=0.01):\n loss = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.99, weight_decay=5 * 1e-4)\n return loss, optimizer", "def get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon):\n fake = gen(condition)\n disc_fake_pred = disc(fake, condition)\n adv_loss = adv_criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))\n recon_loss = recon_criterion(real, fake)\n gen_loss = adv_loss + (recon_loss * lambda_recon)\n return gen_loss", "def train(self):\n d_loss = []\n g_loss = []\n for index, (real, _) in enumerate(self.data_loader):\n d_loss.append(self._train_discriminator(real))\n\n # Every n_critic batches train the generator.\n if index % self.params.n_critic == 0:\n g_loss.append((self._train_generator()))\n\n return d_loss, g_loss", "def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),\n tf.train.polynomial_decay(starter_learning_rate,\n global_step-start_decay_step,\n decay_steps, end_learning_rate,\n power=1.0),\n starter_learning_rate))\n\n learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)\n .minimize(loss, global_step=global_step, var_list=variables))\n\n return learning_step\n\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')\n\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):\n return tf.no_op(name='optimizers')", "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif 
self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def hinge_gan_losses(discriminator_real_outputs, discriminator_fake_outputs):\n generator_loss = -tf.reduce_mean(discriminator_fake_outputs)\n discriminator_loss = (\n tf.reduce_mean(tf.nn.relu(1. - discriminator_real_outputs))\n + tf.reduce_mean(tf.nn.relu(1. + discriminator_fake_outputs)))\n return generator_loss, discriminator_loss", "def generator_loss(self, fake_images=None, real_images=None, fake_output=None, l1_lambda=100, loss_strategy='both'):\n #TODO with try/except\n assert loss_strategy in ['GAN', 'L1', 'both'], \"Error: invalid type of loss. 
Should be 'GAN', 'L1' or 'both'\"\n if loss_strategy == \"GAN\":\n fake_loss = self.cross_entropy(ones_like(fake_output), fake_output)\n return fake_loss\n elif loss_strategy == \"L1\":\n L1_loss = l1_lambda*self.l1(real_images, fake_images)\n return L1_loss\n elif loss_strategy == 'both':\n fake_loss = self.cross_entropy(ones_like(fake_output), fake_output)\n L1_loss = self.l1(real_images, fake_images)\n return fake_loss + l1_lambda*L1_loss", "def compute_losses_test_advanced(model: torch.nn.Module, test_loader: torch.utils.data.DataLoader,\r\n criterion: torch.nn.CrossEntropyLoss):\r\n with torch.no_grad():\r\n loss_sum0_test = 0\r\n loss_sum1_test = 0\r\n loss_sumclass_test = 0\r\n loss_sumtot_test = 0\r\n for input_data, target_data, class_data in iter(test_loader):\r\n output, out_aux = model(input_data)\r\n loss_out = criterion(output, target_data)\r\n loss_aux0 = criterion(out_aux[0], class_data[:, 0])\r\n loss_aux1 = criterion(out_aux[1], class_data[:, 1])\r\n loss = loss_out + loss_aux0 + loss_aux1\r\n\r\n loss_sum0_test += loss_aux0\r\n loss_sum1_test += loss_aux1\r\n loss_sumclass_test += loss_out\r\n loss_sumtot_test += loss\r\n\r\n return loss_sum0_test / (len(test_loader.dataset) / test_loader.batch_size), \\\r\n loss_sum1_test / (len(test_loader.dataset) / test_loader.batch_size), \\\r\n loss_sumclass_test / (len(test_loader.dataset) / test_loader.batch_size), \\\r\n loss_sumtot_test / (len(test_loader.dataset) / test_loader.batch_size)", "def discriminator_loss(gen_images, real_images):\n real = real_images.new_full((real_images.shape[0], 1), real_label)\n gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)\n\n realloss = disc_loss_criterion(disc_net(real_images), real)\n genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)\n\n return (genloss + realloss) / 2", "def gen_loss_wasserstein(self, noise_samples):\n generator_samples = self.gen_model(noise_samples)\n logits_gen = self.disc_model(generator_samples)\n\n loss = -tf.reduce_mean(logits_gen)\n return loss", "def generator_loss(discriminator, fake_images, real_labels, con_aug):\n\n discriminator.train()\n criterion = nn.BCELoss()\n condition = con_aug.detach()\n fake_img_fea = discriminator(fake_images)\n fake_logits = discriminator.conditioned_result(fake_img_fea, condition)\n fake_error = criterion(fake_logits, real_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n If it is a stage 2 discriminator then an additional error due to the\n score calculated from image features alone is added to the above error\n for loss calculation.\n \"\"\"\n fake_logits1 = discriminator.unconditioned_result(fake_img_fea)\n uncond_fake_error = criterion(fake_logits1, real_labels)\n fake_error += uncond_fake_error\n return fake_error", "def calc_transmission_losses (self):\n #~ print self.generation_wind_proposed, self.cd['line losses']\n self.transmission_losses = self.generation_wind_proposed * \\\n (self.cd['line losses'] / 100.0)\n #~ print 'self.transmission_losses',self.transmission_losses", "def get_losses(self):\n if self.loss is not None:\n return [self.loss]\n else:\n return []", "def get_batch_loss(self, inputs):\n inputs = inputs.copy()\n\n if 'site' in inputs.keys():\n inputs['site'] = inputs['site'].squeeze(-1)\n\n for input_name in inputs.keys():\n inputs[input_name] = inputs[input_name].to(self.net.device)\n\n net_pred = self.net(inputs['sh'], inputs['mean_b0'])\n inputs.update(net_pred)\n\n inputs_needed = [(inp, loss['detach_input'])\n for loss in self.losses + 
self.style_losses\n for inp in loss['inputs']]\n for input_needed, detach_input in inputs_needed:\n inputs = compute_modules(\n input_needed, inputs,\n {'autoencoder': self.net, **self.adversarial_net},\n self.modules,\n detach_input=detach_input)\n\n for net_name, adv_net in self.adversarial_net.items():\n if not any(net_name in s for s in inputs_needed):\n # If we do not need the output of that network\n continue\n\n net_inputs = [name + '_fake' if name not in ['mask'] else name\n for name in adv_net.inputs]\n feat_net_pred = adv_net.forward(\n *(inputs[name] for name in net_inputs))\n\n # Add the name of the network to the keys\n inputs.update({k + '_fake_' + net_name: v\n for k, v in feat_net_pred.items()})\n\n loss_dict = {}\n\n batch_loss_reconst = []\n for loss_d in self.losses:\n loss = loss_d['fun'](*[inputs[name] for name in loss_d['inputs']])\n loss = loss_d['coeff'] * loss\n batch_loss_reconst.append(loss)\n loss_dict[loss_d['type'] + '_' + loss_d['inputs'][0]] = loss\n if len(self.losses) != 0:\n batch_loss_reconst = torch.stack(batch_loss_reconst, dim=0).sum()\n loss_dict['reconst_loss'] = batch_loss_reconst\n else:\n batch_loss_reconst = 0\n\n batch_loss_style = []\n for loss_d in self.style_losses:\n loss = loss_d['fun'](*[inputs[name] for name in loss_d['inputs']])\n loss = loss_d['coeff'] * loss\n batch_loss_style.append(loss)\n loss_dict[loss_d['type'] + '_' + loss_d['inputs'][0]] = loss\n if len(self.style_losses) != 0:\n batch_loss_style = torch.stack(batch_loss_style, dim=0).sum()\n loss_dict['style_loss'] = batch_loss_style\n else:\n batch_loss_style = 0\n\n batch_loss = batch_loss_reconst + batch_loss_style\n loss_dict['batch_loss'] = batch_loss\n\n return inputs, loss_dict", "def _get_loss(self, states):\n states = self.normalization_layer(states)\n rnd_pred = self.rnd(states)\n\n with torch.no_grad():\n rnd_target = self.rnd_target(states)\n\n rnd_loss = self.rnd_loss_func(rnd_pred, rnd_target)\n\n return rnd_loss", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def training_losses(self):\r\n if self._training_losses is None:\r\n # Builds the per-task metrics and losses.\r\n # This the total summed training loss of tasks in the joint training.\r\n self._training_losses = dict(\r\n total_loss=tf.keras.metrics.Mean(\"training_loss\", dtype=tf.float32))\r\n for name in self.multi_task.tasks:\r\n self._training_losses[name] = tf.keras.metrics.Mean(\r\n \"training_loss\", dtype=tf.float32)\r\n return self._training_losses", "def generator_loss_calculation(self, fake_examples, _):\n _, fake_scores = self.D(fake_examples)\n criterion = BCEWithLogitsLoss()\n generator_loss = criterion(fake_scores, torch.zeros_like(fake_scores))\n return generator_loss", "def get_loss(self, x):\n x = self.normalize(x)\n pernalty_func = 0\n if self.coincide_fun is not None:\n tmp_res = self.coincide_fun(x)\n for i in range(self.cons_num):\n cons = self.constrain[i]\n self.loss_list[i] = cons(x, tmp_res)\n pernalty_tmp = self.pow(self.relu(self.mul_k[i] / self.sigma_k + self.loss_list[i]), self.pow_rate)\\\n - self.pow((self.mul_k[i] / self.sigma_k), self.pow_rate)\n pernalty_func += pernalty_tmp\n objective_val = self.objective_func(x, tmp_res)\n else:\n for i in range(self.cons_num):\n cons = self.constrain[i]\n self.loss_list[i] = cons(x)\n pernalty_tmp = self.pow(self.relu(self.mul_k[i] / self.sigma_k + self.loss_list[i]), self.pow_rate)\\\n - self.pow((self.mul_k[i] / self.sigma_k), self.pow_rate)\n pernalty_func += pernalty_tmp\n 
objective_val = self.objective_func(x)\n loss1 = self.obj_weight * objective_val\n lagrangian_func = loss1 + self.sigma_k / 2 * pernalty_func\n res = [lagrangian_func, self.loss_list, objective_val, x]\n return res", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def yield_loss(self, outputs, targets):\n return torch.sqrt(nn.MSELoss()(outputs, targets))", "def calculate_loss(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, \\\r\n char_seq_recover, batch_label, mask, vfeature_input, v_modal_label, t_modal_label, v_mask, alpha=0):\r\n outs, textual_clf_outputs, visual_clf_outputs = self._get_lstm_features(word_inputs, feature_inputs,\r\n word_seq_lengths, char_inputs,\r\n char_seq_lengths, char_seq_recover,\r\n vfeature_input, v_mask, alpha)\r\n batch_size = word_inputs.size(0)\r\n seq_len = word_inputs.size(1)\r\n # crf\r\n ner_loss = self.crf.neg_log_likelihood_loss(outs, mask, batch_label)\r\n scores, tag_seq = self.crf._viterbi_decode(outs, mask) # batch,\r\n if self.average_batch:\r\n ner_loss = ner_loss / batch_size\r\n\r\n # modal clf loss\r\n # visual_clf_outputs: (obj*b, 2) mask (b, obj) ->\r\n visual_loss = self.modal_clf_loss(visual_clf_outputs, v_modal_label)\r\n textual_loss = self.modal_clf_loss(textual_clf_outputs, t_modal_label)\r\n\r\n total_loss = self.ner_loss_lambda * ner_loss + self.v_loss_lambda * visual_loss \\\r\n + self.t_loss_lambda * textual_loss\r\n\r\n return total_loss, \\\r\n self.ner_loss_lambda * ner_loss, \\\r\n self.t_loss_lambda * textual_loss, \\\r\n self.v_loss_lambda * visual_loss, \\\r\n tag_seq, textual_clf_outputs, visual_clf_outputs", "def compute_loss(self, batch, y_next_true):\n\n # Get the output of the gru layer for the input which serves as input to the reconstruction + forecasting model\n gru_output = self.model(batch, training=True)\n\n # Forecasting model loss calculation\n # Using mse yields the same result as RMSE and is more stable\n y_next_pred = self.model.forecasting_model(gru_output, training=True)\n y_next_pred = y_next_pred[:, -1, :] # only get the prediction for the last timestamp\n\n mse_for = tf.keras.losses.MeanSquaredError()\n loss_for = mse_for(y_next_true, y_next_pred)\n\n # Reconstruction model loss calculation\n # Like VAE based on: https://bit.ly/3oRMiQz\n mse_rec = tf.keras.losses.MeanSquaredError()\n reconstructed_output = self.model.reconstruction_model(gru_output)\n reconstruction_target = gru_output if 'reconstruct_gru' in self.hyper.variants else batch\n\n loss_rec = mse_rec(reconstruction_target, reconstructed_output)\n loss_rec += sum(self.model.reconstruction_model.losses) # Add KLD regularization loss\n\n # Overall loss\n loss = loss_for + loss_rec\n\n return loss", "def get_loss(self, inputs, outputs, add_summary=True):\n cfg = self.cfg()\n torch.autograd.set_detect_anomaly(True)\n # g_loss = 
tf.zeros(dtype=tf.float32, shape=[])\n g_loss = self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n r_loss = self.regularization_loss(cfg)\n# print(g_loss, r_loss)\n g_loss += r_loss\n # if cfg.proj_weight:\n # g_loss += self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n\n # if cfg.drc_weight:\n # g_loss += add_drc_loss(cfg, inputs, outputs, cfg.drc_weight, add_summary)\n #\n # if cfg.pc_rgb:\n # g_loss += add_proj_rgb_loss(cfg, inputs, outputs, cfg.proj_rgb_weight, add_summary, self._sigma_rel)\n #\n # if cfg.proj_depth_weight:\n # g_loss += add_proj_depth_loss(cfg, inputs, outputs, cfg.proj_depth_weight, self._sigma_rel, add_summary)\n #\n # if add_summary:\n # tf.contrib.summary.scalar(\"losses/total_task_loss\", g_loss)\n\n return g_loss", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def train_step_discriminators(self, X):\n self.discriminator_image.zero_grad()\n self.discriminator_latent.zero_grad()\n\n Z = self.noise_fn(self.batch_size)\n\n with torch.no_grad():\n X_hat = self.generator(Z)\n Z_hat = self.encoder(X)\n X_tilde = self.generator(Z_hat)\n Z_tilde = self.encoder(X_hat)\n\n X_confidence = self.discriminator_image(X)\n X_hat_confidence = self.discriminator_image(X_hat)\n X_tilde_confidence = self.discriminator_image(X_tilde)\n Z_confidence = self.discriminator_latent(Z)\n Z_hat_confidence = self.discriminator_latent(Z_hat)\n Z_tilde_confidence = self.discriminator_latent(Z_tilde)\n\n X_loss = 2 * self.criterion_gen(X_confidence, self.target_ones)\n X_hat_loss = self.criterion_gen(X_hat_confidence, self.target_zeros)\n X_tilde_loss = self.criterion_gen(X_tilde_confidence, self.target_zeros)\n Z_loss = 2 * self.criterion_gen(Z_confidence, self.target_ones)\n Z_hat_loss = self.criterion_gen(Z_hat_confidence, self.target_zeros)\n Z_tilde_loss = self.criterion_gen(Z_tilde_confidence, self.target_zeros)\n\n loss_images = (X_loss + X_hat_loss + X_tilde_loss) / 4\n loss_latent = (Z_loss + Z_hat_loss + Z_tilde_loss) / 4\n loss = loss_images + loss_latent\n\n loss.backward()\n self.optim_di.step()\n self.optim_dl.step()\n\n return loss_images.item(), loss_latent.item()", "def train(X_train, y_train, X_test, y_test, net):\n \n # convert X, y to tensors:\n X_train = torch.tensor(X_train, dtype=torch.float32)\n y_train = torch.tensor(y_train, dtype=torch.float32)\n \n X_test = torch.tensor(X_test, dtype=torch.float32)\n y_test = torch.tensor(y_test, dtype=torch.float32)\n\n # iterator:\n train_set = TensorDataset(X_train, y_train)\n train_loader = DataLoader(train_set, batch_size, shuffle=True)\n\n test_set = TensorDataset(X_test, y_test)\n test_loader = DataLoader(test_set, batch_size, shuffle=True)\n\n # optimizer:\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n loss = nn.MSELoss()\n\n # loss accumulator:\n time_line = []\n train_metric = []\n test_metric = []\n\n # loop:\n for epoch in range(epochs):\n # update parameters:\n for Xb, yb in train_loader:\n train_ls = loss(net(Xb), yb)\n optimizer.zero_grad()\n train_ls.backward()\n optimizer.step()\n # update train and test losses:\n with torch.no_grad():\n if not epoch % 50:\n time_line.append(epoch)\n metric = 0\n for Xb, yb in train_loader:\n metric += 
loss(net(Xb), yb) / batch_size\n train_metric.append(metric)\n metric = 0\n for Xb, yb in test_loader:\n metric += loss(net(Xb), yb) / batch_size\n test_metric.append(metric)\n # verbose:\n print('Epoch: ', epoch)\n\n # final report of the losses: \n print('Train loss.....{0:6.3f}'.format(train_metric[-1]))\n print('Test loss......{0:6.3f}'.format(test_metric[-1]))\n\n # plot losses with respect to epochs:\n plt.plot(time_line, train_metric, color='b')\n plt.plot(time_line, test_metric, color='r')\n plt.show()", "def _define_generator_loss(self):\n loss = tf.reduce_mean(self._gen_discriminator_out)\n return tf.negative(loss, name='generator_loss')", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def generator_loss(self, D_output_fake, gan_mode='lsgan', maxloss='mean'):\n if gan_mode == 'lsgan':\n if maxloss == 'mean':\n # use mean squared error\n loss = tf.reduce_mean(tf.squared_difference(D_output_fake, REAL_LABEL))\n elif maxloss == 'max':\n # use max squared error\n loss = tf.reduce_max(tf.squared_difference(D_output_fake, REAL_LABEL))\n elif maxloss == 'softmax':\n #use softmax squared error\n loss_map = (tf.squared_difference(D_output_fake, REAL_LABEL))\n batchsize = loss_map.get_shape()[0].value\n reshaped_loss_map = tf.reshape(loss_map, shape=[batchsize, -1])\n softmax_weight = tf.nn.softmax(reshaped_loss_map, dim=1)\n loss = tf.reduce_sum(softmax_weight * reshaped_loss_map)\n elif maxloss == 'focal':\n loss_map = (tf.squared_difference(D_output_fake, REAL_LABEL) +\n tf.square(D_output_fake)) / 2\n loss_map_shape = loss_map.get_shape()\n D_output_fake_shape = D_output_fake.get_shape()\n prob_weight = (1 - D_output_fake) * 1.5 # here debug the prob coef\n print 'loss_map_shape:', loss_map_shape\n print 'D_output_fake_shape:', D_output_fake_shape\n loss = tf.reduce_mean(prob_weight * loss_map)\n\n elif gan_mode == 'lcgan':\n loss = tf.reduce_mean(tf.pow(tf.abs(tf.subtract(D_output_fake, REAL_LABEL)), 3))\n elif gan_mode == 'gan':\n # heuristic, non-saturating loss\n loss = -tf.reduce_mean(ops.safe_log(D_output_fake)) / 2\n elif gan_mode == 'gan_logits':\n if self.patchgan:\n constant05 = tf.constant(0.5, shape=(self.batch_size, 64))\n loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(constant05, D_output_fake))\n else:\n constant05 = tf.constant(0.5, shape=(self.batch_size, 1))\n loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(constant05, D_output_fake))\n elif gan_mode == 'wgangp':\n fake_result = D_output_fake\n g_loss = - tf.reduce_mean(fake_result) # This optimizes the generator.\n return g_loss\n else:\n print 'unknown gan mode %s' % gan_mode\n exit(0)\n return loss", "def train_step_generators(self, X):\n self.generator.zero_grad()\n self.encoder.zero_grad()\n\n Z = self.noise_fn(self.batch_size)\n\n X_hat = self.generator(Z)\n Z_hat = self.encoder(X)\n X_tilde = self.generator(Z_hat)\n Z_tilde = self.encoder(X_hat)\n\n X_hat_confidence = self.discriminator_image(X_hat)\n Z_hat_confidence = self.discriminator_latent(Z_hat)\n X_tilde_confidence = self.discriminator_image(X_tilde)\n Z_tilde_confidence = self.discriminator_latent(Z_tilde)\n\n X_hat_loss = self.criterion_gen(X_hat_confidence, self.target_ones)\n Z_hat_loss = self.criterion_gen(Z_hat_confidence, self.target_ones)\n X_tilde_loss = 
self.criterion_gen(X_tilde_confidence, self.target_ones)\n Z_tilde_loss = self.criterion_gen(Z_tilde_confidence, self.target_ones)\n\n X_recon_loss = self.criterion_recon_image(X_tilde, X) * ALPHA_RECONSTRUCT_IMAGE\n Z_recon_loss = self.criterion_recon_latent(Z_tilde, Z) * ALPHA_RECONSTRUCT_LATENT\n\n X_loss = (X_hat_loss + X_tilde_loss) / 2 * ALPHA_DISCRIMINATE_IMAGE\n Z_loss = (Z_hat_loss + Z_tilde_loss) / 2 * ALPHA_DISCRIMINATE_LATENT\n loss = X_loss + Z_loss + X_recon_loss + Z_recon_loss\n\n loss.backward()\n self.optim_e.step()\n self.optim_g.step()\n\n return X_loss.item(), Z_loss.item(), X_recon_loss.item(), Z_recon_loss.item()", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def loss_fn(self, targets, outputs, model):", "def compute_test():\n model.eval()\n sets = list(features.keys())\n for dset, loaders in zip(sets, [train_loaders, val_loaders, test_loaders]):\n final_specific_loss = 0\n final_total_loss = 0\n for loader in loaders:\n loader_total_loss = 0\n loader_specific_loss = 0\n for data in loader:\n output = model(data.to(device))\n specific_loss = specific_loss_torch_geom(output, (data.pos, data.y),\n data.batch, batch_sizes[dset]).detach()\n loader_specific_loss += specific_loss\n loader_total_loss += torch.mean(specific_loss)\n # Average the loss over each loader\n loader_specific_loss /= len(loader)\n loader_total_loss /= len(loader)\n # Average the loss over the different loaders\n final_specific_loss += loader_specific_loss / len(loaders)\n final_total_loss += loader_total_loss / len(loaders)\n del output, loader_specific_loss\n\n print(\"Test set results \", dset, \": loss= {:.4f}\".format(final_total_loss))\n print(dset, \": \", final_specific_loss)\n print(\"Results in log scale\", np.log10(final_specific_loss.detach().cpu()),\n np.log10(final_total_loss.detach().cpu().numpy()))\n if args.wandb:\n wandb.run.summary[\"test results\"] = np.log10(final_specific_loss.detach().cpu())\n 
# free unnecessary data\n\n\n final_specific_numpy = np.log10(final_specific_loss.detach().cpu())\n del final_total_loss, final_specific_loss\n torch.cuda.empty_cache()\n return final_specific_numpy", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def loss(self, predicts, labels, objects_num):\n\n def cond1(num, object_num, loss, predict, label, nilboy):\n return num < object_num\n\n class_loss = tf.constant(0, tf.float32)\n object_loss = tf.constant(0, tf.float32)\n noobject_loss = tf.constant(0, tf.float32)\n coord_loss = tf.constant(0, tf.float32)\n loss = [0, 0, 0, 0]\n for i in range(self.batch_size):\n predict = predicts[i, :, :, :]\n label = labels[i, :, :]\n object_num = objects_num[i]\n nilboy = tf.ones([self.cell_size, self.cell_size, 1])\n tuple_results = tf.while_loop(cond1, self.body1,\n [\n tf.constant(0), object_num,\n [class_loss, object_loss, noobject_loss, coord_loss],\n predict, label, nilboy\n ])\n for j in range(4):\n loss[j] = loss[j] + tuple_results[2][j]\n nilboy = tuple_results[5]\n\n tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)\n\n tf.summary.scalar('class_loss', loss[0] / self.batch_size)\n tf.summary.scalar('object_loss', loss[1] / self.batch_size)\n tf.summary.scalar('noobject_loss', loss[2] / self.batch_size)\n tf.summary.scalar('coord_loss', loss[3] / self.batch_size)\n tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (\n loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)\n\n return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def get_loss(self, outputs, targets, masks, joints):\n losses = dict()\n heatmaps_losses, push_losses, pull_losses = self.loss(outputs, targets, masks, joints)\n for idx in range(len(targets)):\n if heatmaps_losses[idx] is not None:\n heatmaps_loss = heatmaps_losses[idx].mean(dim=0)\n if 'heatmap_loss' not in losses:\n losses['heatmap_loss'] = heatmaps_loss\n else:\n losses['heatmap_loss'] += heatmaps_loss\n if push_losses[idx] is not None:\n push_loss = push_losses[idx].mean(dim=0)\n if 'push_loss' not in losses:\n losses['push_loss'] = push_loss\n else:\n losses['push_loss'] += push_loss\n if pull_losses[idx] is not None:\n pull_loss = pull_losses[idx].mean(dim=0)\n if 'pull_loss' not in losses:\n losses['pull_loss'] = pull_loss\n else:\n losses['pull_loss'] += pull_loss\n return losses", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def calculate_loss(estimated_separation, true_separation, mask, true_latents, estimated_mix, true_mix, args):\n stats = torch.zeros(7)\n sdr = sdr_objective(estimated_separation, true_separation, mask)\n stats[:4] = sdr\n total_loss = -sdr.sum()\n reconstruction_sdr = sdr_objective(estimated_mix, true_mix).mean() if args.reconstruction_loss_weight > 0 else 0.0\n stats[4] = reconstruction_sdr\n total_loss += -args.reconstruction_loss_weight * reconstruction_sdr\n if args.similarity_loss_weight > 0.0 or args.dissimilarity_loss_weight > 0.0:\n mask = mask.squeeze(-1)\n true_latents = true_latents * mask.unsqueeze(-1).unsqueeze(-1)\n true_latents = true_latents.transpose(0, 1)\n dissimilarity = dissimilarity_loss(true_latents, mask) if args.dissimilarity_loss_weight > 0.0 else 0.0\n stats[5] = dissimilarity\n total_loss += args.dissimilarity_loss_weight * dissimilarity\n similarity = similarity_loss(true_latents, mask) if args.similarity_loss_weight > 0.0 else 0.0\n stats[6] = similarity\n total_loss += -args.similarity_loss_weight * similarity\n return total_loss, stats", "def losses(self, win2: ArrayLike, lose2: ArrayLike) -> ArrayLike:\n return -1 * torch.log(self.probs(win2, lose2))", "def _get_losses(self):\n # Fast-path already loaded\n if self.__losses is not None:\n return self.__losses\n # Initialize the dictionary\n self.__losses = dict()\n # Simply populate this dictionary\n for name in dir(torch.nn.modules.loss):\n if len(name) < 5 or name[0] 
== \"_\" or name[-4:] != \"Loss\": # Heuristically ignore non-loss members\n continue\n builder = getattr(torch.nn.modules.loss, name)\n if isinstance(builder, type): # Still an heuristic\n self.__losses[name[:-4].lower()] = self._make_drop_params(builder)\n # Add/replace the l1 and l2 losses\n self.__losses[\"l1\"] = self._l1loss_builder\n self.__losses[\"l2\"] = self._l2loss_builder\n # Return the dictionary\n return self.__losses", "def _validation_loss(self) -> Tuple[float, int]:\n logger.info(\"Validating\")\n\n self.model.eval()\n\n # Replace parameter values with the shadow values from the moving averages.\n if self._moving_average is not None:\n self._moving_average.assign_average_value()\n\n if self._validation_iterator is not None:\n val_iterator = self._validation_iterator\n else:\n val_iterator = self.iterator\n\n num_gpus = len(self._cuda_devices)\n\n raw_val_generator = val_iterator(self._validation_data,\n num_epochs=1,\n shuffle=False)\n val_generator = lazy_groups_of(raw_val_generator, num_gpus)\n num_validation_batches = math.ceil(val_iterator.get_num_batches(self._validation_data)/num_gpus)\n val_generator_tqdm = Tqdm.tqdm(val_generator,\n total=num_validation_batches)\n batches_this_epoch = 0\n val_loss = 0\n for batch_group in val_generator_tqdm:\n\n output_dict = self.get_output_dict(batch_group, for_training=False)\n loss = self.get_batch_loss(output_dict, for_training=False)\n\n if loss is not None:\n # You shouldn't necessarily have to compute a loss for validation, so we allow for\n # `loss` to be None. We need to be careful, though - `batches_this_epoch` is\n # currently only used as the divisor for the loss function, so we can safely only\n # count those batches for which we actually have a loss. If this variable ever\n # gets used for something else, we might need to change things around a bit.\n batches_this_epoch += 1\n val_loss += loss.detach().cpu().numpy()\n\n # Update the description with the latest metrics\n val_metrics = training_util.get_metrics(self.model, val_loss, batches_this_epoch)\n description = training_util.description_from_metrics(val_metrics)\n val_generator_tqdm.set_description(description, refresh=False)\n\n # Now restore the original parameter values.\n if self._moving_average is not None:\n self._moving_average.restore()\n\n return val_loss, batches_this_epoch", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = self.calculate_average_loss(self.validation_dataloader)", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 
4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def output_loss_and_grads(self, h, V, c, y):\n\n loss, dh, dV, dc = 0.0, [], np.zeros_like(self.V), np.zeros_like(self.c)\n # calculate the output (o) - unnormalized log probabilities of classes\n # calculate yhat - softmax of the output\n # calculate the cross-entropy loss\n # calculate the derivative of the cross-entropy softmax loss with respect to the output (o)\n # calculate the gradients with respect to the output parameters V and c\n # calculate the gradients with respect to the hidden layer h\n for t in range(self.sequence_length):\n hp = h[:, t, :] # BS x H\n #o = self.output(hp, V, c) # leng x BS\n o = self.output(hp, V, c) # BS x leng\n #exp = np.exp(o) # leng x BS\n exp = np.exp(o) # BS x leng\n #s = exp / np.sum(exp, axis=0, keepdims=True) # leng x BS\n s = exp / np.sum(exp, axis=1, keepdims=True) # BS x leng\n yp = y[:, t, :]\n #dO = s - yp # leng x BS\n dO = s - yp # BS x leng\n #dV += np.dot(dO, hp.T) # ( leng x BS ) * ( H x BS ).T = leng x H\n dV += np.dot(hp.T, dO) # ( BS x H ).T * ( BS x leng ) = H x leng\n #dc += np.sum(dO, axis=1).reshape([-1, 1]) #\n dc += np.sum(dO, axis=0).reshape([1, -1]) #\n #dh.append(np.dot(self.V.T, dO)) # ( leng x H ).T * ( leng x BS ) = ( BS x H )\n dh.append(np.dot(dO, self.V.T)) # ( BS x leng ) * ( H x leng ).T = ( BS x H )\n loss += -np.sum(np.log(s)*yp)\n return loss, np.array(dh), dV, dc" ]
[ "0.7707404", "0.7442763", "0.69570446", "0.69570446", "0.6913104", "0.6866929", "0.6692329", "0.6500259", "0.64484847", "0.6443854", "0.6421832", "0.64056575", "0.63885695", "0.6378459", "0.6364855", "0.6319561", "0.62842685", "0.6246546", "0.624369", "0.6235395", "0.62323135", "0.6231457", "0.6221209", "0.61913234", "0.6151556", "0.6148285", "0.61376566", "0.61225677", "0.61111283", "0.6045971", "0.60423833", "0.6033245", "0.60295135", "0.6009586", "0.600298", "0.598738", "0.59772086", "0.59732705", "0.59626204", "0.59598476", "0.59569824", "0.5954368", "0.59435993", "0.5940915", "0.5939996", "0.5932382", "0.5917335", "0.58865416", "0.587704", "0.58745867", "0.587433", "0.58601373", "0.58576304", "0.5852835", "0.58526146", "0.58503795", "0.58408743", "0.58217317", "0.5813568", "0.58117235", "0.5801958", "0.5792648", "0.57913834", "0.5783896", "0.5783103", "0.5776308", "0.5775008", "0.5769413", "0.5763483", "0.57607085", "0.575955", "0.57595396", "0.57554615", "0.5748754", "0.5742599", "0.5740511", "0.57253546", "0.5723047", "0.57174456", "0.57127285", "0.57006085", "0.56908786", "0.56847113", "0.56837916", "0.568106", "0.5678791", "0.56727844", "0.5671162", "0.5671162", "0.5670632", "0.5663015", "0.56513476", "0.565041", "0.5650409", "0.5650115", "0.564887", "0.56483865", "0.5645655", "0.5643222", "0.56376106" ]
0.6739313
6
Compute the discriminator loss.
def __D_loss(self, D, real, fake):
    loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \
                  tf.reduce_mean(tf.square(D(fake))))

    return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_discriminator_loss(self):\n real_d_loss = tf.reduce_mean(self._real_discriminator_out)\n real_d_loss = tf.negative(real_d_loss, name='real_discriminator_loss')\n gen_d_loss = tf.reduce_mean(self._gen_discriminator_out,\n name='gen_discriminator_loss')\n return tf.add(real_d_loss, gen_d_loss, name='discrminator_loss')", "def discriminator_loss(self, real_output, fake_output):\n real_loss = self.cross_entropy(ones_like(real_output), real_output)\n fake_loss = self.cross_entropy(zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss", "def discriminator_loss(self, disc_real_output, disc_generated_output):\n # Compute the loss function\n loss_function = self.loss_func()\n\n # Discriminator loss for real image\n real_loss = loss_function(tf.ones_like(disc_real_output), disc_real_output)\n\n # Discriminator loss for generated image\n generated_loss = loss_function(tf.zeros_like(disc_generated_output), disc_generated_output)\n\n # Total discriminator loss\n total_disc_loss = real_loss + generated_loss\n return total_disc_loss", "def discriminator_loss(gen_images, real_images):\n real = real_images.new_full((real_images.shape[0], 1), real_label)\n gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)\n\n realloss = disc_loss_criterion(disc_net(real_images), real)\n genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)\n\n return (genloss + realloss) / 2", "def discriminatorLoss(realOutput, fakeOutput):\n realLoss = cross_entropy(tf.ones_like(realOutput), realOutput)\n fakeLoss = cross_entropy(tf.zeros_like(fakeOutput), fakeOutput)\n totalLoss = realLoss + fakeLoss\n return totalLoss", "def _discriminator_loss(self, y_real: tf.Tensor, y_fake: tf.Tensor) -> tf.Tensor:\n\n loss = self.loss(tf.ones_like(y_real), y_real - y_fake)\n\n return tf.reduce_mean(loss)", "def ls_discriminator_loss(scores_real, scores_fake):\r\n loss = (torch.mean((scores_real - 1) ** 2) + torch.mean(scores_fake ** 2)) / 2\r\n return loss", "def _discriminator_loss(self, y, y_hat):\n\n l1 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.ones(tf.shape(y)),logits = y)\n l2 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat)\n l = tf.reduce_mean(l1+l2)\n print('_discriminator_loss shape,', tf.shape(l))\n return l", "def multi_discriminator_loss(disc_real_source, disc_real_target, disc_gen):\n\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n real_source_loss = loss_obj(tf.ones_like(disc_real_source), disc_real_source)\n real_target_loss = loss_obj(tf.ones_like(disc_real_target), disc_real_target)\n gen_loss = loss_obj(tf.zeros_like(disc_gen), disc_gen)\n return real_source_loss + real_target_loss + gen_loss", "def ls_discriminator_loss(scores_real, scores_fake):\n N = scores_real.size()\n# print(N)\n\n true_labels = Variable(torch.ones(N)).type(dtype)\n\n fake_image_loss = (torch.mean((scores_real - true_labels)**2))\n real_image_loss = (torch.mean((scores_fake)**2))\n\n loss = 0.5*fake_image_loss + 0.5*real_image_loss\n\n return loss", "def standard_discriminator_loss(disc_real, disc_gen):\n\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n real_loss = loss_obj(tf.ones_like(disc_real), disc_real)\n gen_loss = loss_obj(tf.zeros_like(disc_gen), disc_gen)\n return real_loss + gen_loss", "def discriminator_loss(discriminator, fake_images, real_images, fake_labels, real_labels, con_aug, stage):\n discriminator.train()\n criterion = nn.BCELoss()\n fake = fake_images.detach()\n 
condition = con_aug.detach()\n batch_size = real_images.size(0)\n \"\"\"\n ********************************************************************************\n The next two lines should be removed if we don't have a very powerful GPU.\n I cannot train the 256 x 256 image in stage 2 in my GPU(Tesla K80). So modified stage 2\n so that all processing are done for 64 x 64 and output is also 64 x 64 image.\n *********************************************************************************\n \"\"\"\n if (stage==2):\n real_images = F.interpolate(real_images, scale_factor = 4)\n real_dis_fea = discriminator(real_images)\n fake_dis_fea = discriminator(fake)\n\n \"\"\"\n Here we use three types of error and add them.\n real_error: error between real images and real labels.\n wrong_error: error between real images and wrong labels.\n fake_error: error between fake images and fake labels.\n \"\"\"\n real_logits = discriminator.conditioned_result(real_dis_fea, condition)\n real_error = criterion(real_logits, real_labels)\n\n wrong_logits = discriminator.conditioned_result(real_dis_fea[:(batch_size-1)], condition[1:])\n wrong_error = criterion(wrong_logits, fake_labels[1:])\n\n fake_logits = discriminator.conditioned_result(fake_dis_fea, condition)\n fake_error = criterion(fake_logits, fake_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n In case of stage 2 generator in addition to above errors we also\n use another error calculated from scores computed using the image features\n only without using the text features.\n \"\"\"\n real_logits1 = discriminator.unconditioned_result(real_dis_fea)\n uncond_real_error = criterion(real_logits1, real_labels)\n\n fake_logits1 = discriminator.unconditioned_result(fake_dis_fea)\n uncond_fake_error = criterion(fake_logits1, fake_labels)\n\n error = (real_error + uncond_real_error)/2.0 + (wrong_error+fake_error+uncond_fake_error)/3.0\n real_error = (real_error + uncond_real_error)/2.0\n fake_error = (fake_error + uncond_fake_error)/2.0\n\n else:\n error = real_error + (wrong_error * fake_error) * 0.5\n\n return error, real_error.item(), fake_error.item(), wrong_error.item()", "def discriminator_loss(logits_real, logits_fake, device):\r\n true_labels = torch.ones(logits_real.size()).to(device=device, dtype=torch.float32)\r\n loss = bce_loss(logits_real, true_labels) + bce_loss(logits_fake, true_labels - 1)\r\n return loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def generator_loss(discriminator, fake_images, real_labels, con_aug):\n\n discriminator.train()\n criterion = nn.BCELoss()\n condition = con_aug.detach()\n fake_img_fea = discriminator(fake_images)\n fake_logits = discriminator.conditioned_result(fake_img_fea, condition)\n fake_error = criterion(fake_logits, real_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n If it is a stage 2 discriminator then an additional error due to the\n score calculated from image features alone is added to the above error\n for loss calculation.\n \"\"\"\n fake_logits1 = discriminator.unconditioned_result(fake_img_fea)\n uncond_fake_error = criterion(fake_logits1, real_labels)\n 
fake_error += uncond_fake_error\n return fake_error", "def discriminator_loss_fn(y_data, y_generated, data_label=0, label_noise=0.0):\n assert data_label == 1 or data_label == 0\n # TODO:\n # Implement the discriminator loss.\n # See pytorch's BCEWithLogitsLoss for a numerically stable implementation.\n # ====== YOUR CODE: ======\n device = y_data.device\n loss_fn = nn.BCEWithLogitsLoss()\n data_noise = torch.rand(*y_data.shape) * label_noise - (label_noise / 2)\n generated_noise = torch.rand(*y_data.shape) * label_noise - (label_noise / 2)\n\n loss_data = loss_fn(y_data, (data_noise + data_label).to(device))\n loss_generated = loss_fn(y_generated, (generated_noise + (1 - data_label)).to(device))\n # ========================\n return loss_data + loss_generated", "def compute_loss(self):", "def discriminator_loss(logits_real, logits_fake):\n #loss = None\n # Batch size.\n N = logits_real.size()\n\n # 目标label,全部设置为1意味着判别器需要做到的是将正确的全识别为正确,错误的全识别为错误\n true_labels = Variable(torch.ones(N)).type(dtype)\n\n\n real_image_loss = Bce_loss(logits_real, true_labels) # 识别正确的为正确\n fake_image_loss = Bce_loss(logits_fake, 1 - true_labels) # 识别错误的为错误\n\n loss = real_image_loss + fake_image_loss\n\n return loss", "def D_loss_basic(self, netD, real, fake):\n # Real\n D_real = netD(real)\n D_real_loss = self.GANLoss(D_real, True, True)\n # fake\n D_fake = netD(fake)\n D_fake_loss = self.GANLoss(D_fake, False, True)\n # loss for discriminator\n D_loss = (D_real_loss + D_fake_loss) * 0.5\n # gradient penalty for wgan-gp\n if self.gan_mode == 'wgangp':\n gradient_penalty, gradients = base_function.cal_gradient_penalty(netD, real, fake)\n D_loss +=gradient_penalty\n\n D_loss = D_loss * self.loss_d_weight\n D_loss.backward()\n\n return D_loss", "def relativistic_standard_discriminator_loss(disc_real, disc_gen):\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n return loss_obj(tf.ones_like(disc_real), (disc_real - disc_gen))", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def update_discriminator_loss(self, loss, real_output, fake_output):\n loss.update_state(ones_like(real_output), real_output)\n loss.update_state(zeros_like(fake_output), fake_output)", "def model_loss(self,input_real,input_z,out_channel_dim):\t\r\n label_smooth = 0.9 \r\n \r\n #get output of generator\r\n gen_img, gen_logits = self.generator(input_z,out_channel_dim,True)\r\n\r\n\t#pass real image to dicriminator\r\n disc_model_real, disc_logits_real = self.discriminator(input_real)\r\n\t\r\n\t#pass generated image to dicriminator\r\n disc_model_fake, disc_logits_fake = self.discriminator(gen_img,reuse=True)\r\n \r\n\t \t\r\n disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_real,labels=label_smooth*tf.ones_like(disc_model_real))) \r\n disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=tf.zeros_like(disc_model_fake)))\r\n \r\n\r\n\t\"\"\"\r\n\tLoss for discriminator is sum of loss for real image and fake image \r\n\t\"\"\"\t\r\n disc_loss = disc_loss_real + disc_loss_fake\r\n \r\n\r\n \"\"\"\r\n\tTo find loss for generator, fake image is passed with label= real (0.9)\r\n\t\"\"\"\r\n gen_loss = 
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=label_smooth*tf.ones_like(disc_model_fake)))\r\n \r\n return disc_loss,gen_loss,gen_img", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def generator_loss_std(score_discriminator):\n labels = Variable(torch.ones(score_discriminator.size()), requires_grad=False).type(torch.FloatTensor)\n bce_loss = nn.BCEWithLogitsLoss()\n loss = bce_loss(score_discriminator, labels)\n return loss", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def dnn_loss_calculation(self, labeled_examples, labels):\n predicted_labels, _ = self.DNN(labeled_examples)\n labeled_loss = self.labeled_loss_function(predicted_labels, labels, order=self.settings.labeled_loss_order)\n labeled_loss *= self.settings.labeled_loss_multiplier\n return labeled_loss", "def detector_loss(self, input, target, mask=None, loss_type=\"softmax\"):\n if loss_type == \"l2\":\n loss_func = nn.MSELoss(reduction=\"mean\")\n loss = loss_func(input, target)\n elif loss_type == \"softmax\":\n loss_func_BCE = nn.BCELoss(reduction='none').cuda()\n loss = loss_func_BCE(nn.functional.softmax(input, dim=1), target)\n loss = (loss.sum(dim=1) * mask).sum()\n loss = loss / (mask.sum() + 1e-10)\n return loss", "def loss(self):\n return la.norm(self.resids) / self.normX", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def __G_loss(self, D, fake):\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n\n return loss", "def loss_(self, batch):\n raise NotImplementedError", "def get_loss(self):\n return self.loss / self.cnt", "def calculate_loss(estimated_separation, true_separation, mask, true_latents, estimated_mix, true_mix, args):\n stats = torch.zeros(7)\n sdr = sdr_objective(estimated_separation, true_separation, mask)\n stats[:4] = sdr\n total_loss = -sdr.sum()\n reconstruction_sdr = sdr_objective(estimated_mix, true_mix).mean() if args.reconstruction_loss_weight > 0 else 0.0\n stats[4] = reconstruction_sdr\n total_loss += 
-args.reconstruction_loss_weight * reconstruction_sdr\n if args.similarity_loss_weight > 0.0 or args.dissimilarity_loss_weight > 0.0:\n mask = mask.squeeze(-1)\n true_latents = true_latents * mask.unsqueeze(-1).unsqueeze(-1)\n true_latents = true_latents.transpose(0, 1)\n dissimilarity = dissimilarity_loss(true_latents, mask) if args.dissimilarity_loss_weight > 0.0 else 0.0\n stats[5] = dissimilarity\n total_loss += args.dissimilarity_loss_weight * dissimilarity\n similarity = similarity_loss(true_latents, mask) if args.similarity_loss_weight > 0.0 else 0.0\n stats[6] = similarity\n total_loss += -args.similarity_loss_weight * similarity\n return total_loss, stats", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model", "def _define_generator_loss(self):\n loss = tf.reduce_mean(self._gen_discriminator_out)\n return tf.negative(loss, name='generator_loss')", "def discriminator_model_lungs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (400, 400, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L : 400 x 400 x 1 # G: 200 x 200 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 200 x 200 x 128 # G: 100 x 100 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # G: 100 x 100 x 256 # L: 50 x 50 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C512\n d3 = Conv2D(filters=512, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # G: 50 x 50 x 512 # L: 25 x 25 x 512 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n d3 = ZeroPadding2D()(d3) # G: 52 x 52 x 512 # L: 27 x 27 x 512\n\n # Patch output\n d4 = Conv2D(filters=1, kernel_size=(3, 3), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # G: 50 x 50 x 1 # L: 25 x 25 x 1 # RF: 38\n output_patch = Activation('sigmoid')(d4)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model", "def loss_op(self):\n return self.loss", "def loss(self):\n return self._loss", "def _get_loss(self):\n raise NotImplementedError", "def get_loss(self):\n raise NotImplementedError", "def train_discriminator(G, D, d_optimizer, loss, real_data, fake_data, loss_fn):\r\n\tN = real_data.size(0)\r\n\td_optimizer.zero_grad()\r\n\r\n\t# Train 
D on real data\r\n\tpred_real = D(real_data)\r\n\tif loss_fn == \"dflt\":\r\n\t\terror_real = loss(pred_real, Variable(torch.ones(N, 1)))\r\n\telif loss_fn == \"nonsaturating\":\r\n\t\terror_real, _ = nonsaturating_loss(G, D, real_data, fake_data)\r\n\telif loss_fn == \"wasserstein_gp\":\r\n\t\terror_real, _ = loss_wasserstein_gp(G, D, real_data, fake_data)\r\n\r\n\terror_real.backward()\r\n\r\n\t# Train on fake data\r\n\tpred_fake = D(fake_data)\r\n\tif loss_fn == \"dflt\":\r\n\t\terror_fake = loss(pred_fake, Variable(torch.ones(N, 1)))\r\n\telif loss_fn == \"nonsaturating\":\r\n\t\t_, error_fake = nonsaturating_loss(G, D, real_data, fake_data)\r\n\telif loss_fn == \"wasserstein_gp\":\r\n\t\t_, error_fake = loss_wasserstein_gp(G, D, real_data, fake_data)\r\n\r\n\r\n\terror_fake.backward()\r\n\r\n\td_optimizer.step()\r\n\treturn error_real + error_fake, pred_real, pred_fake", "def get_disc_loss(gen, disc, criterion, real, num_images, z_dim, device):\n \n # 1. Create noise vectors and generate a batch of num_images fake images.\n # Make sure to pass the device argument to the noise.\n noise = get_noise(num_images, z_dim, device)\n \n # Don't forget to detach the generator!\n fake_images = gen(noise).detach() # detach to avoid training G on these labels\n \n # 2. Train Fake Images\n # Get the discriminator's prediction of the fake image and calculate the loss.\n pred_fake = disc(fake_images)\n \n # Remember the loss function you set earlier? You need a 'ground truth' tensor in order to calculate the loss.\n # All of these are fake, so the label is 0\n ground_truth_fake = torch.zeros_like(pred_fake)\n loss_fake = criterion(pred_fake, ground_truth_fake)\n loss_fake.backward(retain_graph=True)\n \n # Repeat the process with `ground_truth_real`\n # Train Real Images\n pred_real = disc(real)\n ground_truth_real = torch.ones_like(pred_real)\n loss_real = criterion(pred_real, ground_truth_real)\n loss_real.backward(retain_graph=True)\n disc_loss = (loss_real + loss_fake) / 2\n return disc_loss", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def build_discriminator(self):\n with tf.variable_scope(\"discriminator\") as scope:\n\n # --- build the convolutional layers\n self.d_convlayers = list()\n mi = self.num_colors\n dim = self.img_dim\n count = 0\n for mo, filter_size, stride, apply_batch_norm in self.d_sizes['conv_layers']:\n name = f\"convlayer_{count}\" # name is used for get_variable later\n count += 1\n layer = ConvLayer(name, mi, mo, apply_batch_norm, filter_size, stride, lrelu)\n self.d_convlayers.append(layer)\n mi = mo\n print(f\"dim: {dim}\")\n # --- keep track of image dimensionality: need this for the first Dense layer\n dim = int(np.ceil(float(dim) / stride))\n\n # --- get the input dimensionalith for the first Dense layer\n mi = mi * dim * dim\n\n # --- build the dense layers\n self.d_denselayers = list()\n for mo, apply_batch_norm in self.d_sizes['dense_layers']:\n name = f\"denselayer_{count}\"\n count += 1\n layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)\n mi = mo\n self.d_denselayers.append(layer)\n\n # --- final logistic regression layer (use it in the d_forward\n # function below to get the final logits)\n name = f\"denselayer_{count}\"\n self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x: x)\n\n # --- get and return the logits\n logits = self.d_forward(self.X)\n return logits", "def calculate_loss(estimated_separation, true_separation, mask, true_latents, estimated_mix, true_mix, args):\n stats = torch.zeros(7).to(mask.device)\n\n sdr = 
sdr_objective(estimated_separation, true_separation, mask)\n stats[:4] = sdr\n total_loss = -sdr.sum()\n\n reconstruction_sdr = sdr_objective(estimated_mix, true_mix).mean() if args.reconstruction_loss_weight > 0 else 0.0\n stats[4] = reconstruction_sdr\n total_loss += -args.reconstruction_loss_weight * reconstruction_sdr\n\n if args.similarity_loss_weight > 0.0 or args.dissimilarity_loss_weight > 0.0:\n mask = mask.squeeze(-1)\n true_latents = true_latents * mask.unsqueeze(-1).unsqueeze(-1)\n true_latents = true_latents.transpose(0, 1)\n\n dissimilarity = dissimilarity_loss(true_latents, mask) if args.dissimilarity_loss_weight > 0.0 else 0.0\n stats[5] = dissimilarity\n total_loss += args.dissimilarity_loss_weight * dissimilarity\n\n similarity = similarity_loss(true_latents, mask) if args.similarity_loss_weight > 0.0 else 0.0\n stats[6] = similarity\n total_loss += -args.similarity_loss_weight * similarity\n\n return total_loss, stats", "def relativistic_average_standard_discriminator_loss(disc_real, disc_gen):\n\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n real_loss = loss_obj(tf.ones_like(disc_real), disc_real - tf.reduce_mean(disc_gen))\n gen_loss = loss_obj(tf.zeros_like(disc_gen), disc_gen - tf.reduce_mean(disc_real))\n return (real_loss + gen_loss) / 2", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def _compute_losses(discriminator, d_real, d_fake, interpolated_x, interpolated_c):\n wasserstein_distance = tf.reduce_mean(d_real) - tf.reduce_mean(d_fake)\n\n gradient_penalty_x = wgan.compute_gradient_penalty(\n lambda interpolated: discriminator(interpolated, interpolated_c),\n interpolated_x\n )\n \n gradient_penalty_c = wgan.compute_gradient_penalty(\n lambda interpolated: discriminator(interpolated_x, interpolated),\n interpolated_c\n )\n\n g_loss = tf.reduce_mean(d_fake)\n d_loss = wasserstein_distance + (\n wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_x +\n wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_c\n )\n\n return g_loss, d_loss", "def disc_step(real_data,fake_data):\n with tf.GradientTape() as tape:\n loss = discriminator_loss(real_data,fake_data)\n loss = tf.add_n([loss] + discriminator.losses)\n gradients = tape.gradient(loss, discriminator.trainable_variables)\n d_optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))\n return loss", "def __call__(self, prediction, target_is_real, for_discriminator=True):\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'hinge':\n if for_discriminator:\n if target_is_real:\n loss = nn.ReLU()(1 - prediction).mean()\n else:\n loss = nn.ReLU()(1 + prediction).mean() \n else:\n assert target_is_real, \"The generator's hinge loss must be aiming for real\"\n loss = - prediction.mean()\n return loss\n\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n elif self.gan_mode == 'softwgan':\n if target_is_real:\n loss = F.softplus(-prediction).mean()\n else:\n loss = F.softplus(prediction).mean()\n return loss", "def train_step_discriminators(self, X):\n self.discriminator_image.zero_grad()\n self.discriminator_latent.zero_grad()\n\n Z = self.noise_fn(self.batch_size)\n\n with torch.no_grad():\n X_hat = 
self.generator(Z)\n Z_hat = self.encoder(X)\n X_tilde = self.generator(Z_hat)\n Z_tilde = self.encoder(X_hat)\n\n X_confidence = self.discriminator_image(X)\n X_hat_confidence = self.discriminator_image(X_hat)\n X_tilde_confidence = self.discriminator_image(X_tilde)\n Z_confidence = self.discriminator_latent(Z)\n Z_hat_confidence = self.discriminator_latent(Z_hat)\n Z_tilde_confidence = self.discriminator_latent(Z_tilde)\n\n X_loss = 2 * self.criterion_gen(X_confidence, self.target_ones)\n X_hat_loss = self.criterion_gen(X_hat_confidence, self.target_zeros)\n X_tilde_loss = self.criterion_gen(X_tilde_confidence, self.target_zeros)\n Z_loss = 2 * self.criterion_gen(Z_confidence, self.target_ones)\n Z_hat_loss = self.criterion_gen(Z_hat_confidence, self.target_zeros)\n Z_tilde_loss = self.criterion_gen(Z_tilde_confidence, self.target_zeros)\n\n loss_images = (X_loss + X_hat_loss + X_tilde_loss) / 4\n loss_latent = (Z_loss + Z_hat_loss + Z_tilde_loss) / 4\n loss = loss_images + loss_latent\n\n loss.backward()\n self.optim_di.step()\n self.optim_dl.step()\n\n return loss_images.item(), loss_latent.item()", "def setup_loss(self):\n self.loss = nn.CrossEntropyLoss(weight = self.to_device(self.datasetManager.class_weights))\n #self.loss = nn.CrossEntropyLoss()", "def get_loss(self):\n return categorical_cross_entropy.get_loss(loss_key=self.loss_key,\n output_name=self.args.output_name)", "def loss(self) -> KernelLoss:\n return self._loss", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def disc_loss_orig(self, real_samples, noise_samples):\n generator_samples = self.gen_model(noise_samples)\n\n logits_real = self.disc_model(real_samples)\n logits_gen = self.disc_model(generator_samples)\n loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones(logits_real.shape), logits=logits_real))\n loss_gen = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros(logits_gen.shape), logits=logits_gen))\n\n loss = loss_real + loss_gen\n return loss", "def loss(self, y: torch.Tensor, state: AlgorithmState) -> torch.Tensor:\n\n raise NotImplementedError()", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 
1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def get_gen_loss(gen, disc, criterion, num_images, z_dim, device):\n # Create noise vectors and generate a batch of fake images.\n noise = get_noise(num_images, z_dim, device)\n \n # Get the discriminator's prediction of the fake image.\n fake_images = gen(noise)\n \n # Get the discriminator's prediction of the fake image.\n pred_fake = disc(fake_images)\n \n # Target vectors with 1`s. In this case, 1 represents real\n # From the perspective of the generator, \"true\" or 1 is the answer it wants\n target = torch.ones_like(pred_fake)\n \n # Calculate the generator's loss.\n gen_loss = criterion(pred_fake, target)\n gen_loss.backward(retain_graph=True)\n return gen_loss", "def calculate_loss(self, output, target, redmode = 'mean'):\n\n loss = F.cross_entropy(output, target, reduction = redmode)\n return loss", "def _loss(self, preds, labels):\n if self.sigmoid_loss:\n assert preds.shape == labels.shape\n return torch.nn.BCEWithLogitsLoss()(preds, labels) * preds.shape[1]\n else:\n if len(labels.shape) == 2: # flatten to 1D\n labels = torch.max(labels, axis=1)[1] # this can handle both bool and float types\n return torch.nn.CrossEntropyLoss()(preds, labels)", "def define_discriminator(image_shape=(256, 256, 1)):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # source image input\n in_src_image = Input(shape=image_shape)\n # target image input\n in_target_image = Input(shape=image_shape)\n # concatenate images channel-wise\n merged = Concatenate()([in_src_image, in_target_image])\n # C64\n d = Conv2D(64, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(merged)\n d = LeakyReLU(alpha=0.2)(d)\n # C128\n d = Conv2D(128, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C256\n d = Conv2D(256, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C512\n d = Conv2D(512, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # second last output layer\n d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # patch output\n d = Conv2D(1, (4, 4), 
padding='same', kernel_initializer=init)(d)\n patch_out = Activation('sigmoid')(d)\n # define model\n model = Model([in_src_image, in_target_image], patch_out)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt,\n loss_weights=[0.5])\n\n return model", "def gen_loss_orig(self, noise_samples):\n generator_samples = self.gen_model(noise_samples)\n logits_gen = self.disc_model(generator_samples)\n # loss = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros(logits_gen.shape), logits=logits_gen))\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones(logits_gen.shape), logits=logits_gen))\n return loss", "def loss(self):\n return self._get(\"loss\")", "def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator", "def get_loss(self, Loss, results, inputs, device):\n return", "def unlabeled_loss_calculation(self, labeled_examples, unlabeled_examples):\n _, fake_scores = self.D(unlabeled_examples)\n criterion = BCEWithLogitsLoss()\n unlabeled_loss = criterion(fake_scores, torch.zeros_like(fake_scores))\n unlabeled_loss *= self.settings.matching_loss_multiplier\n unlabeled_loss *= self.settings.dggan_loss_multiplier\n return unlabeled_loss", "def build_discriminator():\n\n #Slope and weight initializer are chosen to match parmeters in the paper\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.2\n inputs = keras.Input(shape=(64,64,3))\n x = preprocessing.Rescaling(scale=1./127.5, offset=-1.)(inputs)\n\n # First conv layer\n x = Conv2D(\n 64,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Second conv layer\n x = Conv2D(\n 128,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x 
= BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third conv layer\n x = Conv2D(\n 256,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth conv layer\n x = Conv2D(\n 512,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Predictions. Note that we use logits so thhere is no activation at the end. \n x = layers.Flatten()(x)\n x = layers.Dense(1,kernel_initializer=weight_initializer)(x)\n \n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def _compute_loss(self, decoder_output, labels):\n with tf.name_scope(\"compute_loss\"):\n language_logit = decoder_output.logits[0]\n attention_logit = decoder_output.logits[1]\n batch_size = self.params['dataset']['batch_size']\n\n language_losses = self._cross_entropy_sequence_loss(\n logits=language_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n attention_losses = self._cross_entropy_sequence_loss(\n logits=attention_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n\n language_loss = tf.reduce_sum(language_losses) / batch_size\n attention_loss = tf.reduce_sum(attention_losses) / batch_size\n loss = language_loss + attention_loss\n\n return loss", "def dloss(self, output, labels):\n return 2*(output - labels)/labels.shape[1]", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def _discriminate(self, real_image, gen_image):\n buf_image = history_image(gen_image, self.buffer_size)\n batch = tf.stack([real_image, gen_image, buf_image])\n disc = self.discriminator(batch)\n real_outs, gen_outs, buf_outs = disc[0], disc[1], disc[2]\n disc_loss = tf.reduce_mean(tf.square(real_outs - 1)) + tf.reduce_mean(tf.square(buf_outs))\n gen_loss 
= tf.reduce_mean(tf.square(gen_outs - 1))\n return disc_loss, gen_loss", "def fake_loss_calculation(self, unlabeled_examples, fake_examples):\n _, fake_scores = self.D(fake_examples)\n criterion = BCEWithLogitsLoss()\n fake_loss = criterion(fake_scores, torch.ones_like(fake_scores))\n fake_loss *= self.settings.contrasting_loss_multiplier\n fake_loss *= self.settings.dggan_loss_multiplier\n return fake_loss", "def compute_loss(self, obs, returns):", "def calculate_loss(self, train_x, train_y):\n self.log.info(\"Calculating average categorical crossentropy loss...\")\n\n num_words = np.sum([len(y) for y in train_y])\n return self.calculate_total_loss(train_x, train_y)/float(num_words)", "def calc_loss(self, guess: List[float], answer: List[float]) -> float:\n #print(\"Guess: %s Answer: %s\" % (guess, answer))\n return self.tested_network.loss_function.func(guess, answer)", "def discriminator_loss_std(logits_real, logits_fake):\n bce_loss = nn.BCEWithLogitsLoss()\n labels_real = Variable(torch.ones(logits_real.size()), requires_grad=False).type(torch.FloatTensor)\n labels_fake = Variable(torch.zeros(logits_fake.size()), requires_grad=False).type(torch.FloatTensor)\n loss = bce_loss(logits_real, labels_real) + bce_loss(logits_fake, labels_fake)\n return loss", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def train_discriminator(self, real_data, fake_data):\n self.d_optimizer.zero_grad()\n\n prediction_r = self.discriminator(real_data)\n target = torch.ones(real_data.size(0), 1)\n if self.label_smooth: target = .9 * target\n error_r = self.loss_function(prediction_r, target) # real\n error_r.backward()\n\n prediction_f = self.discriminator(fake_data)\n error_f = self.loss_function(prediction_f, torch.zeros(fake_data.size(0), 1)) # fake\n error_f.backward()\n\n self.d_optimizer.step()\n\n return error_r + error_f", "def discriminator_fn(img, unused_conditioning, weight_decay=2.5e-5,\n is_training=True):\n with framework.arg_scope(\n [layers.conv2d, layers.fully_connected],\n activation_fn=leaky_relu, normalizer_fn=None,\n weights_regularizer=layers.l2_regularizer(weight_decay),\n biases_regularizer=layers.l2_regularizer(weight_decay)):\n net = layers.conv2d(img, 64, [4, 4], stride=2)\n net = layers.conv2d(net, 128, [4, 4], stride=2)\n net = layers.flatten(net)\n with framework.arg_scope([layers.batch_norm], is_training=is_training):\n net = layers.fully_connected(net, 1024, normalizer_fn=layers.batch_norm)\n return layers.linear(net, 1)", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def _compute_loss(self, predictions, targets, **params):\n pass", "def compute_loss(self,\n pred_seg: 
Dict[str, torch.Tensor],\n target: torch.Tensor,\n ) -> Dict[str, torch.Tensor]:\n seg_logits = pred_seg[\"seg_logits\"]\n return {\n \"seg_ce\": self.alpha * self.ce_loss(seg_logits, target.long()),\n \"seg_dice\": (1 - self.alpha) * self.dice_loss(seg_logits, target),\n }", "def get_disc_loss(gen, disc, criterion, real, num_images, z_dim, device):\n noise = get_noise(n_samples=num_images, z_dim=z_dim, device=device)\n fake_img = gen(noise).detach() # DO NOT FORGET to detach\n\n fake_logits = disc(fake_img)\n loss_fake = criterion(fake_logits, torch.zeros_like(fake_logits))\n\n real_logits = disc(real)\n loss_real = criterion(real_logits, torch.ones_like(real_logits))\n\n return torch.mean(loss_fake + loss_real)", "def build_discriminator():\n leakyrelu_alpha = 0.2\n momentum = 0.8\n input_shape = (256, 256, 3)\n\n input_layer = Input(shape=input_shape)\n\n # Add the first convolution block\n dis1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(input_layer)\n dis1 = LeakyReLU(alpha=leakyrelu_alpha)(dis1)\n\n # Add the 2nd convolution block\n dis2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(dis1)\n dis2 = LeakyReLU(alpha=leakyrelu_alpha)(dis2)\n dis2 = BatchNormalization(momentum=momentum)(dis2)\n\n # Add the third convolution block\n dis3 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(dis2)\n dis3 = LeakyReLU(alpha=leakyrelu_alpha)(dis3)\n dis3 = BatchNormalization(momentum=momentum)(dis3)\n\n # Add the fourth convolution block\n dis4 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same')(dis3)\n dis4 = LeakyReLU(alpha=leakyrelu_alpha)(dis4)\n dis4 = BatchNormalization(momentum=0.8)(dis4)\n\n # Add the fifth convolution block\n dis5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(dis4)\n dis5 = LeakyReLU(alpha=leakyrelu_alpha)(dis5)\n dis5 = BatchNormalization(momentum=momentum)(dis5)\n\n # Add the sixth convolution block\n dis6 = Conv2D(filters=256, kernel_size=3, strides=2, padding='same')(dis5)\n dis6 = LeakyReLU(alpha=leakyrelu_alpha)(dis6)\n dis6 = BatchNormalization(momentum=momentum)(dis6)\n\n # Add the seventh convolution block\n dis7 = Conv2D(filters=512, kernel_size=3, strides=1, padding='same')(dis6)\n dis7 = LeakyReLU(alpha=leakyrelu_alpha)(dis7)\n dis7 = BatchNormalization(momentum=momentum)(dis7)\n\n # Add the eight convolution block\n dis8 = Conv2D(filters=512, kernel_size=3, strides=2, padding='same')(dis7)\n dis8 = LeakyReLU(alpha=leakyrelu_alpha)(dis8)\n dis8 = BatchNormalization(momentum=momentum)(dis8)\n\n # Add a dense layer\n #avgd = keras.layers.AveragePooling2D(pool_size=(4,4) , strides = (4,4))(dis8)\n\n #flat = keras.layers.Flatten()(dis8)\n dis9 = Dense(units=1024)(dis8)\n dis9 = LeakyReLU(alpha=0.2)(dis9)\n\n # Last dense layer - for classification\n output = Dense(units=1, activation='sigmoid')(dis9)\n\n model = Model(inputs=[input_layer], outputs=[output], name='discriminator')\n return model", "def _get_ner_loss(self):\n # per example loss\n no_entity_id = self.config[\"model\"][\"ner\"][\"no_entity_id\"]\n logits_shape = tf.shape(self.ner_logits_train)\n labels_shape = logits_shape[:3]\n labels = get_dense_labels_from_indices(indices=self.ner_labels_ph, shape=labels_shape, no_label_id=no_entity_id)\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=self.ner_logits_train\n ) # [batch_size, num_tokens, num_tokens]\n\n # mask\n maxlen = logits_shape[1]\n span_mask = upper_triangular(maxlen, dtype=tf.float32)\n sequence_mask = 
tf.sequence_mask(self.num_tokens_ph, dtype=tf.float32) # [batch_size, num_tokens]\n mask = span_mask[None, :, :] * sequence_mask[:, None, :] * sequence_mask[:, :, None] # [batch_size, num_tokens, num_tokens]\n\n masked_per_example_loss = per_example_loss * mask\n total_loss = tf.reduce_sum(masked_per_example_loss)\n num_valid_spans = tf.cast(tf.reduce_sum(mask), tf.float32)\n loss = total_loss / num_valid_spans\n\n loss *= self.config[\"model\"][\"ner\"][\"loss_coef\"]\n return loss", "def discriminator_model_organs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (512, 512, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L: 512 x 512 x 1 # G: 256 x 256 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 256 x 256 x 128 # G: 128 x 128 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # L: 128 x 128 x 256 # G: 64 x 64 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C256\n d3 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # L: 64 x 64 x 256 # G: 32 x 32 x 256 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n\n # C512\n d4 = Conv2D(filters=512, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # L: 61 x 61 x 512 # G: 29 x 29 x 512 # RF: 46\n d4 = BatchNormalization()(d4)\n d4 = LeakyReLU(alpha=0.2)(d4)\n d4 = ZeroPadding2D()(d4) # L: 63 x 63 x 512 # G: 31 x 31 x 512\n\n # Patch output\n d5 = Conv2D(filters=1, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d4) # L: 60 x 60 x 1 # G: 28 x 28 x 1 # RF: 70\n output_patch = Activation('sigmoid')(d5)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model", "def _compute_loss(self, state, action, reward, next_state, done):\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n q_value = q_values[action]\n\n next_state = torch.FloatTensor(next_state)\n next_q_values = self.dqn(next_state)\n next_q_value = next_q_values.max()\n\n if done:\n target = reward\n else:\n target = reward + self.discount_factor * next_q_value\n\n loss = (q_value - target).pow(2).mean()\n\n return loss" ]
[ "0.8110355", "0.78501636", "0.7769317", "0.7654356", "0.7549625", "0.7488209", "0.73963207", "0.7360349", "0.7320101", "0.7283635", "0.72751373", "0.7269417", "0.7124739", "0.7010574", "0.699223", "0.69803566", "0.69646436", "0.69372475", "0.6897706", "0.6882033", "0.684818", "0.684818", "0.6724429", "0.67047584", "0.66848344", "0.6662063", "0.66390294", "0.66290766", "0.66234183", "0.66083395", "0.66054124", "0.6605205", "0.65986323", "0.6589006", "0.65835524", "0.65797913", "0.65670735", "0.6551719", "0.65504366", "0.65354925", "0.6526655", "0.65175694", "0.65165627", "0.65124696", "0.65074366", "0.6502993", "0.64901567", "0.64853334", "0.64743847", "0.6473812", "0.6449636", "0.64439076", "0.6419177", "0.6417252", "0.6410015", "0.6398876", "0.6397849", "0.6397296", "0.63909113", "0.6387823", "0.63715476", "0.63699555", "0.6368938", "0.6367938", "0.6364131", "0.63629425", "0.6352911", "0.63516116", "0.63492125", "0.63373333", "0.6331458", "0.63228875", "0.6322821", "0.6311266", "0.63087285", "0.6299017", "0.6275988", "0.6255227", "0.6242987", "0.6242689", "0.6227739", "0.62270445", "0.6224896", "0.6217945", "0.62165666", "0.62123364", "0.6206446", "0.620301", "0.6197986", "0.6195049", "0.6194734", "0.6191507", "0.618833", "0.61774343", "0.61708117", "0.6162945", "0.6161179", "0.61593187", "0.6153999", "0.6152121" ]
0.65725476
36
Compute the generator loss.
def __G_loss(self, D, fake):
    loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
    return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generator_loss(self, disc_generated_output, gen_output, target):\n # Compute the loss function\n loss_function = self.loss_func()\n\n # Generated GAN loss\n gan_loss = loss_function(tf.ones_like(disc_generated_output), disc_generated_output)\n\n # L1 loss\n l1_loss = tf.reduce_mean(tf.abs(target - gen_output))\n\n # Total generator loss\n total_gen_loss = gan_loss + (self.lambd * l1_loss)\n return total_gen_loss, gan_loss, l1_loss", "def compute_loss(self):", "def _define_generator_loss(self):\n loss = tf.reduce_mean(self._gen_discriminator_out)\n return tf.negative(loss, name='generator_loss')", "def generator_loss(gen_images):\n output = disc_net(gen_images)\n cats = output.new_full(output.shape, real_label)\n return gen_loss_criterion(output, cats)", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def loss_op(self):\n return self.loss", "def generator_loss(self, logits):\n if self.lossfunc == 'use_lsgan':\n # use mean squared error\n loss = tf.reduce_mean(tf.squared_difference(logits, REAL_LABEL))\n elif self.lossfunc == 'sigmoid_cross_entropy_with_logits':\n # heuristic, non-saturating loss\n # loss = tf.sigmoid(D(fake_y))\n # loss = -tf.reduce_mean(ops.safe_log(loss)) / 2\n \n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=tf.ones_like(logits))\n loss = tf.reduce_mean(loss)\n elif self.lossfunc == 'wgan':\n loss = -tf.reduce_mean(logits)\n return loss", "def ls_generator_loss(scores_fake):\r\n loss = torch.mean((scores_fake - 1) ** 2) / 2\r\n return loss", "def compute_loss(self, obs, returns):", "def get_loss(self):\n return self.loss / self.cnt", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def _generator_loss(self, y_real: tf.Tensor, y_fake: tf.Tensor) -> tf.Tensor:\n\n loss = self.loss(tf.ones_like(y_fake), y_fake - y_real)\n\n return tf.reduce_mean(loss)", "def loss(self):\n return self._loss", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = 
-1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def generator_loss_calculation(self, fake_examples, _):\n _, fake_scores = self.D(fake_examples)\n criterion = BCEWithLogitsLoss()\n generator_loss = criterion(fake_scores, torch.zeros_like(fake_scores))\n return generator_loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss(self) -> KernelLoss:\n return self._loss", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def _generator_loss(self, y_hat):\n\n l = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat ))\n print('generatorloss shape',tf.shape(l))\n return l", "def loss(self):\n return la.norm(self.resids) / self.normX", "def mse_loss(angles_gt, angles_gen):\n loss = (angles_gt - angles_gen)**2\n # loss = torch.sum(loss, dim=-1) # sum loss over dimensions\n loss = torch.mean(loss, dim=-1) # mean loss over images per task\n loss = torch.mean(loss, dim=-1) # mean loss over tasks\n return loss", "def loss_(self, batch):\n raise NotImplementedError", "def gen_loss_orig(self, noise_samples):\n generator_samples = self.gen_model(noise_samples)\n logits_gen = self.disc_model(generator_samples)\n # loss = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros(logits_gen.shape), logits=logits_gen))\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones(logits_gen.shape), logits=logits_gen))\n return loss", "def get_loss(self):\n raise NotImplementedError", "def generatorLoss(fakeOutput):\n return cross_entropy(tf.ones_like(fakeOutput), fakeOutput)", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def standard_generator_loss(disc_gen):\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n return loss_obj(tf.ones_like(disc_gen), disc_gen)", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def _get_loss(self):\n raise NotImplementedError", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def yield_loss(self, 
outputs, targets):\n return torch.sqrt(nn.MSELoss()(outputs, targets))", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def get_loss(self, Loss, results, inputs, device):\n return", "def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):\n # compute the generators loss\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n\n # Compute the disciminators loss. Use fake images from image pool to improve stability\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n\n return Gen_loss, D_A_loss, D_B_loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon):\n fake = gen(condition)\n disc_fake_pred = disc(fake, condition)\n adv_loss = adv_criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))\n recon_loss = recon_criterion(real, fake)\n gen_loss = adv_loss + (recon_loss * lambda_recon)\n return gen_loss", "def _compute_loss(self, state, action, reward, next_state, done):\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n q_value = q_values[action]\n\n next_state = torch.FloatTensor(next_state)\n next_q_values = self.dqn(next_state)\n next_q_value = next_q_values.max()\n\n if done:\n target = reward\n else:\n target = reward + self.discount_factor * next_q_value\n\n loss = (q_value - target).pow(2).mean()\n\n return loss", "def calc_loss(self, codes, encodings):\n return tf.reduce_mean((tf.stop_gradient(encodings) - codes) ** 2)", "def generator_loss_fn(y_generated, data_label=0):\n assert data_label == 1 or data_label == 0\n # TODO:\n # Implement the Generator loss.\n # Think about what you need to compare the input to, in order to\n # formulate the loss in terms of Binary Cross Entropy.\n # ====== YOUR CODE: ======\n device = y_generated.device\n loss_fn = nn.BCEWithLogitsLoss()\n loss = loss_fn(y_generated, torch.full_like(y_generated, data_label, device=device))\n # ========================\n return loss", "def compute_loss(self, batch, y_next_true):\n\n # Get the output of the gru layer for the input which serves as input to the reconstruction + forecasting model\n gru_output = self.model(batch, training=True)\n\n # Forecasting model loss calculation\n # Using mse yields the same result as RMSE and is more stable\n y_next_pred = self.model.forecasting_model(gru_output, training=True)\n y_next_pred = y_next_pred[:, -1, :] # only get the prediction for the last timestamp\n\n mse_for = tf.keras.losses.MeanSquaredError()\n loss_for = mse_for(y_next_true, y_next_pred)\n\n # Reconstruction model loss calculation\n # Like VAE based on: 
https://bit.ly/3oRMiQz\n mse_rec = tf.keras.losses.MeanSquaredError()\n reconstructed_output = self.model.reconstruction_model(gru_output)\n reconstruction_target = gru_output if 'reconstruct_gru' in self.hyper.variants else batch\n\n loss_rec = mse_rec(reconstruction_target, reconstructed_output)\n loss_rec += sum(self.model.reconstruction_model.losses) # Add KLD regularization loss\n\n # Overall loss\n loss = loss_for + loss_rec\n\n return loss", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def compute_loss(self, targets, logits, seq_length):\n\n\t\twith tf.name_scope('evaluate_loss'):\n\t\t\tloss, norm = self.loss_computer(targets, logits, seq_length)\n\t\t\t\n\t\treturn loss, norm", "def loss(self):\n return self._get(\"loss\")", "def generator_loss(self, fake_images=None, real_images=None, fake_output=None, l1_lambda=100, loss_strategy='both'):\n #TODO with try/except\n assert loss_strategy in ['GAN', 'L1', 'both'], \"Error: invalid type of loss. 
Should be 'GAN', 'L1' or 'both'\"\n if loss_strategy == \"GAN\":\n fake_loss = self.cross_entropy(ones_like(fake_output), fake_output)\n return fake_loss\n elif loss_strategy == \"L1\":\n L1_loss = l1_lambda*self.l1(real_images, fake_images)\n return L1_loss\n elif loss_strategy == 'both':\n fake_loss = self.cross_entropy(ones_like(fake_output), fake_output)\n L1_loss = self.l1(real_images, fake_images)\n return fake_loss + l1_lambda*L1_loss", "def calc_loss(self, codes, encodings):\n return tf.reduce_mean((encodings - tf.stop_gradient(codes)) ** 2)", "def ls_generator_loss(scores_fake):\n N = scores_fake.size()\n\n true_labels = Variable(torch.ones(N)).type(dtype)\n\n loss = 0.5 * ((torch.mean((scores_fake - true_labels)**2)))\n\n return loss", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def get_gen_loss(gen, disc, criterion, num_images, z_dim, device):\n # Create noise vectors and generate a batch of fake images.\n noise = get_noise(num_images, z_dim, device)\n \n # Get the discriminator's prediction of the fake image.\n fake_images = gen(noise)\n \n # Get the discriminator's prediction of the fake image.\n pred_fake = disc(fake_images)\n \n # Target vectors with 1`s. 
In this case, 1 represents real\n # From the perspective of the generator, \"true\" or 1 is the answer it wants\n target = torch.ones_like(pred_fake)\n \n # Calculate the generator's loss.\n gen_loss = criterion(pred_fake, target)\n gen_loss.backward(retain_graph=True)\n return gen_loss", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def loss(self, y: torch.Tensor, state: AlgorithmState) -> torch.Tensor:\n\n raise NotImplementedError()", "def _get_loss(self, states):\n states = self.normalization_layer(states)\n rnd_pred = self.rnd(states)\n\n with torch.no_grad():\n rnd_target = self.rnd_target(states)\n\n rnd_loss = self.rnd_loss_func(rnd_pred, rnd_target)\n\n return rnd_loss", "def loss(self, **kwargs):\n pass", "def loss(self):\n return 'mse'", "def _loss(self):\n\n cross_entropy = tf.reduce_mean(-tf.log(self.probability + epsilon) * self.y)\n self.loss = cross_entropy\n\n self.accuracy = tf.reduce_mean(\n tf.cast(tf.equal(tf.argmax(self.y, 1), self.prediction), tf.float32))", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def generator_loss(self, D_output_fake, gan_mode='lsgan', maxloss='mean'):\n if gan_mode == 'lsgan':\n if maxloss == 'mean':\n # use mean squared error\n loss = tf.reduce_mean(tf.squared_difference(D_output_fake, REAL_LABEL))\n elif maxloss == 'max':\n # use max squared error\n loss = tf.reduce_max(tf.squared_difference(D_output_fake, REAL_LABEL))\n elif maxloss == 'softmax':\n #use softmax squared error\n loss_map = (tf.squared_difference(D_output_fake, REAL_LABEL))\n batchsize = loss_map.get_shape()[0].value\n reshaped_loss_map = tf.reshape(loss_map, shape=[batchsize, -1])\n softmax_weight = tf.nn.softmax(reshaped_loss_map, dim=1)\n loss = tf.reduce_sum(softmax_weight * reshaped_loss_map)\n elif maxloss == 'focal':\n loss_map = (tf.squared_difference(D_output_fake, REAL_LABEL) +\n tf.square(D_output_fake)) / 2\n loss_map_shape = loss_map.get_shape()\n D_output_fake_shape = D_output_fake.get_shape()\n prob_weight = (1 - D_output_fake) * 1.5 # here debug the prob coef\n print 'loss_map_shape:', loss_map_shape\n print 'D_output_fake_shape:', D_output_fake_shape\n loss = tf.reduce_mean(prob_weight * loss_map)\n\n elif gan_mode == 'lcgan':\n loss = tf.reduce_mean(tf.pow(tf.abs(tf.subtract(D_output_fake, REAL_LABEL)), 3))\n elif gan_mode == 'gan':\n # heuristic, non-saturating loss\n loss = -tf.reduce_mean(ops.safe_log(D_output_fake)) / 2\n elif gan_mode == 'gan_logits':\n if self.patchgan:\n constant05 = tf.constant(0.5, shape=(self.batch_size, 64))\n loss = 
tf.reduce_mean(tf.losses.sigmoid_cross_entropy(constant05, D_output_fake))\n else:\n constant05 = tf.constant(0.5, shape=(self.batch_size, 1))\n loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(constant05, D_output_fake))\n elif gan_mode == 'wgangp':\n fake_result = D_output_fake\n g_loss = - tf.reduce_mean(fake_result) # This optimizes the generator.\n return g_loss\n else:\n print 'unknown gan mode %s' % gan_mode\n exit(0)\n return loss", "def relativistic_standard_generator_loss(disc_real, disc_gen):\n loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n return loss_obj(tf.ones_like(disc_real), (disc_gen - disc_real))", "def _compute_loss(self, decoder_output, labels):\n with tf.name_scope(\"compute_loss\"):\n language_logit = decoder_output.logits[0]\n attention_logit = decoder_output.logits[1]\n batch_size = self.params['dataset']['batch_size']\n\n language_losses = self._cross_entropy_sequence_loss(\n logits=language_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n attention_losses = self._cross_entropy_sequence_loss(\n logits=attention_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n\n language_loss = tf.reduce_sum(language_losses) / batch_size\n attention_loss = tf.reduce_sum(attention_losses) / batch_size\n loss = language_loss + attention_loss\n\n return loss", "def ss_loss_(self, batch):\n raise NotImplementedError", "def loss(self, batch: base.Batch, key: base.RngKey) -> base.Array:", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def _compute_loss(self, predictions, targets, **params):\n pass", "def gen_loss_wasserstein(self, noise_samples):\n generator_samples = self.gen_model(noise_samples)\n logits_gen = self.disc_model(generator_samples)\n\n loss = -tf.reduce_mean(logits_gen)\n return loss", "def get_loss(self, samples):\n return self.run_on_samples(self.loss.eval, samples)", "def generator_loss(discriminator, fake_images, real_labels, con_aug):\n\n discriminator.train()\n criterion = nn.BCELoss()\n condition = con_aug.detach()\n fake_img_fea = discriminator(fake_images)\n fake_logits = discriminator.conditioned_result(fake_img_fea, condition)\n fake_error = criterion(fake_logits, real_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n If it is a stage 2 discriminator then an additional error due to the\n score calculated from image features alone is added to the above error\n for loss calculation.\n \"\"\"\n fake_logits1 = discriminator.unconditioned_result(fake_img_fea)\n uncond_fake_error = criterion(fake_logits1, real_labels)\n fake_error += uncond_fake_error\n return fake_error", "def compute_loss(self, features, mode, params, precomputed):\n raise 
NotImplementedError(\"Model does not implement loss.\")", "def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss", "def pseudo_loss(self, params, batches):\n loss = 0\n for batch in batches:\n states = batch[\"states\"]\n actions = batch[\"actions\"]\n returns = batch[\"returns\"]\n\n preds = self.predict_jax(params, states)\n\n baseline = jnp.mean(returns, axis=0)\n preds_select = jnp.take_along_axis(preds, jnp.expand_dims(actions, axis=2), axis=2).squeeze()\n loss += (-jnp.mean(jnp.sum(preds_select * (returns - baseline))))\n\n return loss + self.l2_regularizer(params, 0.001) # try to divide by len(batches)?", "def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()", "def generator_loss(logits_fake, device):\r\n true_labels = torch.ones(logits_fake.size()).to(device=device, dtype=torch.float32)\r\n loss = bce_loss(logits_fake, true_labels)\r\n return loss", "def get_probability_loss(self):\n return sum(self._loss)/len(self._loss)", "def get_loss_fn(self):\n raise NotImplementedError()", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def loss_weights(self):\n return None", "def _calc_loss(self, p_act_output:torch.Tensor, p_pred_output:torch.Tensor) -> float:\r\n\r\n return self._loss_fct(p_act_output, p_pred_output)", "def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise 
NotImplementedError", "def loss_fn(self, targets, outputs, model):", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def setup_loss(self):\n with vs.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder, logits=self.label_predictions))", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in 
range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def profit_loss(self) -> float:\n return self.net_worth / self.initial_net_worth", "def evaluate_loss(net, data_iter, loss): #@save\n metric = d2l.Accumulator(2) # Sum of losses, no. of examples\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(d2l.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]", "def unnormalized_loss(self):\n return 0.5 * la.norm(self.resids) ** 2", "def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD" ]
[ "0.77820057", "0.7764371", "0.75634307", "0.75527585", "0.74715674", "0.73343015", "0.73278564", "0.7294605", "0.7289977", "0.72849333", "0.7278991", "0.7254628", "0.72545844", "0.72445464", "0.7168463", "0.71656865", "0.71556157", "0.71551716", "0.7152865", "0.71504474", "0.7121234", "0.7119874", "0.7118611", "0.71146804", "0.7086388", "0.7055467", "0.70489347", "0.70355695", "0.7000493", "0.69992715", "0.6959849", "0.6956327", "0.69108963", "0.68926084", "0.6892281", "0.6886131", "0.68803823", "0.6879687", "0.68731123", "0.68584055", "0.68584055", "0.6855979", "0.6841375", "0.68147415", "0.6797837", "0.67939216", "0.6790406", "0.67532474", "0.67469037", "0.6744009", "0.6743991", "0.67431104", "0.67341006", "0.67226726", "0.669793", "0.669392", "0.66881096", "0.6668589", "0.6661276", "0.6658749", "0.66549253", "0.66351116", "0.6624654", "0.661481", "0.65961707", "0.65865636", "0.65836364", "0.6583531", "0.6572997", "0.6570928", "0.6563291", "0.6563074", "0.6559063", "0.6555953", "0.6554854", "0.6554659", "0.6554443", "0.654699", "0.6539671", "0.6537934", "0.65326476", "0.65190196", "0.6518188", "0.65121907", "0.64996123", "0.64950955", "0.64867866", "0.6486509", "0.6483434", "0.6483434", "0.64833295", "0.6481573", "0.64755106", "0.6458851", "0.6457175", "0.64292747", "0.642526", "0.6415058", "0.64144963", "0.6414212" ]
0.67413956
52
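The metadata's `objective` field in these rows lists a single `triplet` combination of `query`, `document`, and `negatives`. As an illustrative sketch only (the field names come from that metadata entry; reading the rows as JSON lines is an assumption about the dump format, not something this file specifies), one way such a row could be expanded into (anchor, positive, negative) training triples is:

    import json

    def row_to_triplets(line, max_negatives=4):
        # Assumes the row is a JSON object carrying the fields named in the
        # metadata's `triplet` objective: `query`, `document`, `negatives`.
        row = json.loads(line)
        anchor = row["query"]        # natural-language task description / docstring
        positive = row["document"]   # the matching code snippet
        triples = []
        for negative in row["negatives"][:max_negatives]:
            triples.append((anchor, positive, negative))
        return triples

    # Usage sketch: stream a JSON-lines dump and collect triples.
    # with open("rows.jsonl") as f:
    #     triples = [t for line in f for t in row_to_triplets(line)]

The per-row `negative_scores` list is index-aligned with `negatives`, so the same loop could also carry a score alongside each negative if a scored objective were wanted.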
Compute the cycle consistency loss. L_cyc = lamA * E[ L1_norm(F(G(A)) - A) ] + lamB * E[ L1_norm(G(F(B)) - B) ]
def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
    loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \
           self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))
    return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cycle_consistency_loss(self, ra, rb, fa, fb):\n with tf.device(\"/gpu:0\"):\n backward_loss = tf.reduce_mean(tf.abs(self.Ga2b(fa) - rb))\n with tf.device(\"/gpu:1\"):\n forward_loss = tf.reduce_mean(tf.abs(self.Gb2a(fb) - ra))\n loss = self.lambda1 * forward_loss + self.lambda2 * backward_loss\n return loss", "def directed_cycle_score(A):\n\n # Implement your cycle score given Problem 4 Part 2\n temp_matrix = np.zeros(A.shape)\n alpha = 0.05\n k = 0\n summation_term = 999999\n num_terms = A.shape[0]\n # while change < 0.05:\n for i in range(num_terms):\n summation_term = (1 / np.math.factorial(k)) * expm(A)\n temp_matrix += summation_term\n\n cycle_score = np.trace(temp_matrix) - (A.shape[0] * num_terms)\n return cycle_score", "def cycle_consistency_loss(self, reconstructed_x, reconstructed_y, x, y, loss_mode=2, ):\n if loss_mode == 1:\n forward_loss = tf.reduce_mean(tf.abs(reconstructed_x - x))\n backward_loss = tf.reduce_mean(tf.abs(reconstructed_y - y))\n elif loss_mode == 2:\n forward_loss = tf.reduce_mean(tf.square(reconstructed_x - x))\n backward_loss = tf.reduce_mean(tf.square(reconstructed_y - y))\n elif loss_mode == 3:\n forward_loss = tf.reduce_mean(tf.losses.huber_loss(x, reconstructed_x, weights=5, delta=0.2))\n backward_loss = tf.reduce_mean(tf.losses.huber_loss(y, reconstructed_y, weights=5, delta=0.2))\n elif loss_mode == 0:\n print 'cycle softmax'\n forward_loss_map = tf.square(reconstructed_x - x)\n backward_loss_map = tf.square(reconstructed_y - y)\n batchsize = forward_loss_map.get_shape()[0].value\n cycle_softmax_coef = 0.75\n\n reshaped_forward_loss_map = tf.reshape(forward_loss_map, shape=[batchsize, -1])\n forward_softmax_weight = tf.nn.softmax(reshaped_forward_loss_map*cycle_softmax_coef, dim=1)\n forward_loss = tf.reduce_sum(forward_softmax_weight * reshaped_forward_loss_map)\n\n reshaped_backward_loss_map = tf.reshape(backward_loss_map, shape=[batchsize, -1])\n backward_softmax_weight = tf.nn.softmax(reshaped_backward_loss_map*cycle_softmax_coef, dim=1)\n backward_loss = tf.reduce_sum(backward_softmax_weight * reshaped_backward_loss_map)\n\n else:\n print 'Unknown cycle loss mode'\n exit(0)\n loss = self.lambda1 * forward_loss + self.lambda2 * backward_loss\n return self.lambda1 * forward_loss, self.lambda2 * backward_loss, loss", "def compute_losses(self):\n cycle_consistency_loss_a = \\\n self._lambda_a * losses.cycle_consistency_loss(\n real_images=self.input_a, generated_images=self.cycle_images_a,\n )\n cycle_consistency_loss_b = \\\n self._lambda_b * losses.cycle_consistency_loss(\n real_images=self.input_b, generated_images=self.cycle_images_b,\n )\n\n lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)\n lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)\n\n g_loss_A = \\\n cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b\n g_loss_B = \\\n cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a\n\n d_loss_A = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_a_is_real,\n prob_fake_is_real=self.prob_fake_pool_a_is_real,\n )\n d_loss_B = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_b_is_real,\n prob_fake_is_real=self.prob_fake_pool_b_is_real,\n )\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)\n\n self.model_vars = tf.trainable_variables()\n\n d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]\n g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]\n d_B_vars = [var for var in self.model_vars if 'd_B' 
in var.name]\n g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]\n\n self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)\n self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)\n self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)\n self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)\n\n for var in self.model_vars:\n print(var.name)\n\n # Summary variables for tensorboard\n self.g_A_loss_summ = tf.summary.scalar(\"g_A_loss\", g_loss_A)\n self.g_B_loss_summ = tf.summary.scalar(\"g_B_loss\", g_loss_B)\n self.d_A_loss_summ = tf.summary.scalar(\"d_A_loss\", d_loss_A)\n self.d_B_loss_summ = tf.summary.scalar(\"d_B_loss\", d_loss_B)", "def test_cfu_cycles(self):\n # Input: (function, in0, in1, cmd_valid, rsp_ready)\n # Output: (result, rsp_valid, cmd_ready)\n X = None\n DATA = [\n # Nothing\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Same cycle instruction, CPU not ready\n ((0, 1, 2, 1, 0), (3, 1, 1)),\n ((0, 0, 0, 0, 1), (3, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Multi-cycle instruction, CPU ready\n ((3, 3, 0, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (6, 1, 0)),\n # Same cycle instruction, CPU ready\n ((0, 5, 3, 1, 1), (8, 1, 1)),\n # Multi-cycle instruction, CPU not ready\n ((3, 2, 0, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 0, 0)),\n ((0, 0, 0, 0, 0), (2, 1, 0)),\n ((0, 0, 0, 0, 1), (2, 1, 0)),\n # Multi-cycle instruction, but always ready next cycle\n ((4, 3, 5, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (8, 1, 0)),\n # CPU not ready\n ((4, 3, 4, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 1), (7, 1, 0)),\n # Fallback instruction - same cycle, CPU ready\n ((7, 0, 0, 1, 1), (X, 1, 1)),\n ]\n\n def process():\n for n, (inputs, expected_outputs) in enumerate(DATA):\n func, i0, i1, cmd_valid, rsp_ready = inputs\n exp_result, exp_rsp_valid, exp_cmd_ready = expected_outputs\n yield self.dut.cmd_function_id.eq(func)\n yield self.dut.cmd_in0.eq(i0)\n yield self.dut.cmd_in1.eq(i1)\n yield self.dut.cmd_valid.eq(cmd_valid)\n yield self.dut.rsp_ready.eq(rsp_ready)\n yield Delay(0.1)\n if exp_result is not None:\n self.assertEqual((yield self.dut.rsp_out), exp_result)\n if exp_rsp_valid is not None:\n self.assertEqual((yield self.dut.rsp_valid), exp_rsp_valid)\n # We don't currently support returning non-OK responses, so\n # if our response is valid, it must be OK.\n if exp_rsp_valid:\n self.assertTrue((yield self.dut.rsp_ok))\n if exp_cmd_ready is not None:\n self.assertEqual((yield self.dut.cmd_ready), exp_cmd_ready)\n yield\n self.run_sim(process, False)", "def test_lcl_convergence():\n with pytest.raises(RuntimeError):\n lcl(1000. * units.mbar, 30. * units.degC, 20. 
* units.degC, max_iters=2)", "def cl_alm2d(alm1=None, alm2=None, lmax=100):\n if alm2 is None:\n alm2 = alm1\n cl = np.zeros(lmax+1)\n ls = np.arange(lmax+1)\n for l in ls:\n ms = np.arange(-l,l+1)\n \n cl[l] += ((alm1[l][ms]*np.conjugate(alm2[l][ms])).real).sum()/(2.*l+1.)\n return cl", "def lcs_dp(A, B):\n m = len(A)\n n = len(B) \n # array for storing the intermediate calculations \n temp_arr = [[None]*(n+1) for ]", "def cLCG(G):\n \n gens = []\n \n for g in G:\n gens.append(LCG(*g))\n \n m0 = G[0][3]-1\n \n while True:\n yield sum([(-1**j)*next(g) for j,g in enumerate(gens)]) % m0", "def test_lcl_convergence_issue():\n pressure = np.array([990, 973, 931, 925, 905]) * units.hPa\n temperature = np.array([14.4, 14.2, 13, 12.6, 11.4]) * units.degC\n dewpoint = np.array([14.4, 11.7, 8.2, 7.8, 7.6]) * units.degC\n lcl_pressure, _ = lcl(pressure[0], temperature[0], dewpoint[0])\n assert_almost_equal(lcl_pressure, 990 * units.hPa, 0)", "def lam(freq):\n return C / freq", "def test_cca_speed(self):\n shape = (64, 64)\n H1 = Variable(torch.randn(shape[0], shape[1], dtype=torch.double), requires_grad=True)\n H2 = Variable(torch.randn(shape[0], shape[1], dtype=torch.double), requires_grad=True)\n reg = 0.1\n N = 100\n\n fwd_func = CorrelationLoss.forward\n start = time()\n for _ in range(N):\n corr = fwd_func(None, H1, H2, reg, False, None) # using autograd\n corr.backward()\n print(\"autograd time taken\", time() - start)\n\n start = time()\n for _ in range(N):\n corr = CorrLoss(H1, H2, reg, False, None) # using my forward & backward\n corr.backward()\n print(\"my grad time taken\", time() - start)", "def tacsim_combined_in_C(G1, G2=None, node_attribute='weight', edge_attribute='weight', lamb=0.5, norm=True):\n # X: node similarity; Y: edge similarity\n X, Y = tacsim_in_C(G1, G2, node_attribute, edge_attribute)\n\n As, At = node_edge_adjacency(G1)\n if G2 is None:\n Bs, Bt = As, At\n else:\n Bs, Bt = node_edge_adjacency(G2)\n\n Z = Y + lamb * np.dot(np.dot(As.T, X), Bs) + (1 - lamb) * np.dot(np.dot(At.T, X), Bt)\n\n if norm:\n return normalized(Z)\n else:\n return Z", "def test_allow_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = True\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)\n self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)", "def test_find_cycles_multiple_cycles(self):\n self._build_sample_graph()\n # Adding cycle a -> d -> a\n self.skill_graph.add_prerequisite(self.sa.id, self.sd.id)\n # Adding cycle g -> h -> g\n sg = self.skill_graph.add(Skill.build('g', ''))\n sh = self.skill_graph.add(Skill.build('h', ''))\n self.skill_graph.add_prerequisite(sg.id, sh.id)\n self.skill_graph.add_prerequisite(sh.id, sg.id)\n\n expected = [[self.sa.id, self.sd.id], [sg.id, sh.id]]\n skill_map = SkillMap.load(self.course)\n successors = skill_map.build_successors()\n result = SkillMapMetrics(skill_map).simple_cycles()\n self.assertEqual(len(result), len(expected))\n for cycle in result:\n self.assertIn(sorted(cycle), expected)", "def cyclic_merit_lag(x,*args):\n CS = args[0]\n print \"rindex\",CS.rindex\n ht = get_ht(x,CS.rindex)\n hf = time2freq(ht)\n CS.hf = hf\n CS.ht = ht\n cs_model,csplus,csminus,phases = make_model_cs(hf,CS.s0,CS.bw,CS.ref_freq)\n merit = 2*(np.abs(cs_model[:,1:] - CS.cs[:,1:])**2).sum() #ignore zeroth 
harmonic (dc term)\n \n # the objval list keeps track of how the convergence is going\n CS.objval.append(merit)\n \n #gradient_lag\n diff = cs_model - CS.cs #model - data\n cc1 = cs2cc(diff * csminus)\n \n# original c code for reference:\n# for (ilag=0; ilag<cc1.nlag; ilag++) {\n# gradient->data[ilag] = 0.0 + I * 0.0;\n# int lag = (ilag<=cc1.nlag/2) ? ilag : ilag-cc1.nlag;\n# tau = (double)lag * (double)cs->nchan /\n# ( (double)cc1.nlag * cc1.bw*1.e6 );\n# for (ih=1; ih<cc1.nharm; ih++) {\n# phs = M_PI * tau * (double)ih * cc1.ref_freq;\n# phasor = cos(phs)+I*sin(phs);\n# fftwf_complex *ccval = get_cc(&cc1,ih,ip,ilag);\n# gradient->data[ilag] += 4.0 * (*ccval) * phasor\n# * conj(s0->data[ih]) / (float)cs->nchan;\n# }\n# }\n\n #we reuse phases and csminus, csplus from the make_model_cs call\n\n phasors = np.exp(1j*phases)\n cs0 = np.repeat(CS.s0[np.newaxis,:],CS.nlag,axis=0) #filter2cs\n grad = 4.0 * cc1 * phasors * np.conj(cs0) / CS.nchan\n grad = grad[:,1:].sum(1) # sum over all harmonics to get function of lag\n \n #conjugate(res)\n #calc positive shear\n #multiply\n #cs2cc\n cc2 = cs2cc(np.conj(diff) * csplus)\n grad2 = 4.0 * cc2 * np.conj(phasors) * cs0 / CS.nchan\n \n grad = grad + grad2[:,1:].sum(1)\n CS.grad = grad[:]\n CS.model = cs_model[:]\n\n if CS.iprint:\n print \"merit= %.7e grad= %.7e\" % (merit,(np.abs(grad)**2).sum())\n \n if CS.make_plots:\n if CS.niter % CS.plot_every == 0:\n CS.plotCurrentSolution()\n \n \n \n grad = get_params(grad, CS.rindex)\n CS.niter += 1\n \n return merit,grad", "def transition(self, closure):\n nlp = closure()\n\n for i in range(len(self.momentums)):\n self.momentums[i] = torch.randn_like(self.momentums[i])\n\n for m, p in zip(self.momentums, self.params):\n m -= (1/2) * (self.t/self.L) * p.grad\n\n for l in range(self.L):\n with torch.no_grad():\n for m, p in zip(self.momentums, self.params):\n p += (self.t/self.L) * m\n if (l+1) != self.L:\n nlp = closure()\n for m, p in zip(self.momentums, self.params):\n m -= (self.t/self.L) * p.grad\n\n return nlp", "def mclCycle(self, moveData, senseData):\n self.countCycles += 1\n # Insert code for these steps here:\n # 1. Set up a new sample list and a new weights list\n sample_lst = []\n weight_lst = []\n # 2. Loop over every particle, and for each particle:\n # 3. Call motionUpdate on the particle and moveData\n # 4. Compute the new weight for this particle by calling perceptionUpdate on the new (updated) location\n # 5. Add these to the new samples and new weights lists\n for p in self.samples:\n new_p = self.motionUpdate(p, moveData)\n new_w = self.perceptionUpdate(new_p, senseData)\n sample_lst.append(new_p)\n weight_lst.append(new_w)\n # 6. Normalize the weights (note I've provided a method for this)\n weight_lst = self.normalize(weight_lst)\n # 7. Use the weights to resample from the new sample list (see the method I've provided)\n sample_lst, weight_lst = self.resample(sample_lst, weight_lst)\n # 8. 
Store the new samples into self.samples, and the new weights to a local variable, newSampleWeights\n self.samples = sample_lst\n newSampleWeights = weight_lst\n self.printMCLStatus()\n CoM = self.findCenterOfMass(newSampleWeights)\n return CoM", "def test_simple_bind_gradient_graph_possible_with_cycle():\n data = mx.symbol.Variable('data')\n res = data + data + data + data + data + data + data + data\n res._simple_bind(ctx=mx.cpu(), data=(1,))", "def test_l1norm () :\n n = 10\n rfs = RewardFnSpace(list(range(n)))\n for i in range(10): \n b = rfs.bs[i]\n rfs.lp += b == 0\n rfs.lp.solve()\n rfs._setCoeffs()\n coeffs = np.array(rfs.coeffs)\n assert(np.linalg.norm(coeffs - np.ones(n)) < 1e-4)", "def vcycle(v, b):\n if (len(v) - 1) & (len(v) - 2) != 0:\n raise ValueError(\"Lenth of v must be 2**n + 1.\")\n\n for i in range(3):\n jacobi23(v, b)\n\n if len(v) <= 3:\n return\n\n r = b - Amul(v)\n r2 = 4. * restrict(r)\n e2 = np.zeros_like(r2)\n vcycle(e2, r2)\n v += prolong(e2)\n\n for i in range(3):\n jacobi23(v, b)", "def test_find_cycles_one_cycle(self):\n self._build_sample_graph()\n # Adding cycle a -> d -> a\n self.skill_graph.add_prerequisite(self.sa.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n self.assertEqual(6, len(skill_map.skills()))\n successors = skill_map.build_successors()\n self.assertEqual(\n sorted(SkillMapMetrics(skill_map).simple_cycles()[0]),\n [self.sa.id, self.sd.id])", "def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def MCLDemo():\n doorsWorld = [(0.0, 32.0, \"wall\"), (32.0, 48.0, \"no wall\"),\n (48.0, 93.0, \"wall\"), (93.0, 109.0, \"no wall\"), (109.0, 121.0, \"wall\"),\n (121.0, 137.0, \"no wall\"), (137.0, 182.0, \"wall\"), (182.0, 185.0, \"no wall\")]\n opposites = {\"wall\": \"no wall\", \"no wall\": \"wall\"}\n\n monte = MonteCarloLocalizer(1000, 0, 185, doorsWorld)\n\n # quick simulation to test the code\n actualLoc = 1.0\n expectedLoc = 1.0\n twoNumsStr = \"{0:7.3f} {1:7.3f}\"\n print(\"------------ Initial location, expected and actual:\", twoNumsStr.format(expectedLoc, actualLoc))\n while expectedLoc < 180:\n distMoved = random.gauss(2.0, 0.25)\n print(\"------------ Movement, expected and actual:\", twoNumsStr.format(2.0, distMoved))\n\n expectedLoc += 2.0\n actualLoc = actualLoc + distMoved\n print(\"------------ New location, expected and actual:\", twoNumsStr.format(expectedLoc, actualLoc))\n\n actualSensor = monte.getMapValue(actualLoc)\n oppSensor = opposites[actualSensor]\n sensorData = random.choices([actualSensor, oppSensor, \"unknown\"], [96, 1, 4])\n reportedData = sensorData[0]\n print(\"------------ Sensor value, actual and reported:\", actualSensor, reportedData)\n\n result = monte.mclCycle(2.0, reportedData)\n monte.printPoint(expectedLoc, 'E')\n monte.printPoint(actualLoc, 'A')\n if result is not None:\n 
monte.printPoint(result, 'C')\n print(\"MCL Result:\", result)", "def causal_structure_learning(X, lambda1=0.001, loss_type='l2', max_iter=100, h_tol=1e-8, rho_max=1e+16, w_threshold=0.3):\r\n\r\n def _loss(W):\r\n \"\"\"Evaluate value and gradient of loss.\"\"\"\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss\r\n\r\n def _h(W):\r\n \"\"\"Evaluate value and gradient of acyclicity constraint.\"\"\"\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h\r\n\r\n def _adj(w):\r\n \"\"\"Convert doubled variables ([2 d^2] array) back to original variables ([d, d] matrix).\"\"\"\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])\r\n\r\n def _func(w):\r\n \"\"\"Evaluate value and gradient of augmented Lagrangian for doubled variables ([2 d^2] array).\"\"\"\r\n W = _adj(w)\r\n loss, G_loss = _loss(W)\r\n h, G_h = _h(W)\r\n obj = loss + 0.5 * rho * h * h + alpha * h + lambda1 * w.sum()\r\n G_smooth = G_loss + (rho * h + alpha) * G_h\r\n g_obj = np.concatenate((G_smooth + lambda1, - G_smooth + lambda1), axis=None)\r\n return obj, g_obj\r\n\r\n n, d = X.shape\r\n w_est, rho, alpha, h = np.zeros(2 * d * d), 1.0, 0.0, np.inf # double w_est into (w_pos, w_neg)\r\n bnds = [(0, 0) if i == j else (0, None) for _ in range(2) for i in range(d) for j in range(d)]\r\n for iter_j in range(max_iter):\r\n w_new, h_new = None, None\r\n print(iter_j)\r\n while rho < rho_max:\r\n sol = sopt.minimize(_func, w_est, method='L-BFGS-B', jac=True, bounds=bnds)\r\n w_new = sol.x\r\n h_new, _ = _h(_adj(w_new))\r\n if h_new > 0.25 * h:\r\n rho *= 10\r\n else:\r\n break\r\n w_est, h = w_new, h_new\r\n alpha += rho * h\r\n if h <= h_tol or rho >= rho_max:\r\n break\r\n W_est = _adj(w_est)\r\n # print(W_est)\r\n W_est[np.abs(W_est) < w_threshold] = 0\r\n # print(W_est)\r\n return W_est, h", "def test_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model2(n,p)\n\n # Note: diff_A/diff_b do not require A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,B = TM.eval_A_and_b(theta)\n\n dA_1, dB_1 = TM.diff_A_and_b(A, B, theta, 0)\n dA_2, dB_2 = TM.diff_A_and_b(A, B, theta, 1)\n dA_3, dB_3 = TM.diff_A_and_b(A, B, theta, 2)\n dA_4, dB_4 = TM.diff_A_and_b(A, B, theta, 3)\n Z = numpy.zeros_like(dA_1.todense())\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1.todense(), TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2.todense(), TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3.todense(), Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4.todense(), Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the 
lcs model class\n #\n\n gLCS = LCSModel()\n gLCS.eval_A_and_b = TM.eval_A_and_b\n gLCS.diff_A_and_b = TM.diff_A_and_b\n \n gLCS.quiet=True\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A, B = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A,B = TM.eval_A_and_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt, B_dt = TM.eval_A_and_b(theta + dtheta)\n\n for k in range(p):\n x_dt = spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,_ = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))", "def experiment_linear_conv_ls(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_ls_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_ls_normfix_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' 
% dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 100000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n return params, log_dir, module_name, exclude", "def analytCylDifn(R, T):\n n = 30\n lmbdavec = spcl.jn_zeros(0, n)\n theta = 0*R\n for i, lmbda in enumerate(lmbdavec):\n theta += ((2./lmbda) * spcl.j0(lmbda*R)/spcl.j1(lmbda)\n * np.exp(-lmbda**2*T))\n return theta", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm", "def test_find_cycles_not_conected(self):\n self._build_sample_graph()\n # Adding cycle g -> h -> g\n sg = self.skill_graph.add(Skill.build('g', ''))\n sh = self.skill_graph.add(Skill.build('h', ''))\n self.skill_graph.add_prerequisite(sg.id, sh.id)\n self.skill_graph.add_prerequisite(sh.id, sg.id)\n skill_map = SkillMap.load(self.course)\n expected0 = [sg.id, sh.id]\n successors = skill_map.build_successors()\n result = SkillMapMetrics(skill_map).simple_cycles()\n self.assertEqual(sorted(result[0]), expected0)", "def remove_cycles_redund(R, tol=1e-12, verbose=True):\n zero_rays = np.where(np.count_nonzero(R, axis=0) == 0)[0]\n for ray_ind in zero_rays:\n R = np.delete(R, ray_ind, axis=1)\n cycle_rays = np.zeros((R.shape[0], 0))\n A_eq, b_eq, c, x0 = setup_cycle_LP(independent_rows_qr(normalize_columns(np.array(R, dtype='float'))), only_eq=True)\n\n if verbose:\n mp_print('Constructing basis for LP')\n basis = get_basis_columns_qr(np.asarray(A_eq, dtype='float'))\n b_eq, x0 = perturb_LP(b_eq, x0, A_eq, basis, 1e-10)\n if verbose:\n mp_print('Starting linearity check using LP.')\n cycle_present, status, cycle_indices = cycle_check_with_output(c, np.asarray(A_eq, dtype='float'), x0, basis)\n\n if status != 0:\n print(\"Cycle check failed, trying normal LP\")\n A_ub, b_ub, A_eq, b_eq, c, x0 = setup_cycle_LP(independent_rows_qr(normalize_columns(np.array(R, dtype='float'))))\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex', options={'tol': 1e-12},\n x0=x0)\n if res.status == 4:\n print(\"Numerical difficulties with revised simplex, trying interior point method instead\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='interior-point', options={'tol': 1e-12})\n\n cycle_present = True if np.max(res.x) > 90 else False\n if cycle_present:\n cycle_indices = np.where(res.x > 90)[0]\n if np.any(np.isnan(res.x)):\n raise Exception('Remove cycles did not work, because LP-solver had issues. 
Try to solve this.')\n\n # if the objective is unbounded, there is a cycle that sums to zero\n while cycle_present:\n # Find minimal cycle\n met = -1\n counter = 0\n while met < 0:\n cycle_ind = cycle_indices[counter]\n met = get_remove_metabolite_redund(R, cycle_ind)\n counter = counter + 1\n\n cycle_rays = np.append(cycle_rays, R[:, cycle_ind][:, np.newaxis], axis=1)\n R = cancel_with_cycle_redund(R, met, cycle_ind)\n\n # Do new LP to check if there is still a cycle present.\n A_eq, b_eq, c, x0 = setup_cycle_LP(independent_rows_qr(normalize_columns(np.array(R, dtype='float'))), only_eq=True)\n\n basis = get_basis_columns_qr(np.asarray(A_eq, dtype='float'))\n b_eq, x0 = perturb_LP(b_eq, x0, A_eq, basis, 1e-10)\n if verbose:\n mp_print('Starting linearity check in H_ineq using LP.')\n cycle_present, status, cycle_indices = cycle_check_with_output(c, np.asarray(A_eq, dtype='float'), x0, basis)\n\n if status != 0:\n print(\"Cycle check failed, trying normal LP\")\n A_ub, b_ub, A_eq, b_eq, c, x0 = setup_cycle_LP(\n independent_rows_qr(normalize_columns(np.array(R, dtype='float'))))\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex', options={'tol': 1e-12},\n x0=x0)\n if res.status == 4:\n print(\"Numerical difficulties with revised simplex, trying interior point method instead\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='interior-point', options={'tol': 1e-12})\n\n cycle_present = True if np.max(res.x) > 90 else False\n if cycle_present:\n cycle_indices = np.where(res.x > 90)[0]\n if np.any(np.isnan(res.x)):\n raise Exception('Remove cycles did not work, because LP-solver had issues. Try to solve this.')\n\n return R, cycle_rays", "def solve_step(self, bc_left=0):\n status = 0\n self.t += self.dt\n\n\n ### Construct the RHS vector\n # Implicit terms\n #cff1 = 0. # Fully implicit\n #cff2 = 0.\n cff1 = 0.5*(1. 
- 2.*self.c_im)*self.dt\n cff2 = 0.5*self.c_im*self.dt\n RHS = cff1*self.L_rhs.dot(self.B) +\\\n cff2*self.L_rhs.dot(self.B_n_m1)\n\n # Nonlinear (explicit) terms\n cff3 = self.dt*(3 + self.b_ex)*0.5\n cff4 = -self.dt*(1+2*self.b_ex)*0.5\n cff5 = self.dt*(self.b_ex)*0.5\n \n RHS += cff3*self.calc_nonlinear_rhs(self.B)\n RHS += cff4*self.calc_nonlinear_rhs(self.B_n_m1)\n RHS += cff5*self.calc_nonlinear_rhs(self.B_n_m2)\n\n # Other terms from the time-derivative\n RHS += self.B\n\n # Add the BCs to the RHS\n cff0 = 0.5*(1 + self.c_im)*self.dt\n self.add_bcs(RHS, bc_left, cff0, cff1, cff2)\n\n # Use the direct banded matrix solver (faster)\n self.B_n_p1[:] = la.solve_banded( (self._j,self._j), self.L_lhs.data[::-1,:], RHS)\n\n # Check solutions\n if np.any( np.isnan(self.B_n_p1)):\n return -1\n\n # Update the terms last\n self.B_n_m2[:] = self.B_n_m1\n self.B_n_m1[:] = self.B\n self.B[:] = self.B_n_p1\n\n ## Update the boundary terms in these equations\n self.bcs[2] = self.bcs[1]\n self.bcs[1] = self.bcs[0]\n self.bcs[0] = bc_left\n\n return status", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def _ls_solver(A, B, warm_start=None):\n # TODO - do conjugate gradient if n is too large\n return np.linalg.lstsq(A.T, B.T)[0].T", "def backward_G(self,i,direction):\n #lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n #lambda_B = self.opt.lambda_B\n lambda_reg = 0.01\n lambda_idt=1\n # Identity loss\n if(direction):\n #the idt loss \n self.loss_idt=0\n # if lambda_idt > 0:\n # # G_A should be identity if real_B is fed: ||G_A(B) - B|| 使用fakeB代替\n # self.idt_A = self.netG_A[self.orders[i]](self.fake_B)\n # self.loss_idt_A = self.criterionIdt(\n # self.idt_A, self.fake_B) * lambda_B * lambda_idt\n # # G_B should be identity if real_A is fed: ||G_B(A) - A||\n # self.idt_B = self.netG_B[self.orders[i]](self.real_A)\n # self.loss_idt_B = self.criterionIdt(\n # self.idt_B, self.real_A) * lambda_A * lambda_idt\n # else:\n # self.loss_idt_A = 0\n # self.loss_idt_B = 0\n\n self.loss_G_adv=self.criterionGAN_D(self.netDadv(self.fake_B),True)\n # GAN loss D_A(G_A(A))\n self.pred_fake = self.netD(self.fake_B)\n self.loss_G_A = self.criterionGAN_D(self.pred_fake,self.labels[i+1])\n # GAN loss D_B(G_B(B))\n \n self.loss_G_B = self.criterionGAN_D(self.netD(self.rec_A), self.labels[i])\n \n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg=torch.nn.MSELoss()\n #\n self.loss_reg = 
(self.criterionReg(self.mask_A, torch.ones_like(self.mask_A))+self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A + self.loss_G_B\n self.loss_G.backward()\n else:\n if lambda_idt > 0:\n self.idt_B = self.netG_A[self.orders_rev[i]](self.real_A)\n self.loss_idt = self.criterionIdt(\n self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt = 0\n\n self.loss_G_adv = self.criterionGAN_D(self.netDadv(self.fake_B), True)\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN_D(\n self.netD(self.fake_B), self.labels_rev[i])\n # GAN loss D_B(G_B(B))\n\n self.loss_G_B = self.criterionGAN_D(\n self.netD(self.rec_A), self.labels[0])\n\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(\n self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg = torch.nn.MSELoss()\n self.loss_reg = -(self.criterionReg(self.mask_A, torch.ones_like(self.mask_A)) +\n self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A +self.loss_G_B\n self.loss_G.backward()", "def L_model_backward_test_case():\n np.random.seed(3)\n AL = np.random.randn(1, 2)\n Y = np.array([[1, 0]])\n\n A1 = np.random.randn(4,2)\n W1 = np.random.randn(3,4)\n b1 = np.random.randn(3,1)\n Z1 = np.random.randn(3,2)\n linear_cache_activation_1 = ((A1, W1, b1), Z1)\n\n A2 = np.random.randn(3,2)\n W2 = np.random.randn(1,3)\n b2 = np.random.randn(1,1)\n Z2 = np.random.randn(1,2)\n linear_cache_activation_2 = ((A2, W2, b2), Z2)\n\n caches = (linear_cache_activation_1, linear_cache_activation_2)\n\n return AL, Y, caches", "def experiment_linear_conv_constant_lr(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_constant_lr_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_constant_lr_normfix_%s' % adv_norm_type\n exclude = '*'\n\n # d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n d_over_n = [16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n 
('regularizer', 'none'),\n ])\n\n params = []\n\n # Conv linear constant lr\n cd_fixed_lr = nameit('optim', [\n ('name', ['gd']),\n ('niters', 100000),\n ('lr', [\n 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2, 1e-1,\n 3e-1, 1, 2, 3, 6, 9, 10, 20, 30, 50\n ]),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+cd_fixed_lr)]\n\n return params, log_dir, module_name, exclude", "def test_cycles():\n graph = Graph()\n for one, two in [(1, 2), (2, 3), (3, 1)]:\n graph.add_edge(one, two)\n cycles = list(graph.find_cycles())\n eq_(len(cycles), 1)\n eq_(cycles[0], [1, 2, 3])", "def backward_G(self):\n # First, G(A) should fake the discriminator\n if self.TPN_enabled:\n fake_AB = torch.cat((self.true_time_layer, self.real_A, self.fake_B), 1)\n else:\n fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n pred_fake = self.netD(fake_AB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n \n # Second, G(A) = B\n # Weighted L1 Loss\n if self.opt.lambda_L2 > 0: # If lambda_L2 is not > 0, no need to perform extra computation\n fake_B_tumour = self.fake_B.clone().detach()\n real_B_tumour = self.real_B.clone().detach()\n fake_B_tumour[fake_B_tumour < 0.5] = 0\n real_B_tumour[fake_B_tumour < 0.5] = 0\n self.loss_G_L1 = self.opt.lambda_L1 * (self.criterionL1(self.fake_B, self.real_B) * (1 - self.opt.lambda_L2) + \\\n self.criterionL1(fake_B_tumour, real_B_tumour) * self.opt.lambda_L2)\n else:\n ### ORIGINAL ###\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1\n\n # TPN Loss\n if self.TPN_enabled:\n true_time_tensor = torch.ones(self.fake_time.shape) * self.true_time\n self.loss_G_TPN = self.criterionL1(true_time_tensor, self.fake_time.cpu()) * self.opt.gamma\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_G_TPN.to(self.device)\n else:\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n\n self.loss_G.backward()", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def cycle_GL(N):\n #Check user input types\n assert isinstance(N, int), \"In the cycle_GL function, the number of nodes (N) must be an integer. Recieved type %r.\" % type(N).__name__\n #Check user input ranges/sizes\n assert N > 0, \"In the cycle_GL function, number of nodes (N) must be positive. Recieved %r.\" % N\n\n ones = np.ones(N-1)\n L = 2*np.identity(N) - np.diag(ones, 1) - np.diag(ones, -1)\n L[N-1, 0] = -1\n L[0, N-1] = -1\n\n return L", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. 
\n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def lbd_func(C):\n if C == 0:\n return 0.0\n lbd = 1 / C\n return lbd", "def CG(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n r = b - np.dot(A,x)\n s = r.copy()\n while not(converged):\n denom = np.dot(s, np.dot(A,s))\n alpha = np.dot(s,r)/denom\n x = x + alpha*s \n r = b - np.dot(A,x)\n beta = - np.dot(r,np.dot(A,s))/denom\n s = r + beta * s\n relative_change = np.linalg.norm(r)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x", "def fit_onecycle(\n self,\n lr,\n epochs,\n checkpoint_folder=None,\n cycle_momentum=True,\n max_momentum=0.95,\n min_momentum=0.85,\n class_weight=None,\n callbacks=[],\n steps_per_epoch=None,\n verbose=1,\n ):\n if not self._is_adamlike() and cycle_momentum:\n warnings.warn(\n \"cyclical momentum has been disabled because \"\n + 'optimizer is not \"Adam-like\" with beta_1 param'\n )\n cycle_momentum = False\n\n num_samples = U.nsamples_from_data(self.train_data)\n if steps_per_epoch is None:\n steps_per_epoch = math.ceil(num_samples / self.batch_size)\n\n # setup callbacks for learning rates and early stopping\n if not callbacks:\n kcallbacks = []\n else:\n kcallbacks = callbacks[:]\n if cycle_momentum:\n max_momentum = max_momentum\n min_momentum = min_momentum\n else:\n max_momentum = None\n min_momentum = None\n\n from .lroptimize.triangular import CyclicLR\n\n clr = CyclicLR(\n base_lr=lr / 10,\n max_lr=lr,\n step_size=math.ceil((steps_per_epoch * epochs) / 2),\n reduce_on_plateau=0,\n max_momentum=max_momentum,\n min_momentum=min_momentum,\n verbose=verbose,\n )\n kcallbacks.append(clr)\n\n # start training\n policy = \"onecycle\"\n U.vprint(\"\\n\", verbose=verbose)\n U.vprint(\n \"begin training using %s policy with max lr of %s...\" % (policy, lr),\n verbose=verbose,\n )\n hist = self.fit(\n lr,\n epochs,\n early_stopping=None,\n checkpoint_folder=checkpoint_folder,\n verbose=verbose,\n class_weight=class_weight,\n callbacks=kcallbacks,\n steps_per_epoch=steps_per_epoch,\n )\n hist.history[\"lr\"] = clr.history[\"lr\"]\n hist.history[\"iterations\"] = clr.history[\"iterations\"]\n if cycle_momentum:\n hist.history[\"momentum\"] = clr.history[\"momentum\"]\n self.history = hist\n return hist", "def train_cyclic(inputs, outputs, eta=0.55, maxit=1000, momentum=0.1, plot=False):\n global ERROR\n ERROR.clear()\n min_error = 100\n ins_outs = list(zip(inputs, outputs))\n counter = 0\n while counter <= maxit:\n counter += 1\n shuffle(ins_outs)\n for pair in ins_outs:\n i, o = pair\n error2(i, o)\n ERROR.append(layers[-1][\"error2\"].item())\n try:\n if ERROR[-1] < min_error:\n min_error = ERROR[-1]\n optimal_w 
= getweigths()\n min_error_counter = counter\n print(\n f\"Minimum error found = {min_error}, at counter = {min_error_counter}\", end=\"\\r\")\n except:\n pass\n backpropagate(eta, momentum)\n updateweigths()\n setweigths(optimal_w)\n print(f\"\\vMinimum error reached at the {min_error_counter}st cycle\")\n if plot:\n plt.plot(np.arange(len(ERROR)), ERROR, \"b*-\")\n plt.xlabel(\"Number of cycles\")\n plt.ylabel(\"Sum of quadratic errors\")\n plt.title(\"CYCLIC MODE\\nERROR vs CYCLES\")\n plt.grid()\n plt.show()", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def _compute_lcs(source, target):\n table = _lcs_table(source, target)\n return _backtrack(table, source, target, len(source), len(target))", "def _cycle_finished_gconst(pattern, _):\n _check_missed_beats(pattern)\n # We update the measurements of the rhythm taking the measures of the\n # regular cycles.\n rrs, pqs, rts = _get_measures(pattern, 0)\n if len(pqs) == 0:\n pqm, pqst = 0, 0\n elif len(pqs) == 1:\n # TODO use specific deviations for PQ rather than QT\n pqm, pqst = pqs[0], C.QT_ERR_STD\n else:\n pqm, pqst = np.mean(pqs), max(np.std(pqs), C.MIN_QT_STD)\n if len(rts) == 0:\n rtm, rtst = 0, 0\n elif len(rts) == 1:\n rtm, rtst = rts[0], C.QT_ERR_STD\n else:\n rtm, rtst = np.mean(rts), max(np.std(rts), C.MIN_QT_STD)\n pattern.hypothesis.meas = o.CycleMeasurements((np.mean(rrs), np.std(rrs)), (rtm, rtst), (pqm, pqst))", "def l2_reg_cost(cost, lambtha, weights, L, m):\n w_norm = 0\n for i in range(1, L + 1):\n w_norm += np.linalg.norm(weights['W' + str(i)])\n L2 = cost + (lambtha / (2 * m) * w_norm)\n return L2", "def autoencoder_cycle_loss(self, reconstructed_x, reconstructed_y, x, y, Encoder_X, Encoder_Y, loss_mode=2):\n if loss_mode == 1:\n ae_forward_loss = tf.reduce_mean(tf.abs(Encoder_X(reconstructed_x) - Encoder_X(x)))\n ae_backward_loss = tf.reduce_mean(tf.abs(Encoder_Y(reconstructed_y) - Encoder_Y(y)))\n elif loss_mode == 2:\n ae_forward_loss = tf.reduce_mean(tf.square(Encoder_X(reconstructed_x) - Encoder_X(x)))\n ae_backward_loss = tf.reduce_mean(tf.square(Encoder_Y(reconstructed_y) - Encoder_Y(y)))\n elif loss_mode == 3:\n ae_forward_loss = tf.reduce_mean(tf.losses.huber_loss(Encoder_X(x), Encoder_X(reconstructed_x)))\n ae_backward_loss = tf.reduce_mean(tf.losses.huber_loss(Encoder_Y(y), Encoder_Y(reconstructed_y)))\n else:\n print 'Unknown ae loss mode'\n exit(0)\n loss = self.sigma1 * ae_forward_loss + self.sigma2 * ae_backward_loss\n return self.sigma1 * ae_forward_loss, self.sigma2 * ae_backward_loss, loss", "def test_lu_forward_sub():\t\n\t# test 1\n\tL = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\n\tb = np.array([4, -8, 9, 6])\n\n\ty = lu_forward_sub(L, b) \t\t\n\ty_soln = np.array([4,0,5,8])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y - y_soln) < 1.e-10\n\n\t# test 2\n\tL2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 
-100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\n\tb2 = np.array ([[1.01], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [0.]])\n\n\ty2 = lu_forward_sub(L2, b2) \t\t\n\ty_soln2 = np.array([1.01, -101.99, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 99])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y2 - y_soln2) < 1.e-10", "def transition(self, closure=None):\n nlp = None\n if closure is not None:\n nlp = closure()\n\n with torch.no_grad():\n for m, p in zip(self.momentums, self.params):\n noise = (self.chol_cov @ torch.randn(2, p.nelement())).reshape([2] + list(p.shape))\n p += self.c_2 * m - (self.t - self.c_2)/(self.gam * self.c_2) * p.grad + noise[0]\n m *= self.c_1\n m -= (self.c_2/self.c_2) * p.grad\n m += noise[1]\n\n return nlp", "def test_dc_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model1(n,p)\n\n # Note: diff_A/diff_b do not require A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n\n dA_1 = TM.diff_A(A, theta, 0).todense()\n dA_2 = TM.diff_A(A, theta, 1).todense()\n dA_3 = TM.diff_A(A, theta, 2).todense()\n dA_4 = TM.diff_A(A, theta, 3).todense()\n Z = numpy.zeros_like(dA_1)\n \n dB_1 = TM.diff_b(B, theta, 0)\n dB_2 = TM.diff_b(B, theta, 1)\n dB_3 = TM.diff_b(B, theta, 2)\n dB_4 = TM.diff_b(B, theta, 3)\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1, TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2, TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3, Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4, Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the lcs model class\n #\n\n gLCS = DC_LCSModel()\n gLCS.eval_A = TM.eval_A\n gLCS.eval_b = TM.eval_b\n gLCS.diff_A = TM.diff_A\n gLCS.diff_b = TM.diff_b\n \n gLCS.quiet=True\n gLCS.A_params_mask = numpy.array([True, True, False, False])\n gLCS.b_params_mask = numpy.array([False, False, True, True])\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt = TM.eval_A(theta + dtheta)\n B_dt = TM.eval_b(theta + dtheta)\n\n for k in range(p):\n x_dt = 
spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A = TM.eval_A(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))", "def test010_similarity(self, b_size=8, dim=1024,\n alpha_fwd=0.999, alpha_bkw=0.99, eps=1e-05, itrs=8):\n # instantiate inputs\n input = torch.randn(b_size, dim)\n input_0 = input.clone().detach().requires_grad_(True)\n input_1 = input.clone().detach().requires_grad_(True)\n # instantiate gradient at the output\n grad_out = torch.randn(b_size, dim)\n\n # instantiate Linearized Online Norm class\n onlin = OnlineNorm1D(dim, alpha_fwd=alpha_fwd, alpha_bkw=alpha_bkw,\n eps=eps, b_size=b_size)\n\n # instantiate Looping Online Norm class\n onloop = OnlineNorm1D(dim, eps=eps,\n ctrl_norm=ControlNorm1DLoop(dim,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n eps=eps))\n\n for _ in range(itrs):\n # fprop through Linearized Online Norm class\n y_0 = onlin(input_0)\n # bprop through Linearized Online Norm class\n y_0.backward(grad_out)\n # fprop through Looping Online Norm class\n y_1 = onloop(input_1)\n # bprop through Looping Online Norm class\n y_1.backward(grad_out)\n\n # numerically compare output\n np.testing.assert_allclose(y_0.detach().numpy(),\n y_1.detach().numpy(),\n rtol=1e-4, atol=1e-5)\n # numerically grad_in\n np.testing.assert_allclose(input_0.grad.detach().numpy(),\n input_1.grad.detach().numpy(),\n rtol=1e-4, atol=1e-5)\n\n self.logger.info('Algorithm implemented using linearization of ops '\n 'numerically matches algorithm implemented with '\n 'loops')", "def LCA(T, n1, n2):\n\n pass", "def relaxed_ba_bias(Xinput, L, lamb, beta, max_iter=300):\n X = Xinput.T # X: n_samples x n_dim\n D, m = X.shape\n B = np.sign(np.random.rand(L, m))\n c1 = np.random.rand(L,1)\n c2 = np.random.rand(D,1)\n\n for i in range(max_iter):\n # given B, compute W1\n W1 = lamb*np.matmul(np.matmul((B - c1), X.T), \\\n np.linalg.inv(lamb*np.matmul(X,X.T) + beta*np.eye(D)))\n\n # given B, compute W2\n W2 = np.matmul( np.matmul((X-c2), B.T), \\\n np.linalg.inv(np.matmul(B,B.T) + beta*np.eye(L)))\n\n # compute c1\n c1 = (1.0/m)*np.matmul(B - np.matmul(W1, X), np.ones((m,1)))\n # compute c2\n c2 = (1.0/m)*np.matmul(X - np.matmul(W2, B), np.ones((m,1)))\n\n # given W1, W2, c1, c2, compute B\n Xtmp = X - c2\n H = np.matmul(W1, X) + c1\n B = learn_B_new(Xtmp.T, W2.T, B.T, H.T, lamb);\n\n B = B.T\n\n # X_reconst = np.matmul(W2, np.sign(np.matmul(W1, X) + c1)) + c2\n # mse = np.mean(np.square(X_reconst - X))\n # print('mse {}'.format(mse))\n return W2, W1, c2, c1, B", "def l2_reg_cost(cost, lambtha, weights, L, m):\n sumWeights = 0\n for i in range(1, L + 1):\n sumWeights += 
np.linalg.norm(weights['W' + str(i)])\n return cost + sumWeights * lambtha / (2 * m)", "def test_bd_cycles_ascending(fprime_test_api):\n length = 60\n count_pred = predicates.greater_than(length - 1)\n results = fprime_test_api.await_telemetry_count(\n count_pred, \"blockDrv.BD_Cycles\", timeout=length\n )\n last = None\n reordered = False\n ascending = True\n for result in results:\n if last is not None:\n last_time = last.get_time()\n result_time = result.get_time()\n if result_time - last_time > 1.5:\n msg = \"FSW didn't send an update between {} and {}\".format(\n last_time.to_readable(), result_time.to_readable()\n )\n fprime_test_api.log(msg)\n elif result_time < last_time:\n msg = \"There is potential reorder error between {} and {}\".format(\n last_time, result_time\n )\n fprime_test_api.log(msg)\n reordered = True\n\n if not result.get_val() > last.get_val():\n msg = \"Not all updates ascended: First ({}) Second ({})\".format(\n last.get_val(), result.get_val()\n )\n fprime_test_api.log(msg)\n ascending = False\n\n last = result\n\n case = True\n case &= fprime_test_api.test_assert(\n ascending, \"Expected all updates to ascend.\", True\n )\n case &= fprime_test_api.test_assert(\n not reordered, \"Expected no updates to be dropped.\", True\n )\n fprime_test_api.predicate_assert(\n count_pred,\n len(results) - 1,\n \"Expected >= {} updates\".format(length - 1),\n True,\n )\n fprime_test_api.assert_telemetry_count(0, \"rateGroup1Comp.RgCycleSlips\")\n assert case, \"Expected all checks to pass (ascending, reordering). See log.\"", "def my_c2d(A, B, dt):\n Ad = np.eye(A.shape[0]) + A * dt\n Bd = B * dt\n prev_A = A\n k = 2\n eps = 1e-8\n above_treshold = True\n while above_treshold:\n f_term = 1 / factorial(k)\n Bd_term = f_term * np.matmul(prev_A, B) * dt ** k\n Bd += Bd_term\n prev_A = matrix_power(A, k)\n Ad_term = f_term * prev_A * dt ** k\n Ad += Ad_term\n k += 1\n if np.sum(Ad_term) < eps and np.sum(Bd_term) < eps:\n above_treshold = False\n\n return Ad, Bd", "def _lagged_coherence_1freq(sig, freq, fs, n_cycles=3):\n\n # Determine number of samples to be used in each window to compute lagged coherence\n n_samps = int(np.ceil(n_cycles * fs / freq))\n\n # For each N-cycle chunk, calculate the fourier coefficient at the frequency of interest, freq\n chunks = _nonoverlapping_chunks(sig, n_samps)\n chunks_len = len(chunks)\n\n hann_window = hann(n_samps)\n fourier_f = np.fft.fftfreq(n_samps, 1 / float(fs))\n fourier_f_idx = np.argmin(np.abs(fourier_f - freq))\n fourier_coefsoi = np.zeros(chunks_len, dtype=complex)\n\n for ind, chunk in enumerate(chunks):\n fourier_coef = np.fft.fft(chunk * hann_window)\n fourier_coefsoi[ind] = fourier_coef[fourier_f_idx]\n\n # Compute the lagged coherence value\n lcs_num = 0\n for ind in range(chunks_len - 1):\n lcs_num += fourier_coefsoi[ind] * np.conj(fourier_coefsoi[ind + 1])\n lcs_denom = np.sqrt(np.sum(np.abs(fourier_coefsoi[:-1])**2) * np.sum(np.abs(fourier_coefsoi[1:])**2))\n\n return np.abs(lcs_num / lcs_denom)", "def ot_ul2_solve_lasso_cd(C, a, b, reg, nitermax=100000, tol=1e-14):\n\n X = get_X_lasso(C.shape[0], C.shape[1])\n X = X.dot(sp.diags((1 / C.ravel())))\n y = np.concatenate((a, b))\n reg2 = 1.0 / (2 * (C.shape[0] + C.shape[1]) * reg)\n model = Lasso(reg2, positive=True, fit_intercept=False, max_iter=nitermax, tol=tol)\n model.fit(X, y)\n G2 = model.coef_.reshape(C.shape) / C\n return G2", "def test_linear(self):\n G1 = SimpleNeurons(3)\n G2 = SimpleNeurons(2)\n\n G1pattern = np.asarray(\n [[ 0, 1, 0, 2],\n [-1, 1, 0, 1],\n [ 
1,-1,-1,-1]])\n G2pattern = np.asarray(\n [[0, 1, 4, 0],\n [1, -1.0/3, -1.0/3, -2.0/3]])\n\n G1.out_fct = lambda i: G1pattern[:, i]\n G2.out_fct = lambda i: G2pattern[:, i]\n\n G = RateLayer(2)\n G.add_source(G1)\n G.add_source(G2)\n\n G.Ws[0] = np.asarray(\n [[1, 2, 3],\n [1,-2, 1]])\n G.Ws[1] = np.asarray([1, -3])\n\n M = simulation.StateMonitor(G, 'out')\n\n dt = 1.0\n nsteps = 4\n tmax = nsteps*dt\n\n sim = simulation.Simulation(G1, G2, G, M, dt=dt)\n sim.run(tmax)\n\n self.assertTrue(np.allclose(M.out[0, :], [1, 1, 1, 1]))\n self.assertTrue(np.allclose(M.out[1, :], [0, -1, 0, 1]))", "def backward_val(self):\n self.loss_similarity = [NCC(warped_img, self.batch_fixed) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [NCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:]) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def perform_linear_conj_grad(lcg, tol=1e-4, max_iters=50, args=()):\n \n (x, update_x, update_direction, init) = lcg\n\n iters = 0\n init(*args)\n\n while iters < max_iters:\n obj_fun_lcg = update_x(*args)\n\n print '\\tL-CG iter ', iters, ' obj_fun_lcg = ', obj_fun_lcg, '\\r',\n if iters > 0 and 1 - old_obj_fun/obj_fun_lcg < tol and obj_fun_lcg>0: break\n update_direction(*args)\n iters += 1\n\n old_obj_fun = obj_fun_lcg\n if old_obj_fun > obj_fun_lcg:\n print >> sys.stderr, 'Warning: value of the CG objective function decreased'\n print\n return obj_fun_lcg", "def step(self, closure=None):\n loss = self.optimizer.step(closure)\n self._la_step += 1\n\n if self._la_step >= self._total_la_steps:\n self._la_step = 0\n # Lookahead and cache the current optimizer parameters\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state['cached_params']) # crucial line\n param_state['cached_params'].copy_(p.data)\n if self.pullback_momentum == \"pullback\":\n internal_momentum = self.optimizer.state[p][\"momentum_buffer\"]\n self.optimizer.state[p][\"momentum_buffer\"] = internal_momentum.mul_(self.la_alpha).add_(\n 1.0 - self.la_alpha, param_state[\"cached_mom\"])\n param_state[\"cached_mom\"] = self.optimizer.state[p][\"momentum_buffer\"]\n elif self.pullback_momentum == \"reset\":\n self.optimizer.state[p][\"momentum_buffer\"] = torch.zeros_like(p.data)\n\n return loss", "def test_no_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = False\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertAlmostEqual(np.linalg.norm(self.T.i_ampa), 0.0)\n self.assertAlmostEqual(np.linalg.norm(self.T.i_nmda), 0.0)", "def compute_CTMC_flow_in_logspace(adj_mat, time_resolution=.1,\n n_timepoints=10):\n\n n = 
len(adj_mat)\n\n # compute infinitesimal generator\n degrees = np.sum(adj_mat, axis=0) * 1.\n D = scipy.sparse.csr_matrix(np.diag(degrees))\n\n generator_mat = compute_infinitesimal_generator(adj_mat)\n\n # compute 2nd-order approximation to e^(time_resolution * generator_mat)\n tmp = time_resolution * generator_mat\n U = np.eye(n) + tmp + np.linalg.matrix_power(\n tmp, 2) / 2.\n\n print \"Bridge passed ..\"\n # Techinique: U^2k = (U^k)^2\n for j in xrange(n_timepoints):\n # compute flow matrix at time t = time_resolution * 2 ^j\n V = D.dot(U)\n\n # update U\n U = np.linalg.matrix_power(U, 2)\n\n # yield compouted flow matrix\n yield V", "def grad_loss_wrt_b(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1=y.reshape((N,1))\n dr = (1+np.exp(1*y1*k1))\n nr = -y1\n c2=0\n c1 = nr/dr\n for i in range(N):\n c2 +=c1[i][0]\n l_b = c2 / N\n #b2 = np.copy(self.b)\n #b1 = np.zeros((10,1))\n #b1[0] = b2\n #for i in range(1,10):\n #b1[i] = b1[i-1] - self.lr*l_b\n\n\n\n return l_b\n\n\n #raise NotImplementedError", "def dsc_finder(L_G2_G1, L_G1_GO1, tol1):\n\n L_GO1_G1 = nla.inv(L_G1_GO1)\n # Reciprocal lattice of G1\n # --------------------------------------------------------------\n L_rG1_GO1 = reciprocal_mat(L_G1_GO1)\n L_GO1_rG1 = nla.inv(L_rG1_GO1)\n # Reciprocal lattice of G2\n # --------------------------------------------------------------\n L_G2_GO1 = L_G1_GO1.dot(L_G2_G1)\n L_rG2_GO1 = reciprocal_mat(L_G2_GO1)\n\n # Transformation of the Reciprocal lattices\n # R_rG1TorG2_rG1 = L_rG2_G1*L_G1_rG1\n L_rG2_rG1 = L_GO1_rG1.dot(L_rG2_GO1)\n Sigma_star = sigma_calc(L_rG2_rG1, tol1)\n # # Check Sigma_star == Sigma\n # LI_rG2_rG1 = L_rG2_rG1*Sigma_star\n # if int_man.check_int_mat(LI_rG2_rG1, 1e-10):\n # LI_rG2_rG1 = np.around(np.array(LI_rG2_rG1, dtype='double'))\n # LI_rG2_rG1 = (np.array(LI_rG2_rG1, dtype='int64'))\n # else:\n # raise Exception(\"Not an integer matrix\")\n\n # CSL of the reciprocal lattices\n L_rCSL_rG1 = csl_finder(L_rG2_rG1, L_rG1_GO1, tol1)\n L_rCSL_GO1 = L_rG1_GO1.dot(L_rCSL_rG1)\n\n L_DSC_GO1 = reciprocal_mat(L_rCSL_GO1)\n L_DSC_G1 = L_GO1_G1.dot(L_DSC_GO1)\n Tmat = np.array(L_DSC_G1*Sigma_star, dtype='double')\n\n if int_man.check_int_mat(Tmat, tol1):\n Tmat = np.around(Tmat)\n Tmat = np.array(Tmat, dtype='int64')\n L_DSC_G1 = Tmat/Sigma_star\n else:\n raise Exception(\"DSC*Sigma is not an integer matrix\")\n\n L_DSC1_DSC = rpl.reduce_po_lat(L_DSC_G1, L_G1_GO1, tol1)\n LLL_DSC_G1 = L_DSC_G1.dot(L_DSC1_DSC)\n\n if int_man.check_int_mat(LLL_DSC_G1*Sigma_star, tol1):\n Tmat = np.array(LLL_DSC_G1*Sigma_star, dtype='double')\n Tmat = np.around(Tmat)\n Tmat = np.array(Tmat, dtype='int64')\n LLL_DSC_G1 = Tmat/Sigma_star\n else:\n raise Exception(\"DSC*Sigma is not an integer matrix\")\n\n L_DSC_G1 = make_right_handed(LLL_DSC_G1, L_G1_GO1)\n return L_DSC_G1", "def test_error_map_fct(self):\n # reproducible arbitrariness\n np.random.seed(2343)\n\n nsteps = 12\n nchan = 4\n tmax = nsteps*self.dt\n sequence = np.random.randn(nsteps, self.N)\n\n target = np.random.randn(nchan, nsteps)\n controller = LinearController(self.G, target, tau=None)\n controller.W = np.random.randn(*controller.W.shape)\n controller.error_map_fct = lambda err: np.tanh(err)\n\n self.G.out_fct = lambda i: sequence[i]\n\n class SourceErrorGrabber(object):\n def __init__(self, target):\n self.target = target\n self.order = 10\n \n def prepare(self, tmax, dt):\n nsteps = int_r(tmax/dt)\n self.motor_error = np.zeros((nsteps, self.target.source.N))\n\n def evolve(self, t, dt):\n i = 
int_r(t/dt)\n self.motor_error[i, :] = self.target.get_source_error()\n\n M = SourceErrorGrabber(controller)\n M1 = simulation.StateMonitor(controller, 'out')\n\n sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)\n sim.run(tmax)\n\n for i in xrange(int_r(tmax/self.dt)):\n diff = M1.out[:, i] - target[:, i]\n self.assertTrue(np.allclose(M.motor_error[i],\n np.dot(controller.error_map_fct(diff), controller.W)))", "def P2l_rec_norm(ells, cost):\n P22 = 3. * (1. - cost**2)\n P23 = 15. * cost * (1. - cost**2)\n P2l = np.zeros(len(ells))\n P2l[0] = 0.\n P2l[1] = 0.\n P2l[2] = P22\n P2l[3] = P23\n P2l_norm = np.copy(P2l)\n P2l_norm[2] *= P2l_norm_prefac(2)\n P2l_norm[3] *= P2l_norm_prefac(3)\n for ell in ells[4:]:\n # print ell, P2l[ell-1], P2l[ell-2]\n a = np.sqrt((4 * ell**2 - 1.) / (ell**2 - 4))\n b = cost * P2l_norm[ell - 1]\n c = np.sqrt(((ell - 1.)**2 - 4) /\n (4 * (ell - 1.)**2 - 1)) * P2l_norm[ell - 2]\n # print a,b,c\n P2l_norm[ell] = a * (b - c)\n # print ell, P2l_norm[ell], P2l_norm_prefac(ell)\n P2l[ell] = P2l_norm[ell] / P2l_norm_prefac(ell)\n return P2l", "def grad_chol(L):\n n = len(L)\n I = np.eye(n)\n s1 = I[:, None, :, None] * L[None, :, None, :]\n s2 = I[None, :, :, None] * L[:, None, None, :]\n return (s1 + s2).reshape(2 * (n**2,))", "def eval_tls_global(self, step: int, check_interval: int):\n\n # Phase 1: Calculate own score for each TL\n # If the step is a multiple of the check interval, do the calculations\n if step % check_interval == 0:\n for junc in self.junctions:\n for lane in junc.connected_lanes: # Each TL should control 1 lane\n self.lane_scores[lane.ID] = self.getNumVehicles(lane.ID)\n\n # Phase 2: Add connected score and own score, determine new TL combination\n for junc in self.junctions:\n for tl_combination in junc.tl_combinations:\n tl_combination.score = 0\n for lane in tl_combination.corresponding_lanes:\n connected_score = 0\n for connected_lane in lane.previous_tl_connected_lanes:\n connected_score += self.lane_scores[connected_lane]\n tl_combination.score += self.lane_scores[lane.ID] + (connected_score * self.connectedFactor)\n newState = self.getNewRYGState(junc.tl_combinations)\n junc.next_state = newState\n\n # Getting the current state and making all red lights yellow\n current_state = traci.trafficlight.getRedYellowGreenState(junc.ID)\n if newState:\n if current_state != newState:\n traci.trafficlight.setRedYellowGreenState(junc.ID, current_state.replace(\"G\", \"y\"))\n\n # If the yellow light has been on for 3 steps we switch to the new green state\n elif (step - 3) % check_interval == 0:\n for junc in self.junctions:\n if junc.next_state:\n traci.trafficlight.setRedYellowGreenState(junc.ID, junc.next_state)", "def angle_aberration_CMF_to_LF(r_packet, time_explosion, mu):\n ct = C_SPEED_OF_LIGHT * time_explosion\n beta = r_packet.r / (ct)\n return (r_packet.mu + beta) / (1.0 + beta * mu)", "def step(self, closure=None):\n loss = self.optimizer.step(closure)\n self._la_step += 1\n\n if self._la_step >= self._total_la_steps:\n self._la_step = 0\n # Lookahead and cache the current optimizer parameters\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n p.data.mul_(self.la_alpha).add_(\n param_state['cached_params'], alpha=1.0 - self.la_alpha) # crucial line\n param_state['cached_params'].copy_(p.data)\n if self.pullback_momentum == \"pullback\":\n internal_momentum = self.optimizer.state[p][\"momentum_buffer\"]\n self.optimizer.state[p][\"momentum_buffer\"] = 
internal_momentum.mul_(self.la_alpha).add_(\n 1.0 - self.la_alpha, param_state[\"cached_mom\"])\n param_state[\"cached_mom\"] = self.optimizer.state[p][\"momentum_buffer\"]\n elif self.pullback_momentum == \"reset\":\n self.optimizer.state[p][\"momentum_buffer\"] = torch.zeros_like(\n p.data)\n\n return loss", "def magma_cgels(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_cgels(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def __call__(self, d_abc):\r\n # Quantize the duty ratios to N levels\r\n d_abc = np.round(self.N*np.asarray(d_abc))/self.N\r\n # Initialize the normalized switching instant array\r\n tn_sw = np.zeros((4, 2))\r\n tn_sw[3, 1] = 1\r\n # Could be understood as a carrier comparison\r\n if self.falling_edge:\r\n # Normalized switching instants (zero crossing instants)\r\n tn_sw[1:4, 0] = np.sort(d_abc)\r\n tn_sw[0:3, 1] = tn_sw[1:4, 0]\r\n # Compute the switching state array\r\n q_abc = (tn_sw[:, 0] < d_abc[:, np.newaxis]).astype(int)\r\n else:\r\n # Rising edge\r\n tn_sw[1:4, 0] = np.sort(1 - d_abc)\r\n tn_sw[0:3, 1] = tn_sw[1:4, 0]\r\n q_abc = (tn_sw[:, 0] >= 1 - d_abc[:, np.newaxis]).astype(int)\r\n # Change the carrier direction for the next call\r\n self.falling_edge = not self.falling_edge\r\n # Switching state space vector\r\n q = abc2complex(q_abc)\r\n return tn_sw, q", "def test020_speed(self, b_size=64, dim=1024,\n alpha_fwd=0.999, alpha_bkw=0.99, eps=1e-05, epoch=100):\n input = torch.randn(b_size, dim)\n\n # instantiate Linearized Online Norm class\n onlin = OnlineNorm1D(dim, alpha_fwd=alpha_fwd, alpha_bkw=alpha_bkw,\n eps=eps, b_size=b_size)\n\n # time lin algo\n forward = 0\n backward = 0\n for _ in range(epoch):\n start = time.time()\n # fprop through lin algo\n out = onlin(input)\n forward += time.time() - start\n\n start = time.time()\n # bprop through lin algo\n out.sum().backward()\n backward += time.time() - start\n\n self.logger.info(f'Linearized Control Normalization Speed Test: '\n f'Forward {forward * 1e6/1e5:.3f} us | '\n f'Backward {backward * 1e6/1e5:.3f} us | '\n f'Total {(forward + backward) * 1e6/1e5:.3f} us')\n\n # Speed test online norm\n # instantiate Looping Online Norm class\n onloop = OnlineNorm1D(dim, eps=eps,\n ctrl_norm=ControlNorm1DLoop(dim,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n eps=eps))\n\n # time loop algo\n forward = 0\n backward = 0\n for _ in range(epoch):\n start = time.time()\n # fprop through loop algo\n out = onloop(input)\n forward += time.time() - start\n\n start = time.time()\n # bprop through loop algo\n out.sum().backward()\n backward += time.time() - start\n\n self.logger.info(f'Loop Control Normalization Speed Test: '\n f'Forward {forward * 1e6/1e5:.3f} us | '\n f'Backward {backward * 1e6/1e5:.3f} us | '\n f'Total {(forward + backward) * 1e6/1e5:.3f} us')\n\n self.logger.info('Make input tensors representative of size you will '\n 'use and then use the correct algorithm based on '\n 'speed of execution.')", "def ap_entropy(X, M, R):\n\tN = len(X)\n\n\tEm = embed_seq(X, 1, M)\t\n\tEmp = embed_seq(X, 1, M + 1) #\ttry to only build Emp to save time\n\n\tCm, Cmp = np.zeros(N - M + 1), np.zeros(N - M)\n\t# in case there is 0 after counting. 
np.log(0) is undefined.\n\n\tfor i in range(0, N - M):\n#\t\tprint i\n\t\tfor j in range(i, N - M): # start from i, self-match counts in ApEn\n#\t\t\tif max(abs(Em[i]-Em[j])) <= R:# compare N-M scalars in each subseq v 0.01b_r1\n\t\t\tif in_range(Em[i], Em[j], R):\n\t\t\t\tCm[i] += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t### Xin Liu\n\t\t\t\tCm[j] += 1\n\t\t\t\tif abs(Emp[i][-1] - Emp[j][-1]) <= R: # check last one\n\t\t\t\t\tCmp[i] += 1\n\t\t\t\t\tCmp[j] += 1\n\t\tif in_range(Em[i], Em[N-M], R):\n\t\t\tCm[i] += 1\n\t\t\tCm[N-M] += 1\n\t\t# try to count Cm[j] and Cmp[j] as well here\n\t\n#\t\tif max(abs(Em[N-M]-Em[N-M])) <= R: # index from 0, so N-M+1 is N-M v 0.01b_r1\n#\tif in_range(Em[i], Em[N - M], R): # for Cm, there is one more iteration than Cmp\n#\t\t\tCm[N - M] += 1 # cross-matches on Cm[N - M]\n\t\n\tCm[N - M] += 1 # Cm[N - M] self-matches\n#\timport code;code.interact(local=locals())\n\tCm /= (N - M +1 )\n\tCmp /= ( N - M )\n#\timport code;code.interact(local=locals())\n\tPhi_m, Phi_mp = sum(np.log(Cm)), sum(np.log(Cmp))\n\n\tAp_En = (Phi_m - Phi_mp) / (N - M)\n\n\treturn Ap_En", "def _generate_loss_bonus(self, angles: tf.Variable) -> tf.Tensor:\n # Get the operator for all the cx gates\n cx = tf.constant(self.circuit_model.get_cnot_operator())\n # Start from identity to iterate\n circ = tf.complex(tf.eye(2 ** self.circuit_model.nqubit, dtype=tf.float64), tf.constant(0, dtype=tf.float64))\n for layer in range(self.circuit_model.layers - 1, -1, -1):\n start_angle = 3 * layer * self.circuit_model.nqubit\n rot_u3 = self._r_u3(angles[start_angle:start_angle + 3])\n for qb in range(1, self.circuit_model.nqubit):\n rot_u3 = tf.tensordot(self._r_u3(angles[start_angle + 3 * qb: start_angle + 3 * qb + 3]),\n rot_u3, axes=0)\n\n rot_u3 = self._fix_dimension(rot_u3)\n circ = tf.matmul(circ, cx)\n circ = tf.matmul(circ, rot_u3)\n\n loss = self._get_norm(circ)\n\n return loss", "def GMRES_1(A, b, x0, max_iterations=50):\n\n last_x = x0\n curr_x = last_x\n last_r = b - A @ x0\n curr_iter = 0\n residual_queue = []\n while curr_iter < max_iterations:\n Ar = A @ last_r\n alpha = (last_r.transpose() @ Ar) / (Ar.transpose() @ Ar)\n curr_x = last_x + alpha * last_r\n curr_r = last_r - alpha * Ar\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", \"GMRES(1)\")\n last_x = curr_x\n last_r = curr_r\n curr_iter += 1\n print(\"Number of Iterations: \" + str(curr_iter))\n\n return curr_x", "def l2_reg_gradient_descent(Y, weights, cache, alpha, lambtha, L):\n m = Y.shape[1]\n len_cache = len(cache)\n\n # learning for the last layer:\n Al = cache['A{}'.format(len_cache - 1)] # last A\n A_prev = cache['A{}'.format(len_cache - 2)] # pre last A\n dZl = Al - Y # last dZ\n dWl = np.matmul(dZl, A_prev.T) / m # last dW, shape (1, nodes)\n dbl = (1 / m) * np.sum(dZl, axis=1, keepdims=True)\n Wl_str = 'W{}'.format(len_cache - 1)\n Wl = weights[Wl_str] # last W\n # last layer W learning:\n weights[Wl_str] = Wl - (alpha * lambtha / m) * Wl - alpha * dWl\n bl_str = 'b{}'.format(len_cache - 1)\n bl = weights[bl_str] # last b\n weights[bl_str] = bl - alpha * dbl # last layer b learning\n\n # next: learning for the rest of the layers:\n dZ = dZl\n W_next = Wl\n for i in reversed(range(1, len_cache - 1)):\n A = cache['A{}'.format(i)]\n A_prev = cache['A{}'.format(i - 1)]\n dZ = np.matmul(W_next.T, dZ) * (1 - A ** 2)\n dW = (1 / m) * (np.matmul(dZ, 
A_prev.T))\n db = np.sum(dZ, axis=1, keepdims=True) / m\n W_c_str = 'W{}'.format(i)\n W_c = weights[W_c_str] # current W\n b_c_str = 'b{}'.format(i)\n b_c = weights[b_c_str] # current b\n weights[W_c_str] = W_c - (alpha * lambtha / m) * W_c - alpha * dW\n weights[b_c_str] = b_c - alpha * db\n W_next = W_c", "def _l1m_objective(a,X,*args):\n \n return(np.sum(np.apply_along_axis(_euclidnorm,1,_diffmat_objective(a,X))))", "def find_lcs(l1: str, l2: str, length1: int, length2: int):\n \"\"\" Theorem:{\n Initialize matrix with 0 for first row and colm\n If s1[i] = s2[j], update matrix[i][j] with value\n of matrix[i-1][j-1]+1\n Else update matrix[i][j] with max of value among\n matrix[i][j-1],matrix[i-1][j]\n Matrix[n][m] will be lcs\n }\n \"\"\"\n matrix = [[None]*(length1+1) for i in range(0, length2+1)]\n for i in range(0, length2+1):\n for j in range(0, length1+1):\n if i == 0 or j == 0:\n matrix[i][j] = 0\n elif l1[j-1] == l2[i-1]:\n matrix[i][j] = matrix[i-1][j-1] + 1\n else:\n matrix[i][j] = max(matrix[i][j-1], matrix[i-1][j])\n lcs = [None for i in range(0, matrix[length2][length1])]\n index = matrix[length2][length1]\n m = length2 \n n = length1\n while(m > -1 and n > -1):\n if l2[m-1] == l1[n-1]:\n lcs[index-1] = l2[m-1]\n index -= 1\n m -= 1\n n -= 1\n elif matrix[m-1][n] > matrix[m][n-1]:\n m -= 1\n else:\n n -= 1\n return lcs", "def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n :returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val", "def smooth_l1_loss(inputs, beta=1., reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['beta'] = float(args['beta'])\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.SmoothL1Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n beta=args['beta'],\n reduction=args['reduction'],\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def matrix_regression(Y, X, lmbd=-1, L=-1, symmetric=True, iterations=5000, etol=10e-5, verbose=False):\n # check the dimensions of Y and X\n if Y.shape[1] > X.shape[1]:\n raise ValueError('X must have at least as many columns as Y.')\n if X.shape[0] != X.shape[0]:\n raise ValueError('X and Y must have the same row dimension.')\n if Y.ndim != 2 or X.ndim != 2:\n raise ValueError('X and Y must be matrices.')\n\n # default penalty parameter\n if lmbd <= 0:\n lmbd = 2 * (np.sqrt(Y.shape[1]) + np.sqrt(X.shape[1]) + 1) * (np.sqrt(X.shape[1]) + np.sqrt(X.shape[0]))\n\n # initial guess for solution\n prev_W = symmetrize(np.random.rand(X.shape[1],Y.shape[1]))\n Z = prev_W\n\n # compute Lipschitz constant for optimizer\n if L == -1:\n U, s, V = np.linalg.svd(X.T.dot(X))\n L = s[0]\n\n iters = 0\n err = 1\n alpha = 1\n \n # Implements step 3 of Algorithm 2 of Ji and Ye (2009). 
Other steps are avoided because we already computed the Lipschitz constant.\n while iters < iterations and err > etol:\n W = gradient_step(Y, X, lmbd, L, Z) # first part of step 3\n prev_alpha = alpha\n alpha = (1 + np.sqrt(1 + 4*(prev_alpha**2)))/2 # second part of step 3, equation (18)\n Z = W + ((prev_alpha - 1)/alpha) * (W - prev_W) # third part of step 3, equation (19)\n \n err = np.abs(prev_W - W).mean() # measure error relative to previous step\n iters += 1\n prev_W = W # update\n\n if iters%100==0 and verbose: print('Iteration {}. Error {}'.format(iters,err))\n \n if verbose: print('Iteration {}. Error {}'.format(iters,err))\n if iters == iterations: print('Warning: max iterations hit.')\n \n if symmetric: W = symmetrize(W) # optionally impose constraints on graph\n return W", "def exercise1d():\n\n # Defination of muscles\n muscle_parameters = MuscleParameters()\n print(muscle_parameters.showParameters())\n\n mass_parameters = MassParameters()\n print(mass_parameters.showParameters())\n\n # Create muscle object\n muscle = Muscle(muscle_parameters)\n\n # Create mass object\n mass = Mass(mass_parameters)\n\n pylog.warning(\"Isotonic muscle contraction to be implemented\")\n\n # Instatiate isotonic muscle system\n sys = IsotonicMuscleSystem()\n\n # Add the muscle to the system\n sys.add_muscle(muscle)\n\n # Add the mass to the system\n sys.add_mass(mass)\n\n # You can still access the muscle inside the system by doing\n # >>> sys.muscle.L_OPT # To get the muscle optimal length\n\n # Evalute for a single load\n load = 100.\n\n # Evalute for a single muscle stimulation\n muscle_stimulation = 1.\n\n # Set the initial condition\n x0 = [0.0, sys.muscle.L_OPT,\n sys.muscle.L_OPT + sys.muscle.L_SLACK, 0.0]\n \n # x0[0] - -> activation\n # x0[1] - -> contractile length(l_ce)\n # x0[2] - -> position of the mass/load\n # x0[3] - -> velocity of the mass/load\n \n\n # Set the time for integration\n t_start = 0.0\n t_stop = 0.5\n time_step = 0.001\n time_stabilize = 0.2\n\n time = np.arange(t_start, t_stop, time_step)\n \n loads = np.arange(20, 351, 10)\n \n velocities = []\n\n for index, load in enumerate(loads):\n \n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n time_stabilize=time_stabilize,\n stimulation=muscle_stimulation,\n load=load) \n\n if (result.l_mtc[-1] < sys.muscle.L_OPT + sys.muscle.L_SLACK):\n velocities.append(np.max(result.v_ce))\n print('max')\n else:\n velocities.append(np.min(result.v_ce))\n print('min')\n\n\n #Muscle contracile Velocity - Tension (load) relationship\n \n plt.figure('Isotonic muscle experiment')\n plt.title('Isotonic muscle experiment')\n plt.xlabel('Muscle Contractile Velocity [m/s]')\n plt.ylabel('Tension (load) [N]')\n plt.plot(velocities, loads)\n plt.grid()\n \n #For different stimulations 1.f\n \n muscle_stimulation = np.arange(0,1.1,0.2)\n plt.figure('Isotonic muscle exp with different stimulations')\n plt.title('Isotonic muscle experiment with different stimulations')\n\n for stim in muscle_stimulation:\n velocities = []\n for index, load in enumerate(loads):\n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n time_stabilize=time_stabilize,\n stimulation=stim,\n load=load) \n\n if (result.l_mtc[-1] < sys.muscle.L_OPT + sys.muscle.L_SLACK):\n velocities.append(np.max(result.v_ce))\n else:\n velocities.append(np.min(result.v_ce))\n plt.xlabel('Muscle Contractile Velocity [m/s]')\n plt.ylabel('Tension (load) [N]')\n plt.plot(velocities, loads)\n \n 
plt.legend(('0','0.2','0.4','0.6','0.8','1.0'))\n plt.grid()", "def blk_tridiag_chol(A_Txdxd, B_Tm1xdxd):\n def compute_chol(LC, AB_2xdxd):\n L_dxd = LC[0]\n A_dxd, B_dxd = AB_2xdxd[0], AB_2xdxd[1]\n C_dxd = tf.matmul(B_dxd, tf.matrix_inverse(L_dxd), \n transpose_a=True, transpose_b=True)\n D = A_dxd - tf.matmul(C_dxd, C_dxd, transpose_b=True)\n L_dxd = tf.cholesky(D)\n return [L_dxd, C_dxd]\n \n L1_dxd = tf.cholesky(A_Txdxd[0])\n C1_dxd = tf.zeros_like(B_Tm1xdxd[0], dtype=tf.float64)\n \n result_2xTm1xdxd = tf.scan(fn=compute_chol, elems=[A_Txdxd[1:], B_Tm1xdxd],\n initializer=[L1_dxd, C1_dxd])\n\n AChol_Txdxd = tf.concat([tf.expand_dims(L1_dxd, 0), result_2xTm1xdxd[0]], \n axis=0) \n BChol_Tm1xdxd = result_2xTm1xdxd[1]\n \n return [AChol_Txdxd, BChol_Tm1xdxd]", "def _reward(self):\n # Clock reward -----------------------------------------------------------------\n A, B = self.get_von_mises(0.0, self.ratio, self.kappa)\n phi = self.phase / self.cycle_len\n #print('Cycles completed = ', self.cycle_complete)\n\n #print('A, B = ', (A,B))\n\n phi_FL = self.wrap(phi + self.theta_FL)\n phi_FR = self.wrap(phi + self.theta_FR)\n phi_RL = self.wrap(phi + self.theta_RL)\n phi_RR = self.wrap(phi + self.theta_RR)\n\n #print(phi_FL)\n #print(phi_FR)\n #print(phi_RL)\n #print(phi_RR)\n\n FL_swing = self.in_swing(A, B, phi_FL)\n FR_swing = self.in_swing(A, B, phi_FR)\n RL_swing = self.in_swing(A, B, phi_RL)\n RR_swing = self.in_swing(A, B, phi_RR)\n\n #print('Time since reset = ', self.rex.GetTimeSinceReset())\n #print('phase phi = ', phi)\n #print('FL swing = ', FL_swing)\n #print('FR swing = ', FR_swing)\n #print('RL swing = ', RL_swing)\n #print('RR swing = ', RR_swing)\n\n if FL_swing:\n c_swing_frc_FL = 1\n c_swing_spd_FL = 0\n else:\n c_swing_frc_FL = 0\n c_swing_spd_FL = 1\n\n if FR_swing:\n c_swing_frc_FR = 1\n c_swing_spd_FR = 0\n else:\n c_swing_frc_FR = 0\n c_swing_spd_FR = 1\n\n if RL_swing:\n c_swing_frc_RL = 1\n c_swing_spd_RL = 0\n else:\n c_swing_frc_RL = 0\n c_swing_spd_RL = 1\n\n if RR_swing:\n c_swing_frc_RR = 1\n c_swing_spd_RR = 0\n else:\n c_swing_frc_RR = 0\n c_swing_spd_RR = 1\n\n FL_foot_force, FR_foot_force, RL_foot_force, RR_foot_force = self.get_contact_forces()\n FL_vel, FR_vel, RL_vel, RR_vel = self.get_foot_velocities()\n\n FL_penalty = c_swing_frc_FL*FL_foot_force + c_swing_spd_FL*FL_vel\n FR_penalty = c_swing_frc_FR*FR_foot_force + c_swing_spd_FR*FR_vel\n RL_penalty = c_swing_frc_RL*RL_foot_force + c_swing_spd_RL*RL_vel\n RR_penalty = c_swing_frc_RR*RR_foot_force + c_swing_spd_RR*RR_vel\n\n foot_penalties = FL_penalty + FR_penalty + RL_penalty + RR_penalty\n \n # Deviation Penalties ----------------------------------------------------------\n # Base height\n base_height = self.rex.GetBasePosition()[-1]\n height_err = np.abs(base_height - self.height_des)\n \n if height_err < 0.02:\n height_err = 0\n\n # Speed \n vx, vy, _ = p.getBaseVelocity(bodyUniqueId=self.rex.quadruped)[0]\n vx = -vx # in rex, forward is the negative x direction\n x_vel_err = 4*np.abs(vx - self.speed) # higher emphasis on x velocity error\n y_vel_err = np.abs(vy - self.side_speed)\n\n # Orientation\n orient_curr = self.rex.GetBaseOrientation()\n orient_des = [0, 0, 0, 1] # not exact, but shouldn't be too far from this\n orient_err = 6 * (1 - np.inner(orient_curr, orient_des)**2 )\n\n shoulder_orient_des = [0, 0, 0, 1]\n FL_sh, FR_sh, RL_sh, RR_sh = self.get_shoulder_orientation()\n\n # quaternion similarity: 1 - <q1, q2>**2 == 0 when 100% similar\n # good when error < 0.01 (individually)\n # put HUGE 
penalty on this\n shoulder_err = 20 * ((1 - np.inner(shoulder_orient_des, FL_sh)**2) + \n (1 - np.inner(shoulder_orient_des, FR_sh)**2) +\n (1 - np.inner(shoulder_orient_des, RL_sh)**2) + \n (1 - np.inner(shoulder_orient_des, RR_sh)**2))\n\n # Energy Penalties --------------------------------------------------------------\n energy_penalty = np.abs(np.dot(self.rex.GetMotorTorques(),\n self.rex.GetMotorVelocities())) * self._time_step\n\n # Acceleration\n a_trans, a_rot = self.get_base_accelerations()\n accel_penalty = 0.15 * np.abs(a_trans.sum() + a_rot.sum())\n\n # need to encourage exploration: current issue --> Rex is stuck at origin\n # because positive rewards all the time\n # need lim error --> 0, reward > 0 \n\n beta = -0.75\n\n reward = beta + \\\n 0.200 * np.exp(-orient_err - shoulder_err) + \\\n 0.275 * np.exp(-foot_penalties) + \\\n 0.075 * np.exp(-height_err) + \\\n 0.250 * np.exp(-x_vel_err) + \\\n 0.100 * np.exp(-y_vel_err) + \\\n 0.075 * np.exp(-accel_penalty) + \\\n 0.025 * np.exp(-energy_penalty)\n\n\n return reward", "def test_linear_in_cond(self):\n # reproducible arbitrariness\n np.random.seed(3232)\n\n cond_out = np.random.randn(self.Nc)\n alpha = 2.3\n\n self.conductor.out_step = np.copy(cond_out)\n self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.conductor.out_step = alpha*cond_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def test_linear_in_cond(self):\n # reproducible arbitrariness\n np.random.seed(3232)\n\n cond_out = np.random.randn(self.Nc)\n alpha = 2.3\n\n self.conductor.out_step = np.copy(cond_out)\n self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.conductor.out_step = alpha*cond_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def gel_solve(\n A,\n y,\n l_1,\n l_2,\n ns,\n b_init=None,\n block_solve_fun=block_solve_agd,\n block_solve_kwargs=None,\n max_cd_iters=None,\n rel_tol=1e-6,\n Cs=None,\n Is=None,\n verbose=False,\n):\n p = len(A)\n m = len(y)\n device = A[0].device\n dtype = A[0].dtype\n y = y.to(device, dtype)\n if block_solve_kwargs is None:\n block_solve_kwargs = dict()\n\n # Create initial values if not specified.\n if b_init is None:\n b_init = 0.0, torch.zeros(p, max(ns), device=device, dtype=dtype)\n\n if not isinstance(ns, torch.Tensor):\n ns = torch.tensor(ns)\n sns = ns.to(device, dtype).sqrt()\n a_1 = l_1 * sns\n ma_1 = m * a_1\n a_2 = 2 * l_2 * sns\n b_0, B = b_init\n b_0_prev, B_prev = b_0, B\n k = 1 # iteration number\n pbar_stats = {} # stats for the outer progress bar\n pbar = tqdm.tqdm(\n desc=\"Solving gel with CD (l_1 {:.2g}, l_2 {:.2g})\".format(l_1, l_2),\n disable=not verbose,\n )\n\n while True:\n # First minimize with respect to b_0. 
This has a closed form solution\n # given by b_0 = 1'@(y - sum_j A_j@b_j) / m.\n b_0 = (y - sum(A[j] @ B[j, : ns[j]] for j in range(p))).sum() / m\n\n # Now, minimize with respect to each b_j.\n for j in tqdm.trange(\n p, desc=\"Solving individual blocks\", disable=not verbose, leave=False\n ):\n r_j = y - b_0 - sum(A[k] @ B[k, : ns[k]] for k in range(p) if k != j)\n\n # Check if b_j must be set to 0. The condition is ||A_j'@r_j|| <=\n # m*a_1.\n if (A[j].t() @ r_j).norm(p=2) <= ma_1[j]:\n B[j] = 0\n else:\n # Otherwise, minimize. First make sure initial value is not 0.\n if len((B[j, : ns[j]].abs() < 1e-6).nonzero()) == ns[j]:\n B[j, : ns[j]] = 1e-3\n\n # Add C_j and I_j to the arguments if using Newton's method.\n if block_solve_fun is block_solve_newton:\n block_solve_kwargs[\"C_j\"] = Cs[j]\n block_solve_kwargs[\"I_j\"] = Is[j]\n\n B[j, : ns[j]] = block_solve_fun(\n r_j,\n A[j],\n a_1[j].item(),\n a_2[j].item(),\n m,\n B[j, : ns[j]],\n verbose=verbose,\n **block_solve_kwargs,\n )\n\n # Compute relative change in b.\n b_0_diff = b_0 - b_0_prev\n B_diff = B - B_prev\n delta_norm = (b_0_diff ** 2 + (B_diff ** 2).sum()).sqrt()\n b_norm = (b_0 ** 2 + (B ** 2).sum()).sqrt()\n\n pbar_stats[\"rel change\"] = \"{:.2g}\".format(delta_norm.item() / b_norm.item())\n pbar.set_postfix(pbar_stats)\n pbar.update()\n\n # Check max iterations exit criterion.\n if max_cd_iters is not None and k == max_cd_iters:\n break\n k += 1\n\n # Check tolerance exit criterion.\n if delta_norm.item() <= rel_tol * b_norm.item() and k > 2:\n break\n b_0_prev, B_prev = b_0, B\n\n pbar.close()\n return b_0.item(), B", "def test_single_ended_ols_wls_estimate_synthetic():\n\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 500)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * \\\n np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)\n\n print('alphaint', cable_len * (dalpha_p - dalpha_m))\n print('alpha', dalpha_p - dalpha_m)\n print('C', np.log(C_p / C_m))\n print('x0', x.max())\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '0'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_single_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n method='ols',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_single_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n st_var=1.,\n ast_var=1.,\n method='wls',\n solver='sparse')\n\n 
np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n\n pass", "def tcs(self,lpf=0, opf=1):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n \n # store initial beam data\n self.LD[0][0] = S.pos\n\n #Mean data\n self.LD[0][1] = S.moment0_env[0]\n self.LD[0][2] = S.moment0_env[2]\n self.LD[0][3] = S.moment0_env[4]\n self.LD[0][4] = S.moment0_rms[0]\n self.LD[0][5] = S.moment0_rms[2]\n self.LD[0][6] = S.moment0_rms[4]\n self.LD[0][7] = S.ref_phis\n self.LD[0][8] = S.ref_IonEk\n\n # store initial beam data\n self.LD2[0][0] = S.pos\n #Mean data\n self.LD2[0][1] = S.moment0_env[1]\n self.LD2[0][2] = S.moment0_env[3]\n self.LD2[0][3] = S.moment0_env[5]\n self.LD2[0][4] = S.moment0_rms[1]\n self.LD2[0][5] = S.moment0_rms[3]\n self.LD2[0][6] = S.moment0_rms[5]\n\n\n # propagate step by step and store beam data\n for i in range(1,len(self.M)):\n self.M.propagate(S, i, 1)\n \n \n self.LD[i][0] = S.pos\n #Mean data\n self.LD[i][1] = S.moment0_env[0]\n self.LD[i][2] = S.moment0_env[2]\n self.LD[i][3] = S.moment0_env[4]\n self.LD[i][4] = S.moment0_rms[0]\n self.LD[i][5] = S.moment0_rms[2]\n self.LD[i][6] = S.moment0_rms[4]\n self.LD[i][7] = S.ref_phis\n self.LD[i][8] = S.ref_IonEk\n\n self.LD2[i][0] = S.pos\n #Mean data\n self.LD2[i][1] = S.moment0_env[1]\n self.LD2[i][2] = S.moment0_env[3]\n self.LD2[i][3] = S.moment0_env[5]\n self.LD2[i][4] = S.moment0_rms[1]\n self.LD2[i][5] = S.moment0_rms[3]\n self.LD2[i][6] = S.moment0_rms[5]\n\n #output data for plotting\n if opf: np.savetxt('ldata.txt',self.LD)\n\n if not lpf: return S", "def lwr_recursion(r):\r\n\r\n # r is (P+1, nc, nc)\r\n nc = r.shape[1]\r\n P = r.shape[0] - 1\r\n\r\n a = np.zeros((P, nc, nc)) # ar coefs\r\n b = np.zeros_like(a) # lp coefs\r\n sigb = np.zeros_like(r[0]) # forward prediction error covariance\r\n sigf = np.zeros_like(r[0]) # backward prediction error covariance\r\n delta = np.zeros_like(r[0])\r\n\r\n # initialize\r\n idnt = np.eye(nc)\r\n sigf[:] = r[0]\r\n sigb[:] = r[0]\r\n\r\n # iteratively find sequences A_{p+1}(i) and B_{p+1}(i)\r\n for p in range(P):\r\n\r\n # calculate delta_{p+1}\r\n # delta_{p+1} = r(p+1) + sum_{i=1}^{p} a(i)r(p+1-i)\r\n delta[:] = r[p + 1]\r\n for i in range(1, p + 1):\r\n delta += np.dot(a[i - 1], r[p + 1 - i])\r\n\r\n # intermediate values XXX: should turn these into solution-problems\r\n ka = np.dot(delta, linalg.inv(sigb))\r\n kb = np.dot(delta.conj().T, linalg.inv(sigf))\r\n\r\n # store a_{p} before updating sequence to a_{p+1}\r\n ao = a.copy()\r\n # a_{p+1}(i) = a_{p}(i) - ka*b_{p}(p+1-i) for i in {1,2,...,p}\r\n # b_{p+1}(i) = b_{p}(i) - kb*a_{p}(p+1-i) for i in {1,2,...,p}\r\n for i in range(1, p + 1):\r\n a[i - 1] -= np.dot(ka, b[p - i])\r\n for i in range(1, p + 1):\r\n b[i - 1] -= np.dot(kb, ao[p - i])\r\n\r\n a[p] = -ka\r\n b[p] = -kb\r\n\r\n sigf = np.dot(idnt - np.dot(ka, kb), sigf)\r\n sigb = np.dot(idnt - np.dot(kb, ka), sigb)\r\n\r\n return a, sigf", "def calc_change (change_amnts, rate_of_transition, from_cohort, present):\n row, col = cuda.grid(2)\n\n if row < from_cohort.shape[0] and col < 
from_cohort.shape[1]:\n change_amnts[row,col] = \\\n rate_of_transition[row,col] * from_cohort[row,col] \n if present[row, col] and change_amnts[row, col] > from_cohort[row, col]:\n change_amnts[row, col] = from_cohort[row,col]", "def cheby_op2(L, c, arange):\r\n if not isinstance(c, list) and not isinstance(c, tuple):\r\n r = cheby_op2(L, [c], arange)\r\n return r[0]\r\n\r\n # L=tf.sparse.to_dense(L)\r\n \r\n \r\n N_scales = len(c)\r\n M = np.array([coeff.size for coeff in c])\r\n max_M = M.max()\r\n\r\n a1 = (arange[1] - arange[0]) / 2.0\r\n a2 = (arange[1] + arange[0]) / 2.0\r\n\r\n Twf_old = 0\r\n Twf_cur = (L-a2*np.identity(L.shape[0])) / a1\r\n r = [0.5*c[j][0]*Twf_old + c[j][1]*Twf_cur for j in range(N_scales)]\r\n\r\n for k in range(1, max_M):\r\n Twf_new = (2/a1) * (L*Twf_cur - a2*Twf_cur) - Twf_old\r\n for j in range(N_scales):\r\n if 1 + k <= M[j] - 1:\r\n r[j] = r[j] + c[j][k+1] * Twf_new\r\n\r\n Twf_old = Twf_cur\r\n Twf_cur = Twf_new\r\n\r\n return r" ]
[ "0.661372", "0.61719704", "0.60370505", "0.5863777", "0.569097", "0.5663334", "0.56262916", "0.5592393", "0.54651314", "0.5444343", "0.53959835", "0.5394466", "0.53734285", "0.53725505", "0.5371813", "0.5362057", "0.5342894", "0.53264534", "0.5277725", "0.5275031", "0.5255691", "0.5249246", "0.5233609", "0.5224999", "0.5223098", "0.52079904", "0.51894665", "0.5184089", "0.51771814", "0.51650673", "0.51589257", "0.5155217", "0.51540655", "0.514906", "0.5145459", "0.5142964", "0.5135638", "0.51331353", "0.51159114", "0.51135266", "0.5102807", "0.5100065", "0.50795746", "0.50761324", "0.5075811", "0.5075704", "0.5067165", "0.5057237", "0.50527656", "0.5052242", "0.50505257", "0.50386614", "0.50332725", "0.50291556", "0.5015305", "0.5015212", "0.5011803", "0.50028926", "0.49973065", "0.4992892", "0.4984065", "0.49812272", "0.49689326", "0.49688086", "0.49627092", "0.49622753", "0.4960552", "0.49599728", "0.49584335", "0.49572906", "0.49549475", "0.49517015", "0.49424684", "0.4939306", "0.4937235", "0.49358222", "0.49328476", "0.49301594", "0.49286395", "0.49255922", "0.49204683", "0.49109605", "0.4909295", "0.49083093", "0.49074933", "0.49027324", "0.49015215", "0.48992056", "0.48985818", "0.4897774", "0.48958877", "0.48934555", "0.4891099", "0.4891099", "0.48880202", "0.48846632", "0.48790726", "0.48788145", "0.48761016", "0.487324" ]
0.6959504
0
Compute the identity loss. L_idt = lambda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] + lamB * [Expectation of L1_norm(G(B) - B)]]
def __identity_loss(self, identA, identB):
    loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \
           self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))
    return loss
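A minimal standalone sketch of the same L1 identity term, included only for illustration: it assumes TensorFlow 2, replaces the document's option object (lambda_ident, lamA, lamB) with hard-coded placeholder weights, and uses random tensors in place of realA/realB and the generator outputs identA/identB (e.g. F(A) and G(B) in a CycleGAN-style setup); none of these concrete values come from the source.

import tensorflow as tf

lambda_ident, lamA, lamB = 0.5, 10.0, 10.0   # placeholder hyperparameters, not from the source
realA = tf.random.uniform((4, 64, 64, 3))    # stand-ins for image batches from domains A and B
realB = tf.random.uniform((4, 64, 64, 3))
identA = tf.random.uniform((4, 64, 64, 3))   # stand-in for the generator output compared against realB
identB = tf.random.uniform((4, 64, 64, 3))   # stand-in for the generator output compared against realA

# weighted sum of the two mean-absolute-error (L1) terms, mirroring the document above
loss = lambda_ident * (lamA * tf.reduce_mean(tf.abs(identB - realA)) +
                       lamB * tf.reduce_mean(tf.abs(identA - realB)))
print(float(loss.numpy()))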
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l1_loss(inputs, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.L1Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def L_pseudo_inverse_tf(self) -> tf.Tensor:\n return tf.py_func(np.linalg.pinv, [self.L_tf], tf.float32)", "def l1_loss(D, G, real_data, generated_data, losses, options):\n return torch.nn.L1Loss()(generated_data, real_data)", "def identity_block(input_tensor, units):\n x = layers.Dense(units, kernel_regularizer=reg)(input_tensor)\n x = l()(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Dense(units, kernel_regularizer=reg)(x)\n x = l()(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Dense(units, kernel_regularizer=reg)(x)\n x = l()(x)\n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n\n return x", "def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t", "def forward(self, output, target):\n fake_A, fake_B, idt_A, idt_B = output\n #Generators are trained to trick the discriminators so the following should be ones\n self.adv_loss_A = -torch.mean(self.dualgan.D_A(fake_A)) \n self.adv_loss_B = -torch.mean(self.dualgan.D_B(fake_B))\n \n #Reconstruction loss\n self.rec_loss_A = F.l1_loss(self.dualgan.G_A(fake_B), self.real_A)\n self.rec_loss_B = F.l1_loss(self.dualgan.G_B(fake_A), self.real_B)\n \n #Identity loss\n self.id_loss_A = F.l1_loss(idt_A, self.real_A)\n self.id_loss_B = F.l1_loss(idt_B, self.real_B)\n \n return self.l_adv*(self.adv_loss_A+self.adv_loss_B)+self.l_rec*(self.rec_loss_A+self.rec_loss_B)+self.l_idt*(self.id_loss_A+self.id_loss_B)", "def _lambda(self, x, y, t, x_his, y_his, t_his):\n lam = self.mu + tf.reduce_sum(self._kernel(x - x_his, y - y_his, t - t_his), axis=0)\n return lam", "def ImpliesLTL(one: LTL, two: LTL) -> LTL:\n vars = one.variables\n vars += two.variables\n formula = \"(\" + one.formula + \") -> (\" + two.formula + \")\"\n return LTL(formula, vars)", "def logit_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n target_ids = args.target_ids.reshape(logits.shape[0], 1)\n return logits.gather(-1, target_ids).squeeze(-1)", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def loss_function(self, x, x_hat_logit, mu, log_sigma):\n rec_loss = nn.functional.binary_cross_entropy_with_logits(x_hat_logit, x, size_average=False)\n kl_loss = -0.5 * torch.sum(1 + log_sigma - mu.pow(2) - log_sigma.exp())\n\n return rec_loss + (kl_loss), rec_loss, kl_loss", "def cross_entropy(m_true, alpha, alpha0, m_probs, lambd=1.0):\n\n loss = tf.reduce_sum(input_tensor=m_true * (tf.math.digamma(alpha0) - tf.math.digamma(alpha)), axis=1, keepdims=True)\n loss = tf.reduce_mean(input_tensor=loss)\n if lambd > 0:\n kl = kullback_leibler_dirichlet(m_true, alpha)\n loss = loss + lambd * kl\n return loss", "def 
kl_latent_space(network, *args):\n\n z, log_det_J = network(*args)\n loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n return loss", "def _l1m_objective(a,X,*args):\n \n return(np.sum(np.apply_along_axis(_euclidnorm,1,_diffmat_objective(a,X))))", "def _generator_loss(self, y_hat):\n\n l = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat ))\n print('generatorloss shape',tf.shape(l))\n return l", "def ml_kl_loss(self, simulation, c1 = 1.0, ndims = 2, ehigh=1e5, emax = 1e10, turnover=200):\n loss = MLKL(c1, simulation, ndims, ehigh, emax, turnover)\n return loss.lossFunction", "def L_tf(self) -> tf.Tensor:\n return tf.diag(self.out_degrees_tf_vector) - self.A_tf", "def regularized_multinomial_likelihood(m_true, alpha, alpha0, m_probs, global_step, annealing_step=1000, max_lambda=1.0):\n\n ll = multinomial_likelihood(m_true, alpha, alpha0, m_probs)\n kl = kullback_leibler_dirichlet(m_true, alpha)\n lamb = tf.cast(tf.minimum(max_lambda, global_step / annealing_step), dtype=tf.float32)\n loss = ll + lamb * kl\n return loss", "def l1_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.abs(obs - actual) , 1)", "def L1(yhat, y):\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss", "def idn(x):\n\n def grad(dy):\n return dy\n\n return tf.ones_like(x), grad", "def gl64(function,a,b):\r\n # Parameters\r\n a = torch.tensor(a)\r\n b = torch.tensor(b)\r\n k1 = (b-a)/2\r\n k2 = (b+a)/2\r\n gl64 = torch.tensor(0.)\r\n c = 0\r\n\r\n for i in range(64):\r\n w_k = w_i[c]\r\n x_k = k1*x_i[c]+k2\r\n gl64 = gl64 + w_k*function(x_k.unsqueeze(0))\r\n c += 1\r\n \r\n return gl64*k1", "def loss(self):\n return la.norm(self.resids) / self.normX", "def l1(y_true, y_pred):\n if K.ndim(y_true) == 4:\n return K.mean(K.abs(y_pred - y_true), axis=[1,2,3])\n elif K.ndim(y_true) == 3:\n return K.mean(K.abs(y_pred - y_true), axis=[1,2])\n else:\n raise NotImplementedError(\"Calculating L1 loss on 1D tensors? 
should not occur for this network\")", "def human_policy_kl_loss(student_logits, teacher_logits, action_type_kl_cost):\n # student_logits: list of ArgsActionLogits\n action_type_loss = kl(student_logits, teacher_logits, 1)\n kl_loss = action_type_kl_cost * torch.mean(action_type_loss)\n\n return kl_loss", "def loss(labels,q,M,a,b):\n x=-(labels*np.log(s.expit(z(q,M,a,b)))+(1-labels)*np.log(1-s.expit(z(q,M,a,b))))\n return np.sum(x)+l/2*(np.sum(M**2)+b**2)", "def __call__(self, x):\n h = F.relu(self.l0(x))\n h = F.relu(self.l1(h))\n return self.l2(h)", "def l2_loss(inputs, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.L2Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def nll_loss(\n inputs,\n axis=1,\n ignore_index=None,\n reduction='valid',\n **kwargs\n):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.NLLLoss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n axis=axis,\n reduction=args['reduction'],\n ignore_index=ignore_index,\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def generatorLoss(fakeOutput):\n return cross_entropy(tf.ones_like(fakeOutput), fakeOutput)", "def identity(inputs: torch.Tensor):\n return inputs", "def encoder_kl_loss(latent_dist):\n zeros = tf.zeros_like(latent_dist.mean())\n batch_size = tf.cast(tf.shape(zeros)[0], zeros.dtype)\n prior = tf.distributions.Normal(loc=zeros, scale=(zeros + 1))\n total_kl = tf.reduce_sum(tf.distributions.kl_divergence(latent_dist, prior))\n mean_kl = total_kl / batch_size\n return mean_kl", "def loss(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n c2 = 0\n c1 = (np.log(1+np.exp(-1*y1*k1)))\n for i in range(N):\n c2 += c1[i][0]\n l = c2 / N + (0.5 * self.l2_reg * np.dot(self.w,np.transpose(self.w)))\n l1 = l[0][0]\n return l1\n\n\n #raise NotImplementedError", "def A(lr):\n pass", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def kl(Y, Y_hat):\n # Pull out the argument to the sigmoid\n assert hasattr(Y_hat, 'owner')\n owner = Y_hat.owner\n assert owner is not None\n op = owner.op\n\n if not hasattr(op, 'scalar_op'):\n owner = Y_hat.owner.inputs[0].owner\n assert owner is not None\n op = owner.op\n\n if not hasattr(op, 'scalar_op'):\n raise ValueError(\"Expected Y_hat to be generated by an Elemwise op, got \"+str(op)+\" of type \"+str(type(op)))\n\n assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)\n z ,= owner.inputs\n\n term_1 = Y * T.nnet.softplus(-z)\n term_2 = (1 - Y) * T.nnet.softplus(z)\n\n total = term_1 + term_2\n\n ave_ch = total.mean(axis=1)\n assert ave_ch.ndim == 3\n\n ave_x = ave_ch.mean(axis=1)\n assert ave_x.ndim == 2\n\n ave_y = ave_x.mean(axis=1)\n assert ave_y.ndim == 1\n\n return ave_y", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / 
weight_sum\n return mean_loss, (logits, state)", "def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):\n # compute the generators loss\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n\n # Compute the disciminators loss. Use fake images from image pool to improve stability\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n\n return Gen_loss, D_A_loss, D_B_loss", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def loss(self,A2,label):\r\n m = label.shape[0]\r\n\r\n log_likelihood = -np.log(A2[label,range(m)])\r\n loss = np.sum(log_likelihood) / m\r\n return loss", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def backward_G(self,i,direction):\n #lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n #lambda_B = self.opt.lambda_B\n lambda_reg = 0.01\n lambda_idt=1\n # Identity loss\n if(direction):\n #the idt loss \n self.loss_idt=0\n # if lambda_idt > 0:\n # # G_A should be identity if real_B is fed: ||G_A(B) - B|| 使用fakeB代替\n # self.idt_A = self.netG_A[self.orders[i]](self.fake_B)\n # self.loss_idt_A = self.criterionIdt(\n # self.idt_A, self.fake_B) * lambda_B * lambda_idt\n # # G_B should be identity if real_A is fed: ||G_B(A) - A||\n # self.idt_B = self.netG_B[self.orders[i]](self.real_A)\n # self.loss_idt_B = self.criterionIdt(\n # self.idt_B, self.real_A) * lambda_A * lambda_idt\n # else:\n # self.loss_idt_A = 0\n # self.loss_idt_B = 0\n\n self.loss_G_adv=self.criterionGAN_D(self.netDadv(self.fake_B),True)\n # GAN loss D_A(G_A(A))\n self.pred_fake = self.netD(self.fake_B)\n self.loss_G_A = self.criterionGAN_D(self.pred_fake,self.labels[i+1])\n # GAN loss D_B(G_B(B))\n \n self.loss_G_B = self.criterionGAN_D(self.netD(self.rec_A), self.labels[i])\n \n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg=torch.nn.MSELoss()\n #\n self.loss_reg = (self.criterionReg(self.mask_A, torch.ones_like(self.mask_A))+self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A + self.loss_G_B\n self.loss_G.backward()\n else:\n if lambda_idt > 0:\n self.idt_B = self.netG_A[self.orders_rev[i]](self.real_A)\n self.loss_idt = self.criterionIdt(\n self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt = 0\n\n self.loss_G_adv = self.criterionGAN_D(self.netDadv(self.fake_B), True)\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN_D(\n self.netD(self.fake_B), self.labels_rev[i])\n # GAN loss D_B(G_B(B))\n\n self.loss_G_B = self.criterionGAN_D(\n self.netD(self.rec_A), self.labels[0])\n\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(\n self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg = torch.nn.MSELoss()\n self.loss_reg = -(self.criterionReg(self.mask_A, torch.ones_like(self.mask_A)) +\n self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A +self.loss_G_B\n self.loss_G.backward()", "def identity(cls, ring, rig, lin):\n assert lin.ring == rig\n src = tgt = lin\n V, U = lin.hom\n lins = [[\n Lin(\n lin[i,j].value, \n lin[i,j].value, \n elim.identity(ring, lin[i,j].value.n)) # unwrapped\n for j in range(U.n)] for i in range(V.n)]\n i_2 = Lin2(tgt, src, lins)\n return i_2", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, 
P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def inductorenergy(L, I):\n return 1 / 2 * L * I ** 2", "def ls_generator_loss(scores_fake):\n N = scores_fake.size()\n\n true_labels = Variable(torch.ones(N)).type(dtype)\n\n loss = 0.5 * ((torch.mean((scores_fake - true_labels)**2)))\n\n return loss", "def optimizer_step(g, H, lambda_=0):\n if lambda_: # LM instead of GN\n D = (H.diagonal(dim1=-2, dim2=-1) + 1e-9).diag_embed()\n H = H + D*lambda_\n try:\n P = torch.inverse(H)\n except RuntimeError as e:\n logging.warning(f'Determinant: {torch.det(H)}')\n raise e\n delta = -(P @ g[..., None])[..., 0]\n return delta", "def forward_step_layer(t1, t2, activation_f=torchfun.relu):\n return batch_norm_tensor(activation_f(t1.bmm(t2)))", "def nll_on_features(self, h, batch, reduction=\"mean\"):\n batch = batch.to(h.device)\n y = batch.outputs\n # Extract features with the model\n features = h.view(batch.size, -1)\n # Log loss\n logits = self.head(features)\n log_probs = F.log_softmax(logits, dim=-1)\n nll_loss = F.nll_loss(log_probs, y, reduction=reduction)\n return nll_loss", "def test_identity(model_name):\n env = DummyVecEnv([lambda: IdentityEnv(10)])\n\n model = LEARN_FUNC_DICT[model_name](env)\n evaluate_policy(model, env, n_eval_episodes=20, reward_threshold=90)\n\n obs = env.reset()\n assert model.action_probability(obs).shape == (1, 10), \"Error: action_probability not returning correct shape\"\n action = env.action_space.sample()\n action_prob = model.action_probability(obs, actions=action)\n assert np.prod(action_prob.shape) == 1, \"Error: not scalar probability\"\n action_logprob = model.action_probability(obs, actions=action, logp=True)\n assert np.allclose(action_prob, np.exp(action_logprob)), (action_prob, action_logprob)\n\n # Free memory\n del model, env", "def sigmoid_cross_entropy(inputs, reduction='valid', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.SigmoidCrossEntropy\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def L2(yhat, y):\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss", "def smooth_l1_loss(inputs, beta=1., reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['beta'] = float(args['beta'])\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.SmoothL1Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n beta=args['beta'],\n reduction=args['reduction'],\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def I(x, y, l, p):\n \n return 0.5 / (mu * c) * A0**2 * ( u(x, y, l, p) )**2", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def log_loss(m_true, alpha, alpha0, m_probs, lambd=1.0):\n \n m_probs = tf.clip_by_value(m_probs, 1e-15, 1 - 1e-15)\n loss = -tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=m_true * tf.math.log(m_probs), axis=1))\n if lambd > 0:\n kl = kullback_leibler_dirichlet(m_true, alpha)\n loss = loss + lambd * kl\n return loss", "def cross_entropy_lsm(logits, ys, lsm_prob, ignore_index, training, normalize_length=False):\n bs, _, vocab = logits.size()\n ys = ys.view(-1)\n logits = logits.view((-1, vocab))\n if lsm_prob == 0 or not training:\n loss = F.cross_entropy(logits, ys, ignore_index=ignore_index, reduction='mean')\n ppl = 
np.exp(loss.item())\n if not normalize_length:\n loss *= (ys != ignore_index).sum() / float(bs)\n else:\n with torch.no_grad():\n target_dist = logits.new_zeros(logits.size())\n target_dist.fill_(lsm_prob / (vocab - 1))\n mask = ys == ignore_index\n ys_masked = ys.masked_fill(mask, 0)\n target_dist.scatter_(1, ys_masked.unsqueeze(1), 1 - lsm_prob)\n log_probs = torch.log_softmax(logits, dim=-1)\n loss_sum = -torch.mul(target_dist, log_probs)\n n_tokens = len(ys) - mask.sum().item()\n denom = n_tokens if normalize_length else bs\n loss = loss_sum.masked_fill(mask.unsqueeze(1), 0).sum() / denom\n ppl = np.exp(loss.item()) if normalize_length else np.exp(loss.item() * bs / n_tokens)\n return loss, ppl", "def l1(y_true, y_pred):\r\n if K.ndim(y_true) == 4:\r\n return K.sum(K.abs(y_pred - y_true), axis=[1, 2, 3])\r\n elif K.ndim(y_true) == 3:\r\n return K.sum(K.abs(y_pred - y_true), axis=[1, 2])\r\n else:\r\n raise NotImplementedError(\"Calculating L1 loss on 1D tensors? should not occur for this network\")", "def experiment_linear_l1(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='l1',\n dual_norm_type='linf',\n baseline_norm_types=['l2'],\n attack_step_dir='grad_max')", "def add_loss(self):\n with vs.variable_scope(\"loss\"):\n weights = tf.to_float(tf.not_equal(self.ans_ids, PAD_ID)) # [batch_size, context_len]\n\n # shift the weight right to include the end id\n batch_size = tf.shape(weights)[0]\n shift_val = tf.ones([batch_size, 1])\n\n self.new_ans_ids = tf.concat([self.ans_ids[:, 1:], tf.fill([batch_size, 1], 0)], 1)\n self.logits = self.train_logits\n weights = tf.concat([shift_val, weights], 1)[:, :-1]\n self.loss = tf.contrib.seq2seq.sequence_loss(self.logits, self.new_ans_ids, weights=weights)\n tf.summary.scalar('train_loss', self.loss)\n tf.summary.scalar('sampling_prob', self.sampling_prob)\n\n if self.FLAGS.pred_method == 'beam':\n self.dev_logits = tf.Print(self.dev_logits[:, :, 0], [tf.shape(self.dev_logit), self.dev_logits[0, :, 0]])\n self.dev_loss = tf.cast(self.dev_logits[0, 0], tf.float32)\n return\n dev_logits_len = tf.to_int32(tf.shape(self.dev_logits)[1])\n weights = tf.concat([weights[:, 1:], tf.fill([batch_size, 1], 0.0)], 1)\n self.dev_loss = tf.contrib.seq2seq.sequence_loss(\n self.dev_logits, self.new_ans_ids[:, :dev_logits_len],\n weights=weights[:, :dev_logits_len])", "def test_special_substitution_of_identity(free_alg):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n t = IndexedBase('y')\n a = IndexedBase('a')\n i, j = p.i, p.j\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, p.R), x[i] * v[i] + a[i])\n ident_def = dr.define(1, dr.einst(t[i] * w[i]))\n\n res = orig.subst_all([ident_def])\n assert dr.simplify(\n res - dr.einst(x[i] * v[i])\n - dr.sum((i, p.R), (j, p.R), a[i] * t[j] * w[j])\n ) == 0", "def bp_mll_loss(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n\n # get true and false labels\n shape = tf.shape(y_true)\n y_i = tf.equal(y_true, tf.ones(shape))\n y_i_bar = tf.not_equal(y_true, tf.ones(shape))\n\n # get indices to check\n truth_matrix = tf.cast(pairwise_and(y_i, y_i_bar), dtype=tf.float32)\n\n # calculate all exp'd differences\n sub_matrix = 
pairwise_sub(y_pred, y_pred)\n exp_matrix = tf.exp(tf.negative(sub_matrix))\n\n # check which differences to consider and sum them\n sparse_matrix = tf.multiply(exp_matrix, truth_matrix)\n sums = tf.reduce_sum(sparse_matrix, axis=[1,2])\n\n # get normalizing terms and apply them\n y_i_sizes = tf.reduce_sum(tf.cast(y_i, dtype=tf.float32), axis=1)\n y_i_bar_sizes = tf.reduce_sum(tf.cast(y_i_bar, dtype=tf.float32), axis=1)\n normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)\n results = tf.divide(sums, normalizers)\n\n # average error\n return tf.reduce_mean(results)", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm", "def calc_iam(a_1, a_2, a_3, a_4, a_5, a_6, aoi, loss_method):\n if loss_method == 'Janotte':\n iam = 1 - a_1 * abs(aoi) - a_2 * aoi**2\n\n if loss_method == 'Andasol':\n iam = (1 - a_1 * abs(aoi) - a_2 * aoi**2 - a_3 * aoi**3 - a_4 * aoi**4\n - a_5 * aoi**5 - a_6 * aoi**6)\n return iam", "def loss_step1(self, x, y):\n u = x @ self.w\n u_prime = 0.125 * (u.t() @ u).t()\n return u, u_prime", "def identity(features):\n features = tf.convert_to_tensor(features)\n return tf.identity(features)", "def compute_loss_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n\n return e.dot(e)/(2 * len(e)) + lambda_ * sum(abs(w))", "def L_fun(x):\n \n f_x0 = f(ad.create_vector('x0', x0, seed_vector=x));\n f_x0 = np.array(f_x0) #ensure that f_x0 is np.array\n action = sum_values(ad.get_deriv(f_x0))\n return action", "def nll_full_rank(self, target, mu, tril_elements, reduce=True):\n batch_size, _ = target.shape\n tril = torch.zeros([batch_size, self.Y_dim, self.Y_dim],\n device=self.device, dtype=None)\n tril[:, self.tril_idx[0], self.tril_idx[1]] = tril_elements\n log_diag_tril = torch.diagonal(tril, offset=0, dim1=1, dim2=2) # [batch_size, Y_dim]\n logdet_term = -torch.sum(log_diag_tril, dim=1) # [batch_size,]\n tril[:, torch.eye(self.Y_dim, dtype=bool)] = torch.exp(log_diag_tril)\n prec_mat = torch.bmm(tril, torch.transpose(tril, 1, 2)) # [batch_size, Y_dim, Y_dim]\n y_diff = mu - target # [batch_size, Y_dim]\n mahalanobis_term = 0.5*torch.sum(\n y_diff*torch.sum(prec_mat*y_diff.unsqueeze(-1), dim=-2), dim=-1) # [batch_size,]\n loss = logdet_term + mahalanobis_term + 0.5*self.Y_dim*log_2_pi\n if reduce:\n return torch.mean(loss, dim=0) # float\n else:\n return loss # [batch_size,]", "def unnormalized_loss(self):\n return 0.5 * la.norm(self.resids) ** 2", "def g(t1):\n if isinstance(t1, IdentExp) and self.st.has_key(t1.name):\n ninfo = self.st[t1.name]\n if ninfo[\"srcty\"] == \"vector\":\n if self.existMats and t1.name == n.exp.lhs.name:\n return ArrayRefExp(t1, IdentExp(\"i2\"))\n else:\n self.st[\"itrs\"][0].update({ninfo[\"len\"][0]: \"i1\"})\n return ArrayRefExp(t1, IdentExp(\"i1\"))\n elif ninfo[\"srcty\"] == \"matrix\":\n self.st[\"itrs\"][0].update({ninfo[\"len\"][0]: \"i1\"})\n self.st[\"itrs\"][1].update({ninfo[\"len\"][1]: \"i2\"})\n sub = s2t(\"exp\", ninfo[\"len\"][0] + \" * i2 + i1\")\n return ArrayRefExp(t1, sub)\n else:\n return t1\n else:\n return t1", "def generator_loss(self, fake_images=None, real_images=None, fake_output=None, l1_lambda=100, loss_strategy='both'):\n #TODO with try/except\n assert loss_strategy in ['GAN', 'L1', 'both'], \"Error: invalid type of loss. 
Should be 'GAN', 'L1' or 'both'\"\n if loss_strategy == \"GAN\":\n fake_loss = self.cross_entropy(ones_like(fake_output), fake_output)\n return fake_loss\n elif loss_strategy == \"L1\":\n L1_loss = l1_lambda*self.l1(real_images, fake_images)\n return L1_loss\n elif loss_strategy == 'both':\n fake_loss = self.cross_entropy(ones_like(fake_output), fake_output)\n L1_loss = self.l1(real_images, fake_images)\n return fake_loss + l1_lambda*L1_loss", "def get_identity(l):\n identity = np.zeros((l, l))\n for i in range(l):\n identity[i][i] = 1\n return identity", "def cross_entropy_cost(m, A, L):\n\tcost = (-1 / m) * np.sum(L * np.log(A) + (1 - L) * (np.ma.log(1 - A))) #Note: Using numpy masked array np.ma for values of log(0)\n\n\n\t# Sanity checks\n\tcost = np.squeeze(cost) \t#squeeze() removes single dimensional elements from the array: e.g. (1, 3, 1) -> (3,)\n\tassert(cost.shape == ()) \t#checks if cost value is a scalar\n\n\treturn cost", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def causal_structure_learning(X, lambda1=0.001, loss_type='l2', max_iter=100, h_tol=1e-8, rho_max=1e+16, w_threshold=0.3):\r\n\r\n def _loss(W):\r\n \"\"\"Evaluate value and gradient of loss.\"\"\"\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss\r\n\r\n def _h(W):\r\n \"\"\"Evaluate value and gradient of acyclicity constraint.\"\"\"\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h\r\n\r\n def _adj(w):\r\n \"\"\"Convert doubled variables ([2 d^2] array) back to original variables ([d, d] matrix).\"\"\"\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])\r\n\r\n def _func(w):\r\n \"\"\"Evaluate value and gradient of augmented Lagrangian for doubled variables ([2 d^2] array).\"\"\"\r\n W = _adj(w)\r\n loss, G_loss = _loss(W)\r\n h, G_h = _h(W)\r\n obj = loss + 0.5 * rho * h * h + alpha * h + lambda1 * w.sum()\r\n G_smooth = G_loss + (rho * h + alpha) * G_h\r\n g_obj = np.concatenate((G_smooth + lambda1, - G_smooth + lambda1), axis=None)\r\n return obj, g_obj\r\n\r\n n, d = X.shape\r\n w_est, rho, alpha, h = np.zeros(2 * d * d), 1.0, 0.0, np.inf # double w_est into (w_pos, w_neg)\r\n bnds = [(0, 0) if i == j else (0, None) for _ in range(2) for i in range(d) for j in range(d)]\r\n for iter_j in range(max_iter):\r\n w_new, h_new = None, None\r\n print(iter_j)\r\n while rho < rho_max:\r\n sol = sopt.minimize(_func, w_est, method='L-BFGS-B', jac=True, bounds=bnds)\r\n w_new = sol.x\r\n h_new, _ = _h(_adj(w_new))\r\n if h_new > 0.25 * h:\r\n rho *= 10\r\n else:\r\n break\r\n w_est, h = w_new, h_new\r\n alpha += rho * h\r\n if h <= h_tol or rho >= rho_max:\r\n break\r\n W_est = _adj(w_est)\r\n # print(W_est)\r\n W_est[np.abs(W_est) < w_threshold] = 0\r\n # print(W_est)\r\n 
return W_est, h", "def step(self, x, y, learning_rate=1e-3):\n \n # Input transformation\n \"\"\"\n Input is represented with M-dimensional vectors\n We convert them to (N, M) matrices such that columns are one-hot \n representations of the input\n \"\"\"\n x = self.one_hot(x, self.N)\n y = self.one_hot(y, self.N)\n\n \n # Forward propagation\n \"\"\"\n Returns\n -------\n embedding: array\n (D, M) matrix where columns are word embedding from U matrix\n logits: array\n (N, M) matrix where columns are output logits\n prob: array\n (N, M) matrix where columns are output probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n #Omran:\n #U and V of dimension (D, N) and (N, D) respectively\n\n embedding = np.dot(self.U, x)\n logits = np.dot(self.V, embedding)\n prob = self.softmax(logits,0)# take care of the axis, I am not quite sure how you will implement it\n \n assert embedding.shape == (self.D, x.shape[1])\n assert logits.shape == (self.N, x.shape[1])\n assert prob.shape == (self.N, x.shape[1])\n \n \n # Loss calculation\n \"\"\"\n Returns\n -------\n loss: int\n Cross-entropy loss using true values and probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n loss = self.loss(y, prob)\n \n # Backward propagation\n \"\"\"\n Returns\n -------\n d_U: array\n (N, D) matrix of partial derivatives of loss w.r.t. U\n d_V: array\n (D, N) matrix of partial derivatives of loss w.r.t. V\n \"\"\"\n \n ### YOUR CODE HERE ###\n #I am not quite sure of this!!\n \n# difference = np.sum(np.subtract(prob, y), axis=1)\n difference = prob - y\n d_V = difference @ embedding.T\n# print(self.N, self.D)\n# print(difference.shape)\n# print(d_V.shape)\n d_U = (self.V.T @ difference) @ x.T\n# d_U = self.V.T @ np.outer(difference, x)\n \n assert d_V.shape == (self.N, self.D)\n assert d_U.shape == (self.D, self.N)\n \n \n # Update the parameters\n \"\"\"\n Updates the weights with gradient descent such that W_new = W - alpha * dL/dW, \n where alpha is the learning rate and dL/dW is the partial derivative of loss w.r.t. 
\n the weights W\n \"\"\"\n \n ### YOUR CODE HERE ###\n self.V = self.V - learning_rate * d_V\n self.U = self.U - learning_rate * d_U\n\n return loss, d_U, d_V", "def normal_kl(mean1, logvar1, mean2, logvar2):\n return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2) + (mean1 - mean2) ** 2 * torch.exp(-logvar2))", "def __init__(self, generator, tgt_vocab,\n normalization=\"sents\",\n label_smoothing=0.0,\n use_kl_annealing=False,\n use_kl_freebits=False,\n kl_freebits_margin=0.0,\n kl_annealing_current=0.0,\n kl_annealing_increment=0.0001,\n kl_annealing_warmup_steps=1000,\n image_loss_type='logprob',\n use_local_image_features=False,\n two_step_image_prediction=False\n ):\n self.multimodal_model_type = 'vi-model1'\n\n super(NMTVIModel1LossCompute, self).__init__(generator, tgt_vocab,\n normalization, label_smoothing)\n\n # kl annealing parameters\n self.n_model_updates = 0\n self.use_kl_annealing = use_kl_annealing\n if use_kl_annealing:\n self.kl_annealing_current = kl_annealing_current\n self.kl_annealing_increment = kl_annealing_increment\n self.kl_annealing_warmup_steps = kl_annealing_warmup_steps\n else:\n self.kl_annealing_current = 1.0\n self.kl_annealing_increment = 0.0\n self.kl_annealing_warmup_steps = 0\n\n self.use_kl_freebits = use_kl_freebits\n if use_kl_freebits:\n self.kl_freebits_margin = kl_freebits_margin\n else:\n self.kl_freebits_margin = 0.0\n\n self.image_loss_type = image_loss_type\n self.use_local_image_features = use_local_image_features\n self.two_step_image_prediction = two_step_image_prediction\n self._statistics = onmt.VIStatistics\n\n if image_loss_type == 'categorical':\n self.image_loss_criterion = nn.NLLLoss2d()", "def lasso_func(A, x, b, lmbda): \n q = (1.0/2) * (LA.norm(np.dot(A, x) - b, 2) ** 2) + lmbda * LA.norm(x, 1)\n return q", "def loss_fn(outputs, labels):\n return nn.CrossEntropyLoss()(outputs, labels)", "def pgd_linf(model, x, y, epsilon=0.1, alpha=0.01, num_iter=20, randomize=False, label_leaking=True):\n if randomize:\n delta = torch.rand_like(x, requires_grad=True)\n delta.data = delta.data * 2 * epsilon - epsilon\n else:\n delta = torch.zeros_like(x, requires_grad=True)\n \n for t in range(num_iter):\n loss = nn.CrossEntropyLoss()(model(x + delta), y)\n loss.backward()\n delta.data = (delta + alpha * delta.grad.detach().sign()).clamp(-epsilon, epsilon)\n delta.grad.zero_()\n return delta.detach()", "def ls_generator_loss(scores_fake):\r\n loss = torch.mean((scores_fake - 1) ** 2) / 2\r\n return loss", "def step(self, x):\r\n y,mean,stddev_p,z,xhat=self.forward(x)\r\n L_rec=self.loss_fn(x,xhat)\r\n\r\n var = torch.exp(stddev_p)\r\n s = torch.log(var) - torch.pow(mean,2) - var\r\n s = self.latent_dim + torch.sum(s, 1)\r\n s = -0.5*s\r\n s = self.lam*s \r\n L_kl = s.mean()\r\n \r\n \r\n L=L_kl+L_rec\r\n\r\n L.backward()\r\n self.opt.step()\r\n self.opt.zero_grad()\r\n \r\n return L_rec,L_kl,L", "def get_lid(model, X_test, X_test_noisy, X_test_adv, k=10, batch_size=100, dataset='mnist'):\r\n print('Extract local intrinsic dimensionality: k = %s' % k)\r\n lids_normal, lids_noisy, lids_adv = get_lids_random_batch(model, X_test, X_test_noisy,\r\n X_test_adv, dataset, k, batch_size)\r\n print(\"lids_normal:\", lids_normal.shape)\r\n print(\"lids_noisy:\", lids_noisy.shape)\r\n print(\"lids_adv:\", lids_adv.shape)\r\n\r\n ## skip the normalization, you may want to try different normalizations later\r\n ## so at this step, just save the raw values\r\n # lids_normal_z, lids_adv_z, lids_noisy_z = normalize(\r\n # lids_normal,\r\n # 
lids_adv,\r\n # lids_noisy\r\n # )\r\n\r\n lids_pos = lids_adv\r\n lids_neg = np.concatenate((lids_normal, lids_noisy))\r\n artifacts, labels = merge_and_generate_labels(lids_pos, lids_neg)\r\n\r\n return artifacts, labels", "def forward_train(self, preds_T: torch.Tensor) -> torch.Tensor:\n fake_label = preds_T.data.max(1)[1]\n return F.cross_entropy(preds_T, fake_label)", "def _aux_mixup_forward(self, F, pred1, pred2, label1, label2, lam):\n loss1 = self._mixup_forwar(F, pred1, label1, label2, lam)\n loss2 = self._mixup_forwar(F, pred2, label1, label2, lam)\n return loss1 + self.aux_weight * loss2", "def apply_induction(self, id, th_name, var):\n method.apply_method(self, {\n 'method_name': 'induction',\n 'goal_id': id, 'fact_ids': [], 'theorem': th_name, 'var': var\n })", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def perplexity_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return 2 ** crossentropy_fn(args)", "def activation(x):\n return 1 / (1 + torch.exp(-x))", "def diff_fn(\n mu_i: tf.Tensor,\n ddu_n_i: tf.Tensor,\n ddu_t_i: tf.Tensor,\n ) -> tf.Tensor:\n return mu_i * (4.0 / 3.0 * ddu_n_i + 1.0 / 3.0 * ddu_t_i)", "def test_embed():\n\n embed_dense = L.EmbedID(5, 10)\n embed_sparse = L.EmbedID(5, 10)\n embed_dense.W.data[:] = np.random.randn(5, 10).astype('float32')\n embed_sparse.W.data[:] = np.random.randn(5, 10).astype('float32')\n embed_sparse.W.data[:, 1:] /= 1e5\n dhl_dense_01 = dirichlet_likelihood(embed_dense, alpha=0.1).data\n dhl_sparse_01 = dirichlet_likelihood(embed_sparse, alpha=0.1).data\n\n msg = \"Sparse vector has higher likelihood than dense with alpha=0.1\"\n assert dhl_sparse_01 > 
dhl_dense_01, msg", "def distillation_KL_loss(y, teacher_scores, T, scale=1, reduction='batchmean'):\n return F.kl_div(F.log_softmax(y / T, dim=1), F.softmax(teacher_scores / T, dim=1),\n reduction=reduction) * scale", "def loss(A, Y):\n return A - Y", "def kl(self, Y, Y_hat):\n # Pull out the argument to the sigmoid\n assert hasattr(Y_hat, 'owner')\n owner = Y_hat.owner\n assert owner is not None\n op = owner.op\n\n if not hasattr(op, 'scalar_op'):\n raise ValueError(\"Expected Y_hat to be generated by an Elemwise op, got \"+str(op)+\" of type \"+str(type(op)))\n assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)\n z ,= owner.inputs\n\n term_1 = Y * T.nnet.softplus(-z)\n term_2 = (1 - Y) * T.nnet.softplus(z)\n\n total = term_1 + term_2\n ave = total.mean(axis=1)\n assert ave.ndim == 1\n\n return ave", "def kl(self, other: \"Distribution\", **kwargs) -> TensorType:", "def kl(self, other: \"Distribution\", **kwargs) -> TensorType:", "def loss(logits, angles):\n\treturn tf.nn.l2_loss(tf.sub(logits, angles), name='loss')", "def L(C1s,C0s,ks,bs,sigma=1):\n # return jnp.linalg.det(FIM(q,ps,C1s,C0s,ks,bs,sigma))\n return lambda q,ps:jnp.trace(jnp.linalg.inv(FIM(C1s,C0s,ks,bs,sigma)(q,ps)))" ]
[ "0.5652646", "0.5454709", "0.5431929", "0.5346413", "0.52700704", "0.5245193", "0.5241548", "0.5189512", "0.51814455", "0.51782507", "0.51747054", "0.5168681", "0.5149222", "0.51125103", "0.510106", "0.50966114", "0.5086483", "0.50756854", "0.50619715", "0.5041158", "0.5026359", "0.49958798", "0.49842122", "0.49822715", "0.4976281", "0.49656218", "0.49615568", "0.4958906", "0.49548876", "0.49539298", "0.49528816", "0.49527165", "0.49427536", "0.49352407", "0.49252546", "0.4914682", "0.49066827", "0.4903649", "0.49017128", "0.48995188", "0.48870444", "0.48802796", "0.48772347", "0.48769367", "0.48710972", "0.48680368", "0.48679188", "0.48615927", "0.48607063", "0.4844569", "0.4840318", "0.48250794", "0.48228216", "0.48186055", "0.48184255", "0.48155257", "0.48152652", "0.48099595", "0.48003772", "0.4799831", "0.47983623", "0.47967103", "0.47960955", "0.47891888", "0.47879258", "0.4781176", "0.47781268", "0.477643", "0.47710887", "0.47639862", "0.47602773", "0.47596273", "0.47555566", "0.47472483", "0.4745855", "0.4743553", "0.47428903", "0.4742154", "0.4739782", "0.4739283", "0.4733689", "0.47284442", "0.4724442", "0.47197407", "0.47186506", "0.47177973", "0.47119597", "0.47117585", "0.47076315", "0.46998426", "0.469352", "0.46918803", "0.46912563", "0.46907678", "0.46906176", "0.46897268", "0.4689693", "0.4689693", "0.46879703", "0.46825606" ]
0.69998527
0
Modified optimizer taken from vanhuyz's TensorFlow implementation of CycleGAN
def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
    def make_optimizer(loss, variables, name='Adam'):
        """ Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
            and a linearly decaying rate that goes to zero over the next 100k steps
        """
        global_step = tf.Variable(0, trainable=False, name='global_step')
        starter_learning_rate = self.opt.lr
        end_learning_rate = 0.0
        start_decay_step = self.opt.niter
        decay_steps = self.opt.niter_decay
        beta1 = self.opt.beta1
        learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),
                                  tf.train.polynomial_decay(starter_learning_rate,
                                                            global_step - start_decay_step,
                                                            decay_steps, end_learning_rate,
                                                            power=1.0),
                                  starter_learning_rate))
        learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)
                         .minimize(loss, global_step=global_step, var_list=variables))
        return learning_step

    Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')
    D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')
    D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')

    with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):
        return tf.no_op(name='optimizers')
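The learning-rate schedule is the part of this record the query highlights: a constant rate for the first `niter` steps, then a linear decay to zero over the next `niter_decay` steps. The snippet below is a minimal plain-Python sketch of that schedule only (no TensorFlow); the 100k step counts come from the docstring, and the default values stand in for `self.opt.lr`, `self.opt.niter` and `self.opt.niter_decay`, which are not given in the source.

    # Sketch of the schedule used above: constant learning rate for the first
    # `start_decay_step` steps, then linear decay to `end_lr` over `decay_steps`,
    # mirroring tf.train.polynomial_decay(..., power=1.0) behind the tf.where switch.
    def lr_schedule(step, starter_lr=0.0002, end_lr=0.0,
                    start_decay_step=100_000, decay_steps=100_000):
        if step < start_decay_step:
            return starter_lr
        # polynomial_decay clamps the decayed step at decay_steps (cycle=False)
        t = min(step - start_decay_step, decay_steps)
        return (starter_lr - end_lr) * (1.0 - t / decay_steps) + end_lr

    if __name__ == "__main__":
        for s in (0, 50_000, 100_000, 150_000, 200_000, 250_000):
            print(s, lr_schedule(s))  # 0.0002 up to 100k, then 0.0001 at 150k, 0.0 from 200k on

This reproduces the described behaviour without depending on the TF1 API (`tf.train.AdamOptimizer` / `tf.train.polynomial_decay`), which is deprecated in TensorFlow 2.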
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, sess, max_iter=50001, optim='adagrad', learning_rate=1e-2,\n d_per_iter=1, g_per_iter=2, d_update=True, g_update=True,\n real_n=1000, real_dim=2, fake_n=1000, z_dim=3, g_out_dim=2,\n g_layers_depth=5, g_layers_width=None, g_activations=None,\n d_out_dim=1, d_layers_depth=5, d_layers_width=5,\n d_activations=None, d_batch_size=25, x_lims=None, y_lims=None,\n grid_gran=21, grid_n=None, dataset='gaussian', expt='test_low_alpha'):\n self.sess = sess\n self.max_iter = max_iter\n self.optim = optim\n self.learning_rate = learning_rate\n\n self.d_per_iter = d_per_iter \n self.g_per_iter = g_per_iter\n self.d_update = d_update\n self.g_update = not d_update \n\n self.real_n = real_n \n self.real_dim = real_dim \n self.fake_n = fake_n\n\n self.z_dim = z_dim\n self.g_out_dim = g_out_dim\n self.g_layers_depth = g_layers_depth\n self.g_layers_width = [[5]] * (g_layers_depth - 1) + [[g_out_dim]]\n self.g_activations = [tf.nn.tanh, tf.nn.elu]\n\n self.d_out_dim = d_out_dim\n self.d_layers_depth = d_layers_depth\n self.d_layers_width = d_layers_width\n self.d_activations = [tf.nn.tanh, tf.nn.relu]\n self.d_batch_size = d_batch_size\n\n self.x_lims = [-6., 2.]\n self.y_lims = [-2., 6.]\n self.grid_gran = grid_gran\n self.grid_n = grid_gran ** 2\n self.grid, self.x_grid, self.y_grid = self.make_grid()\n\n self.dataset = dataset \n self.real_points = load_2d_data(dataset, real_n, real_dim)\n\n self.expt = expt\n\n self.build_model()", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. 
It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def make_optimizer(self, train_var_filter):\n # According from the prototxt in Caffe implement, learning rate must multiply by 10.0 in pyramid module\n fc_list = ['conv5_3_pool1_conv', 'conv5_3_pool2_conv', 'conv5_3_pool3_conv', 'conv5_3_pool6_conv', 'conv6',\n 'conv5_4']\n all_trainable = [v for v in tf.trainable_variables() if\n ('beta' not in v.name and 'gamma' not in v.name) or True]\n fc_trainable = [v for v in all_trainable if v.name.split('/')[0] in fc_list]\n conv_trainable = [v for v in all_trainable if v.name.split('/')[0] not in fc_list] # lr * 1.0\n fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0\n fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0\n assert (len(all_trainable) == len(fc_trainable) + len(conv_trainable))\n assert (len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))\n\n with tf.control_dependencies(self.update_ops):\n opt_conv = tf.train.MomentumOptimizer(self.lr_op, self.momentum)\n opt_fc_w = tf.train.MomentumOptimizer(self.lr_op * 10.0, self.momentum)\n opt_fc_b = tf.train.MomentumOptimizer(self.lr_op * 20.0, self.momentum)\n\n grads = tf.gradients(self.loss, conv_trainable + fc_w_trainable + fc_b_trainable)\n grads_conv = grads[:len(conv_trainable)]\n grads_fc_w = grads[len(conv_trainable): (len(conv_trainable) + len(fc_w_trainable))]\n grads_fc_b = grads[(len(conv_trainable) + len(fc_w_trainable)):]\n\n train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable), global_step=self.global_step)\n train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))\n train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))\n\n self.optimizer = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)", "def sgd_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n \n train_step = tf.train.GradientDescentOptimizer(learning_rate)\n #train_step = tf.train.GradientDescentOptimizer(learning_rate)\n return train_step", "def build_gan(\n optimizer,\n timesteps,\n vocab_sizes,\n latlon_dense_units=64,\n concat_dense_units=100,\n lstm_units=100,\n latent_dim=100,\n lstm_reg=0.02,\n):\n gen = build_generator(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n lstm_units,\n latent_dim,\n lstm_reg,\n vocab_sizes,\n )\n dis = build_discriminator(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n lstm_units,\n latent_dim,\n lstm_reg,\n vocab_sizes,\n )\n # Compile discriminator with masked BCE loss. 
Mask is last output of generator\n dis.compile(optimizer=optimizer, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n dis.trainable = False\n\n # The trajectory generator takes real trajectories and noise as inputs\n # inputs = [layers.Input(shape=(timesteps, 2), name=\"input_latlon\")]\n # for key, val in vocab_sizes.items():\n # inputs.append(layers.Input(shape=(timesteps, val), name=\"input_\" + key))\n # inputs.append(layers.Input(shape=(latent_dim,), name=\"input_noise\"))\n # inputs.append(layers.Input(shape=(timesteps, 1), name=\"input_mask\"))\n # gen_trajs = gen(inputs)\n # y_pred = dis(gen_trajs[:-1])\n # mask = inputs[-1]\n # gan = Model(inputs, y_pred)\n # gan.add_loss(traj_loss(inputs[:-2], gen_trajs[:-1], mask))\n ##\n y_pred = dis(gen.outputs[:-1])\n gan = Model(gen.inputs, y_pred)\n mask = gen.inputs[-1]\n gan.add_loss(traj_loss(gen.inputs[:-2], gen.outputs[:-1], mask))\n gan.compile(optimizer=optimizer, loss=\"binary_crossentropy\")\n return gen, dis, gan", "def add_optimizers_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"optimizers\") as scope:\n self.grads_and_vars = list() # [sch_idx][weight_idx]\n self.apply_grads = list() # [sch_idx][weight_idx]\n self.learning_rates = list() # [sch_idx][weight_idx]\n if self.params.optimizer == \"lbfgsb\":\n self.minimizer = None\n #self.minimizer = tfp.optimizer.lbfgs_minimize(\n # value_and_gradients_function=self.loss_value_and_grad,#self.total_loss,\n # initial_position=self.w_init,#self.trainable_variables,\n # max_iterations=self.params.maxiter)\n #self.minimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss,\n # options={\"maxiter\":self.params.maxiter}) # Default method is L-BFGSB\n for schedule_idx, sch in enumerate(self.params.schedule):\n sch_grads_and_vars = list() # [weight_idx]\n sch_apply_grads = list() # [weight_idx]\n sch_lrs = list() # [weight_idx]\n #Construct weight ops\n weight_ops = [self.trainable_variables[weight] for weight in sch[\"weights\"]]\n for w_idx, weight in enumerate(sch[\"weights\"]):\n weight_name = weight.split(\"/\")[-1].split(\":\")[0]\n learning_rates = tf.compat.v1.train.exponential_decay(\n learning_rate=sch[\"weight_lr\"][w_idx],\n global_step=self.global_step,\n decay_steps=sch[\"decay_steps\"][w_idx],\n decay_rate=sch[\"decay_rate\"][w_idx],\n staircase=sch[\"staircase\"][w_idx],\n name=\"annealing_schedule_\"+weight_name)\n sch_lrs.append(learning_rates)\n if self.params.optimizer == \"sgd\":\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rates,\n name=\"grad_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adam\":\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rates, beta1=0.9, beta2=0.99,\n epsilon=1e-07, name=\"adam_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adadelta\":\n optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rates, epsilon=1e-07,\n name=\"adadelta_optimizer_\"+weight_name)\n elif self.params.optimizer == \"lbfgsb\":\n optimizer = None\n else:\n assert False, (\"Optimizer \"+self.params.optimizer+\" is not supported.\")\n weight_op = self.trainable_variables[weight]\n sch_grads_and_vars.append(self.compute_weight_gradients(optimizer, weight_op))\n gstep = self.global_step if w_idx == 0 else None # Only increment once\n if self.params.optimizer == \"lbfgsb\": # BFGS doesn't actually need the update op\n if w_idx == 0:\n sch_apply_grads.append(tf.compat.v1.assign_add(self.global_step, 1))\n else:\n 
sch_apply_grads.append(None)\n else:\n sch_apply_grads.append(optimizer.apply_gradients(sch_grads_and_vars[w_idx],\n global_step=gstep))\n self.learning_rates.append(sch_lrs)\n self.grads_and_vars.append(sch_grads_and_vars)\n self.apply_grads.append(sch_apply_grads)\n self.optimizers_added = True", "def _add_train_op(self):\n with tf.device(\"/gpu:0\"):\n learning_rate_D = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_D,\n # 100000, 0.96, staircase=True)\n learning_rate_G = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_G,\n # 100000, 0.96, staircase=True)\n learning_rate_D_in = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_D,\n # 100000, 0.96, staircase=True)\n self._train_op_D = tf.train.AdamOptimizer(learning_rate_D,beta1=0.5).minimize(self._D_loss,\n global_step=self.global_step_D,\n var_list=self.discriminator._theta)\n self._train_op_D_in = tf.train.AdamOptimizer(learning_rate_D_in,beta1=0.5).minimize(self._D_in_loss,\n global_step=self.global_step_D_in,\n var_list=self.discriminator_inner._theta)\n\n self._train_op_G = tf.train.AdamOptimizer(learning_rate_G,beta1=0.5).minimize(self._G_loss,\n global_step=self.global_step_G,\n var_list=self.generator._theta)", "def GNN(V_init, E_init, sizes, iterations=3, edge_layers = 2,\n edge_hidden = 100, node_layers = 2, node_hidden = 100, act=tf.nn.relu):\n V, E = V_init, E_init\n\n # Get dimensions\n N_v = int(V.get_shape()[1])\n C_v = int(V.get_shape()[2])\n C_e = int(E.get_shape()[3])\n\n with tf.variable_scope(\"GraphNeuralNet\"):\n with tf.variable_scope(\"Masks\"):\n mask = tf.sequence_mask(\n sizes, maxlen=N_v, dtype=tf.float32, name=\"Mask1D\"\n )\n mask_V = tf.expand_dims(mask, 2)\n mask_E = tf.expand_dims(mask_V,1) * tf.expand_dims(mask_V,2)\n \n # Initialize hidden state\n with tf.variable_scope(\"NodeInit\"):\n V = mask_V * MLP(V, node_layers, node_hidden)\n with tf.variable_scope(\"EdgeInit\"):\n E = mask_E * MLP(E, edge_layers, edge_hidden)\n tf.summary.image(\"Edge\", E[:,:,:,:3])\n\n for i in range(iterations):\n # with tf.variable_scope(\"Iteration{}\".format(i)):\n # reuse = None\n with tf.name_scope(\"Iteration{}\".format(i)):\n reuse = True if i > 0 else None\n with tf.variable_scope(\"EdgeUpdate\", reuse=reuse):\n # Update edges given {V,E}\n f = PairFeatures(\n V, E, edge_hidden, reuse=reuse, name=\"EdgeFeatures\", activation=act\n )\n dE = MLP(\n f, edge_layers, edge_hidden, name=\"EdgeMLP\", activation=act, reuse=reuse # changed\n )\n # dE = tf.layers.dropout(dE, dropout, training=bool(dropout))\n E = E + mask_E * dE\n with tf.variable_scope(\"NodeUpdate\", reuse=reuse):\n # Update nodes given {V,E'}\n # f = PairFeatures(\n # V, E, node_hidden, reuse=reuse, name=\"NodeFeatures\", activation=act\n # )\n tf.summary.image(\"EdgeOut\", E[:,:,:,:3])\n dV = MLP(\n E, node_layers, node_hidden, name = \"NodeMessages\", activation=act, reuse=reuse\n )\n dV = tf.reduce_sum(dV, 2)\n dV = MLP(\n dV, node_layers, node_hidden, name = \"NodeMLP\", activation=act, reuse=reuse # changed\n )\n # dV = tf.layers.dropout(dV, dropout, training=bool(dropout))\n V = V + mask_V * dV\n return V, E, mask_V, mask_E", "def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):\n \n #Assign required variables first\n self._m = m\n self._n = n\n if alpha is None:\n alpha = 0.3\n else:\n alpha = float(alpha)\n if sigma is None:\n sigma = max(m, n) / 2.0\n else:\n sigma = float(sigma)\n self._n_iterations = abs(int(n_iterations))\n \n ##INITIALIZE GRAPH\n self._graph = tf.Graph()\n \n ##POPULATE 
GRAPH WITH NECESSARY COMPONENTS\n with self._graph.as_default():\n \n ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE\n \n #Randomly initialized weightage vectors for all neurons,\n #stored together as a matrix Variable of size [m*n, dim]\n \n ## Vector de pesos\n self._weightage_vects = tf.Variable(tf.random_normal([m*n, dim]))\n \n #Matrix of size [m*n, 2] for SOM grid locations\n #of neurons\n \n ## Vector de localizacion de nodos\n self._location_vects = tf.constant(np.array(\n list(self._neuron_locations(m, n))))\n \n ##PLACEHOLDERS FOR TRAINING INPUTS\n #We need to assign them as attributes to self, since they\n #will be fed in during training\n \n #The training vector\n \n ## Vector de input\n self._vect_input = tf.placeholder(\"float\", [dim])\n #Iteration number\n \n \n ## Iteracion \n self._iter_input = tf.placeholder(\"float\")\n \n matriz = np.array(list(self._neuron_locations(m,n)))\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n\n distancias_matriz = []\n \n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = tf.constant(np.array(distancias_matriz))\n\n \n ##CONSTRUCT TRAINING OP PIECE BY PIECE\n #Only the final, 'root' training op needs to be assigned as\n #an attribute to self, since all the rest will be executed\n #automatically during training\n \n #To compute the Best Matching Unit given a vector\n #Basically calculates the Euclidean distance between every\n #neuron's weightage vector and the input, and returns the\n #index of the neuron which gives the least value\n \n ## Indice del BMU (no es necesario crear arreglo de vectores)\n bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.subtract(self._weightage_vects, self._vect_input), 2), 1)), 0)\n \n #This will extract the location of the BMU based on the BMU's\n #index\n \n slice_input = tf.pad(tf.reshape(bmu_index, [1]),\n np.array([[0, 1]]))\n \n ## Entrega la localizacion en x e y\n bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,\n tf.constant(np.array([1, 2]),\"int64\")),\n [2])\n\n #To compute the alpha and sigma values based on iteration\n #number\n learning_rate_op = tf.subtract(1.0, tf.div(self._iter_input,\n self._n_iterations))\n \n # Creado por mi \n #lear_rate = tf.exp(tf.negative(tf.div(self._iter_input, self._n_iterations)))\n \n _alpha_op = tf.multiply(alpha, learning_rate_op)\n _sigma_op = tf.multiply(sigma, learning_rate_op)\n \n #Construct the op that will generate a vector with learning\n #rates for all neurons, based on iteration number and location\n #wrt BMU. 
\n \n #bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(\n # self._location_vects, bmu_loc), 2), 1)\n \n bmu_distance_squares = distancias_matriz[bmu_index]\n \n # Crea un arreglo de m*n para cada funcion dependiendo de la dist al BMU \n neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(\n bmu_distance_squares, \"float32\"), tf.multiply(tf.pow(_sigma_op, 2.), 2.))))\n\n print(neighbourhood_func)\n \n learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)\n \n print(learning_rate_op)\n \n #Finally, the op that will use learning_rate_op to update\n #the weightage vectors of all neurons based on a particular\n #input\n \n# learning_rate_multiplier = [tf.tile(tf.slice(\n# learning_rate_op, np.array([i]), np.array([1])), [dim]) for i in range(m*n)]\n# \n learning_rate_op_2 = tf.reshape(learning_rate_op, (m*n,1))\n\n #print(learning_rate_multiplier)\n \n weightage_delta = tf.multiply(\n learning_rate_op_2,\n tf.subtract(self._vect_input,\n self._weightage_vects)) \n \n new_weightages_op = tf.add(self._weightage_vects,\n weightage_delta)\n self._training_op = tf.assign(self._weightage_vects,\n new_weightages_op) \n \n ##INITIALIZE SESSION\n self._sess = tf.Session()\n \n ##INITIALIZE VARIABLES\n init_op = tf.global_variables_initializer()\n self._sess.run(init_op)", "def sgd_optimization(dataset, learning_rate, n_epochs, batch_size):\n datasets = load_data(dataset)\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n #number of minibatches\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #build the model\n print \"... building the model\"\n\n index = T.lscalar()\n x = T.matrix('x') #data for the rasterized images\n y = T.ivector('y') # labels (int)\n\n # logistic regression Class\n classifierLR = LogisticRegression(input=x, n_in=28*28, n_out=10)\n cost = classifierLR.negative_log_likelihood(y)\n\n # test model (no updates)\n test_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #validate model (no updates)\n validate_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #compute the gradient of cost wrt W, b\n g_W = T.grad(cost=cost, wrt=classifierLR.W)\n g_b = T.grad(cost=cost, wrt=classifierLR.b)\n\n #updating expression\n updates = [(classifierLR.W, classifierLR.W - learning_rate * g_W),\n (classifierLR.b, classifierLR.b - learning_rate * g_b)]\n\n # Train model (theano function); updates\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n\n }\n )\n\n # Training model (early stopping with validation examples)\n print \"... 
training the model\"\n patience = 5000\n patience_inc = 2 # wait this much\n improved_threshold = 0.995 # relative improvement (significant)\n validation_frequency = min(n_train_batches, patience / 2)\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch += 1\n for minibatch_index in xrange(n_train_batches):\n minibatch_avg_cost = train_model(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute loss on validation set\n validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n \"Epoch: %i, minibatch: %i/%i, validation_error: %f %%\" %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n if this_validation_loss < best_validation_loss:\n #improve patience if good improvement\n if this_validation_loss < best_validation_loss * improved_threshold:\n patience = max(patience, iter * patience_inc)\n\n best_validation_loss = this_validation_loss\n\n #testing on test_set\n test_losses = [test_model(i) for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n \"Epoch : %i, minibatch %i/%i,\"\n \" test error of best model %f %%\"\n ) % (\n epoch,\n minibatch_index,\n n_train_batches,\n test_score * 100.\n )\n )\n\n #save the best model\n print \"New best model found; saving ...\"\n with open('best_model.pkl', \"w\") as f:\n cPickle.dump(classifierLR, f)\n\n if patience <= iter:\n done_looping = True\n break\n\n\n end_time = timeit.default_timer()\n print(\n (\n \"Optimization Complete: best validation score : %f %%,\"\n \" test performance : %f %%\"\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print \"The code run for %d epochs, with %f epochs/sec\" %(epoch, 1. 
* epoch / (end_time - start_time))\n print >> sys.stderr, (\"The code for file \" + os.path.split(__file__)[1] + \" ran for %.1fs\" % ((end_time - start_time)))", "def optimize_coding(real_coding, loss_weight, latent_space, \n mse, generator, optimizer, ntimes, ninput):\n latent_values = tf.random.normal([len(real_coding), latent_space]) \n latent_values = tf.Variable(latent_values)\n\n loss = []\n for epoch in range(5000):\n loss.append(opt_step(latent_values, real_coding, loss_weight, \n mse, generator, optimizer, ntimes, ninput).numpy())\n\n plt.plot(loss)\n plt.grid()\n plt.show\n\n return latent_values", "def optimization_step(self):\n \n if \"CSS\" in self.algorithm:\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set]\n \n if (self.num_samples > 0) and (not self.mixture):\n \n if ((self.mf_steps > 0) and self.alpha >0) or\\\n self.gibbs_steps > 0: \n \n var_list.append(self.sampler_theta)\n \n elif \"CD\" in self.algorithm:\n \n input_dict = {self.x : self.train_inputs[self.minibatch_set,:]} \n \n var_list = [self.minibatch_set]\n \n var_list.append(self.learning_rate)\n \n if self.use_momentum:\n \n var_list.append(self.momentum)\n \n output_vars = [self.pseudo_cost]\n \n if self.report_p_tilda:\n \n output_vars.append(self.p_tilda)\n \n else:\n \n output_vars.append(theano.shared(0))\n \n opt_step = theano.function(inputs = var_list,\n outputs = output_vars,\n updates = self.updates,\n givens = input_dict,\n on_unused_input='warn')\n \n return opt_step", "def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:\n norm_module_types = (\n torch.nn.BatchNorm1d,\n torch.nn.BatchNorm2d,\n torch.nn.BatchNorm3d,\n torch.nn.SyncBatchNorm,\n # NaiveSyncBatchNorm inherits from BatchNorm2d\n torch.nn.GroupNorm,\n torch.nn.InstanceNorm1d,\n torch.nn.InstanceNorm2d,\n torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm,\n torch.nn.LocalResponseNorm,\n )\n params: List[Dict[str, Any]] = []\n memo: Set[torch.nn.parameter.Parameter] = set()\n for module in model.modules():\n for key, value in module.named_parameters(recurse=False):\n if not value.requires_grad:\n continue\n # Avoid duplicating parameters\n if value in memo:\n continue\n memo.add(value)\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if isinstance(module, norm_module_types):\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM\n elif key == \"bias\":\n # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0\n # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer\n # hyperparameters are by default exactly the same as for regular\n # weights.\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)\n return optimizer", "def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.update_lr = FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.auto_lr = tf.placeholder_with_default(FLAGS.auto_lr, ())\n \n self.classification = False\n self.test_num_updates = test_num_updates\n self.dim_auto = 2 #This should be able to be arbitrary\n if auto:\n self.real_input = 39 # This is square root of the total (its a kernel)\n #self.real_output = 40#self.dim_output\n self.real_output = 39*39 # This should be the 
complete dimension out. \n self.dim_input = 3*self.dim_auto #= 3*self.dim_auto \n self.dim_output = self.dim_auto\n #This is from each. \n #if auto: self.dim_input, self.dim_output = self.dim_auto, self.dim_auto #If auto, pass in/out the dimension of the latent (auto_\n if FLAGS.datasource == 'sinusoid':\n self.dim_hidden = [40, 40,40]\n self.loss_func = mse\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':\n self.loss_func = xent\n self.classification = True\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [256, 128, 64, 64]\n self.forward=self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.datasource == 'miniimagenet':\n self.channels = 3\n else:\n self.channels = 1\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n else:\n raise ValueError('Unrecognized data source.')", "def optimize(self):\n\n\t\t# Set up optimizer\n\t\tadam_optimizer = AdamOpt.AdamOpt(tf.trainable_variables(), learning_rate=par['learning_rate'])\n\n\t\t# Calculate losses\n\t\tself.task_loss = tf.reduce_mean(self.time_mask[::1,...] * \\\n\t\t\t\ttf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output[::1,...], \\\n\t\t\t\tlabels=self.target_data[::1,...]))\n\n\t\tself.spike_loss = 0.*tf.reduce_mean(tf.nn.relu(self.h + 0.02))\n\n\t\t# Compute gradients\n\t\tself.train = adam_optimizer.compute_gradients(self.task_loss + self.spike_loss)", "def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):\n \n #Assign required variables first\n self._m = m\n self._n = n\n imgs=[]\n if alpha is None:\n alpha = 0.3\n else:\n alpha = float(alpha)\n if sigma is None:\n sigma = max(m, n) / 2.0\n else:\n sigma = float(sigma)\n self._n_iterations = abs(int(n_iterations))\n \n ##INITIALIZE GRAPH\n self._graph = tf.Graph()\n \n ##POPULATE GRAPH WITH NECESSARY COMPONENTS\n with self._graph.as_default():\n \n ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE\n \n #Randomly initialized weightage vectors for all neurons,\n #stored together as a matrix Variable of size [m*n, dim]\n #HERE i used random_uniform instead of random_normal in the original code.(LB)\n self._weightage_vects = tf.Variable(tf.random_uniform(\n [m*n, dim]))\n \n #Matrix of size [m*n, 2] for SOM grid locations\n #of neurons\n self._location_vects = tf.constant(np.array(\n list(self._neuron_locations(m, n))))\n \n ##PLACEHOLDERS FOR TRAINING INPUTS\n #We need to assign them as attributes to self, since they\n #will be fed in during training\n \n #The training vector\n self._vect_input = tf.placeholder(\"float\", [dim])\n #Iteration number\n self._iter_input = tf.placeholder(\"float\")\n \n ##CONSTRUCT TRAINING OP PIECE BY PIECE\n #Only the final, 'root' training op needs to be assigned as\n #an attribute to self, since all the rest will be executed\n #automatically during training\n \n #To compute the Best Matching Unit given a vector\n #Basically calculates the Euclidean distance between every\n #neuron's weightage vector and the input, and returns the\n #index of the neuron which gives the least value\n bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.subtract(self._weightage_vects, tf.stack(\n [self._vect_input for i in range(m*n)])), 2), 1)),\n 0)\n \n #This will extract the location of the BMU based on the BMU's\n #index\n slice_input = tf.pad(tf.reshape(bmu_index, 
[1]),\n np.array([[0, 1]]))\n bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,\n tf.constant(np.array([1, 2]))),\n [2])\n \n #To compute the alpha and sigma values based on iteration\n #number\n learning_rate_op = tf.subtract(1.0, tf.divide(self._iter_input,\n self._n_iterations))\n _alpha_op = tf.multiply(alpha, learning_rate_op)\n _sigma_op = tf.multiply(sigma, learning_rate_op)\n \n #Construct the op that will generate a vector with learning\n #rates for all neurons, based on iteration number and location\n #wrt BMU.\n bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(\n self._location_vects, tf.stack(\n [bmu_loc for i in range(m*n)])), 2), 1)\n neighbourhood_func = tf.exp(tf.neg(tf.divide(tf.cast(\n bmu_distance_squares, \"float32\"), tf.pow(_sigma_op, 2))))\n learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)\n \n #Finally, the op that will use learning_rate_op to update\n #the weightage vectors of all neurons based on a particular\n #input\n learning_rate_multiplier = tf.stack([tf.tile(tf.slice(\n learning_rate_op, np.array([i]), np.array([1])), [dim])\n for i in range(m*n)])\n weightage_delta = tf.multiply(\n learning_rate_multiplier,\n tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),\n self._weightage_vects)) \n new_weightages_op = tf.add(self._weightage_vects,\n weightage_delta)\n self._training_op = tf.assign(self._weightage_vects,\n new_weightages_op) \n \n ##INITIALIZE SESSION\n self._sess = tf.Session()\n \n ##INITIALIZE VARIABLES\n init_op = tf.global_variables_initializer()\n self._sess.run(init_op)", "def TEM_Train(X_feature,Y_action,Y_startend,LR,istrain,config): \n net=tf.layers.conv1d(inputs=X_feature,filters=512,kernel_size=3,strides=1,padding='same')\n # net=tf.layers.batch_normalization(net,training=istrain)\n net=tf.nn.relu(net)\n net=tf.layers.conv1d(inputs=net,filters=512,kernel_size=3,strides=1,padding='same')\n # net=tf.layers.batch_normalization(net,training=istrain)\n net=tf.nn.relu(net)\n net=0.1*tf.layers.conv1d(inputs=net,filters=2,kernel_size=1,strides=1,padding='same')\n net=tf.nn.sigmoid(net)\n\n anchors_action = net[:,:,0]\n # print(\"anchors_action: \", anchors_action)\n anchors_startend = net[:,:,1]\n \n loss=TEM_loss(anchors_action,anchors_startend,Y_action,Y_startend,config)\n\n TEM_trainable_variables=tf.trainable_variables()\n l2 = 0.001 * sum(tf.nn.l2_loss(tf_var) for tf_var in TEM_trainable_variables)\n cost = loss[\"loss_action\"]+loss[\"loss_startend\"]+l2\n loss['l2'] = l2\n loss['cost'] = cost\n # optimizer=tf.train.AdamOptimizer(learning_rate=LR).minimize(cost,var_list=TEM_trainable_variables)\n opt = tf.train.AdamOptimizer(learning_rate=LR)\n grads = opt.compute_gradients(cost, var_list=TEM_trainable_variables)\n gs = []\n for i, (g, v) in enumerate(grads):\n \tif g is not None:\n \t\tgrads[i] = (tf.clip_by_norm(g, 15), v)\n \t\tgs.append(g)\n optimizer = opt.apply_gradients(grads)\n return optimizer,loss,TEM_trainable_variables", "def compile(self, optimizer, lr):\n \n #clip_morm = 0.1\n self.loss_f = None\n with self.graph.as_default():\n \n tvars = tf.trainable_variables()\n ft_vars = [v for v in tvars if \"_fe\" in v.name] \n lab_vars = [v for v in tvars if \"_dc\" not in v.name]\n dom_vars = [v for v in tvars if \"_lp\" not in v.name]\n\n print()\n print(\" ft updates:\", ft_vars)\n print(\"96x3 updates:\", lab_vars)\n print(\" 1x3 updates:\", dom_vars)\n print()\n\n # `tf.nn.softmax_cross_entropy_with_logits` is deprcated.\n # 
https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits\n self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels, logits=self.output, name='cross_entropy')\n self.loss_adv = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels_adv, logits=self.output_adv, name='cross_entropy_adv')\n \n #grads_and_vars = optimizer.compute_gradients(loss, var_list=tf_vars)\n #clipped_grads_and_vars = [(tf.clip_by_norm(grad, clip_norm=clip_norm), var) for grad, var in grads_and_vars]\n \n self.loss_fe = - lam * self.loss_adv\n # `tf.control_dependencies` is necessary if `tf.layers.batch_normalization` is in the model\n # https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n self.train_step_adv = optimizer(lr).minimize(self.loss_fe, name='minimize_fe', var_list=ft_vars)\n self.train_step = optimizer(lr).minimize(self.loss, name='minimize', var_list=lab_vars)\n self.train_step_adv = optimizer(lr).minimize(self.loss_adv, name='minimize_adv', var_list=dom_vars)\n\n # Initialize all `tf.Variable`.\n self.session.run(tf.global_variables_initializer())", "def __init__(self, number_episodes, training, feature_number, double_gpu=False, freeze_rate=1000, v_set=15, d_set=50, controller='DDPG'):\r\n if controller == 'DDPG_v':\r\n self.action_range = np.array([-5., 25.]) # range for the values of the action (velocity values)\r\n else:\r\n self.action_range = np.array([-3, 3]) # range for the values of the action (acceleration values)\r\n self.v_set = v_set\r\n self.d_set = d_set\r\n self.a_set = []\r\n self.action_space = 1 # acceleration as single (continuous) action\r\n self.feature_number = feature_number\r\n self.experience_batch_size = 100000 # number of samples in the batch\r\n self.experience_batch = np.zeros((1, 6), dtype=np.float32) # Experience Replay batch,\r\n self.minibatch_size = 64 # number of samples in a minibatch used for 1 gradient descent step\r\n self.freeze_rate = freeze_rate\r\n self.warmup_time = 2000 # wait for x time steps before NNs start getting updated\r\n self.update_frequency = 2 # The number of obtained transition tuples (time steps) before an update of Q is performed\r\n self.learning_rate_actor = 5.e-08 # 1.e-07\r\n self.learning_rate_critic = 5.e-07 # 1.e-06\r\n self.optimizer = tf.keras.optimizers.Adam(lr=self.learning_rate_critic, clipnorm=1.) 
# optimizer critic\r\n self.critic_loss = np.zeros((number_episodes*10000, 1))\r\n if controller == 'DDPG_v':\r\n self.actor_activation_output = self.scaled_sigmoid # output of actor NN with range of (0, 30) (velocity)\r\n else:\r\n self.actor_activation_output = self.scaled_tanh # output of actor NN with range of (-3, 3) (accelerations)\r\n self.relu_init = tf.keras.initializers.he_normal()\r\n self.tanh_init = tf.keras.initializers.lecun_normal()\r\n self.linear_init = tf.keras.initializers.glorot_normal()\r\n self.step_counter = 0\r\n self.minibatch = np.zeros((self.minibatch_size, 6), dtype=np.float32)\r\n self.double_gpu = double_gpu\r\n self.state = np.zeros([1, self.feature_number], dtype=np.float32)\r\n self.new_state = np.zeros([1, self.feature_number], dtype=np.float32)\r\n self.endstate = False\r\n self.index = np.zeros((1000000, 1), dtype=int)\r\n self.observed_weights = np.zeros([number_episodes, 6])\r\n self.OU_theta = 0.2 # mean reversion rate of the Ornstein Uhlenbeck process -- 0.5\r\n self.OU_mu = 8 # mean reversion level of the Ornstein Uhlenbeck process -- 0.1\r\n self.OU_sigma = 10 # diffusion coefficient of the Ornstein Uhlenbeck process -- 0.3\r\n self.OU_repeats = 2 # number of timesteps the delta_noise from OU is used for\r\n self.noise_counter = 0 # counting variable for OU repeat\r\n self.delta_noise = 0 # noise value added to calculated acceleration\r\n self.k_hybrid_a = 1/2 # factor for hybrid_a controller that scales the DDPG-set-acceleration\r\n self.weight_grad_sum = [0]\r\n\r\n\r\n # create the NN models\r\n self.sess = tf.Session()\r\n tf.keras.backend.set_session(self.sess)\r\n\r\n #self.optimizer = tf.keras.optimizers.RMSprop(lr=self.learning_rate_critic)\r\n\r\n # actor network\r\n with tf.device('/cpu:0'):\r\n self.actor_input = tf.keras.layers.Input(shape=(self.feature_number,))\r\n actor_hidden1 = tf.keras.layers.Dense(units=50, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_initializer=self.relu_init)(self.actor_input)\r\n actor_hidden2 = tf.keras.layers.Dense(units=50, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_initializer=self.relu_init)(actor_hidden1)\r\n actor_output = tf.keras.layers.Dense(units=self.action_space, activation=self.actor_activation_output, kernel_initializer=self.tanh_init)(actor_hidden2)\r\n actor_model = tf.keras.models.Model(inputs=self.actor_input, outputs=actor_output)\r\n if self.double_gpu:\r\n actor_parallel_model = tf.keras.utils.multi_gpu_model(actor_model, gpus=2)\r\n self.actor = actor_parallel_model\r\n else:\r\n self.actor = actor_model\r\n\r\n # actor target network\r\n self.target_actor = copy(self.actor)\r\n\r\n # critic network\r\n with tf.device('/cpu:0'):\r\n\r\n self.critic_input_state = tf.keras.layers.Input(shape=(self.feature_number,))\r\n self.critic_input_action = tf.keras.layers.Input(shape=(self.action_space,))\r\n critic_input = tf.keras.layers.concatenate([self.critic_input_state, self.critic_input_action])\r\n critic_hidden1 = tf.keras.layers.Dense(units=150, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_initializer=self.relu_init)(critic_input)\r\n critic_hidden2 = tf.keras.layers.Dense(units=150, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_initializer=self.relu_init)(critic_hidden1)\r\n critic_output = tf.keras.layers.Dense(units=1, activation='linear', kernel_initializer=self.linear_init)(critic_hidden2)\r\n critic_model = tf.keras.models.Model(inputs=[self.critic_input_state, self.critic_input_action], outputs=critic_output)\r\n \"\"\" critic topology 
with action feed-in in 2nd layer\r\n self.critic_input_state = tf.keras.layers.Input(shape=(self.feature_number,))\r\n self.critic_input_action = tf.keras.layers.Input(shape=(self.action_space,))\r\n critic_hidden1 = tf.keras.layers.Dense(units=150, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_initializer=self.relu_init)(\r\n self.critic_input_state)\r\n critic_hidden1_with_action = tf.keras.layers.concatenate([critic_hidden1, self.critic_input_action])\r\n critic_hidden2 = tf.keras.layers.Dense(units=150, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_initializer=self.relu_init)(\r\n critic_hidden1_with_action)\r\n critic_output = tf.keras.layers.Dense(units=1, activation='linear', kernel_initializer=self.linear_init)(critic_hidden2)\r\n critic_model = tf.keras.models.Model(inputs=[self.critic_input_state, self.critic_input_action], outputs=critic_output)\r\n \"\"\"\r\n if self.double_gpu:\r\n critic_parallel_model = tf.keras.utils.multi_gpu_model(critic_model, gpus=2)\r\n critic_parallel_model.compile(loss='mse', optimizer=self.optimizer) # loss=self.clipped_mse\r\n self.critic = critic_parallel_model\r\n else:\r\n critic_model.compile(loss='mse', optimizer=self.optimizer) # loss=self.clipped_mse\r\n self.critic = critic_model\r\n\r\n # critic target network\r\n self.target_critic = copy(self.critic)\r\n\r\n # symbolic Gradient of Q w.r.t. a\r\n self.dQda = tf.gradients(self.critic.output, self.critic_input_action)\r\n\r\n # Tensorflow placeholder for dQda\r\n self.dQda_placeholder = tf.placeholder(tf.float32, [None, self.action_space])\r\n\r\n # symbolic Policy Gradient dpi/dtheta * dQ/da -- dQ/da fed into tf.gradients as multiplicative term grad_ys\r\n # minus sign in grad_ys to switch from a gradient descent to a gradient ascend formulation\r\n self.policy_gradient = tf.gradients(self.actor.output, self.actor.trainable_weights, grad_ys=-self.dQda_placeholder)\r\n\r\n # Optimizer method of the actor, inputs pairs of gradient (policy gradient) and weight\r\n grads_and_vars = zip(self.policy_gradient, self.actor.trainable_weights)\r\n self.optimize_actor = tf.train.AdamOptimizer(learning_rate=self.learning_rate_actor).apply_gradients(grads_and_vars)\r\n\r\n # Initialize tensorflow variables\r\n self.sess.run(tf.global_variables_initializer())\r\n\r\n #self.train_writer = tf.summary.FileWriter('logs', self.sess.graph)\r\n\r\n #self.tensorboard = tf.keras.callbacks.TensorBoard(log_dir=\"logs/{}\".format(time()))\r", "def autoencoder_train(discriminator_loss, generator_loss, reconstruction_loss, global_step):\n # Variables that affect learning rate.\n decay_steps = NUM_ITERATIONS_PER_DECAY\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(\n INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True\n )\n\n tf.scalar_summary('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(\n [discriminator_loss, generator_loss, reconstruction_loss]\n )\n # Get total weight decay\n total_weight_loss = tf.add_n(tf.get_collection(\"losses\"), name=\"total_weight_loss\")\n\n # Get losses for each optimizer\n G_loss = generator_loss + total_weight_loss\n R_loss = reconstruction_loss + total_weight_loss\n D_loss = discriminator_loss + total_weight_loss\n\n # separate out the G and D variables\n trainable_vars = tf.trainable_variables()\n D_vars = [var for var in trainable_vars if \"discriminator\" in var.name]\n 
G_vars = [var for var in trainable_vars if not \"discriminator\" in var.name]\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n # optimizer for Discriminator\n D_opt = tf.train.AdamOptimizer(lr, beta1=.5, name=\"D_optimizer\")\n D_grads = D_opt.compute_gradients(D_loss, D_vars)\n\n # optimizer for Reconstruction and generator\n R_opt = tf.train.AdamOptimizer(lr, name=\"R_optimizer\")\n R_grads = R_opt.compute_gradients(R_loss+G_loss, G_vars)\n\n\n # Apply gradients.\n R_apply_gradient_op = R_opt.apply_gradients(R_grads, global_step=global_step)\n D_apply_gradient_op = D_opt.apply_gradients(D_grads, global_step=global_step)\n\n\n # Add histograms for trainable variables.\n for var in trainable_vars:\n tf.histogram_summary(var.op.name, var)\n\n # Add histograms for gradients for each optimizer\n for grads, name in [(D_grads, '/D_gradients'), (R_grads, '/R_gradients')]:\n for grad, var in grads:\n if grad is not None:\n tf.histogram_summary(var.op.name + name, grad)\n\n # Track the moving averages of the batch norm variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n\n # average the batch norm variables\n variables_to_average = list(\n set(\n [v for v in tf.all_variables() if \"_mean\" in v.name or \"_variance\" in v.name]\n )\n )\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # generate training op for reconstruction\n with tf.control_dependencies([R_apply_gradient_op, variables_averages_op]):\n R_train_op = tf.no_op(name='R_train')\n # generate training op for discriminator\n with tf.control_dependencies([D_apply_gradient_op, variables_averages_op]):\n D_train_op = tf.no_op(name='D_train')\n\n #return G_train_op, R_train_op, D_train_op\n return R_train_op, D_train_op", "def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)", "def build_graph(self):\n # place holders for inputs here \n HIDDEN_LAYER = self.FLAGS.feature_layer_size\n \n self.x_i = tf.placeholder(dtype=tf.float32, shape = (None, self.INPUT_DIM), name=\"x_i\")\n self.a_i = tf.placeholder(dtype=tf.float32, shape = (None, self.ACTION_DIM), name = \"a_i\")\n self.q_opc = tf.placeholder(dtype=tf.float32, shape = (None, 1), name = \"q_opc\")\n self.q_ret = tf.placeholder(dtype=tf.float32, shape = (None, 1), name = \"q_ret\" )\n self.c = self.FLAGS.c # truncation threshold constant\n \n self.actor_net = PolicyNet(HIDDEN_LAYER, self.ACTION_DIM, name= self.name + \"_actor\", co_var = self.co_var)\n self.critic_net = AdvantageValueNet(HIDDEN_LAYER , name= self.name + \"_critic\")\n \n self.policy_xi_stats, self.policy_xi_dist = self.actor_net(self.x_i)\n \n self.val_xi, self.adv_xi_ai = self.critic_net(self.x_i, self.a_i, self.policy_xi_dist)\n \n #sample a' now\n self.a_i_ = tf.reshape(self.policy_xi_dist.sample(1), shape=[-1,self.ACTION_DIM])\n self.a_i_ = tf.clip_by_value(self.a_i_, self.env.action_space.low[0], 
self.env.action_space.high[0]) #20190828 add clipping\n \n _, self.adv_xi_ai_ = self.critic_net(self.x_i, self.a_i_, self.policy_xi_dist) # val will be the same for \n \n _, self.average_policy_xi_dist = self.average_actor_net(self.x_i) # can this be done better ?\n \n self.prob_a_i = tf.reshape(self.policy_xi_dist.prob(self.a_i),shape=[-1,1]) + 1e-8\n self.prob_a_i_ = tf.reshape(self.policy_xi_dist.prob(self.a_i_),shape=[-1,1]) + 1e-8\n \n self.log_prob_a_i = tf.log(self.prob_a_i)\n self.log_prob_a_i_ = tf.log(self.prob_a_i_)\n \n # for predicting 1-step a_i', p_i, p_i',\n self.u_i = tf.placeholder(dtype=tf.float32, shape = (None, 2*self.ACTION_DIM))\n \n #self.u_i_dist = tf.contrib.distributions.MultivariateNormalDiag(loc= self.u_i, \n # scale_diag = tf.ones_like(self.u_i) * self.co_var)\n self.u_i_dist = tf.contrib.distributions.MultivariateNormalDiag(loc= self.u_i[:,0], scale_diag=self.u_i[:,1])\n \n self.u_i_prob_a_i = tf.reshape(self.u_i_dist.prob(self.a_i),shape=[-1,1]) + 1e-8\n self.u_i_prob_a_i_ = tf.reshape(self.u_i_dist.prob(self.a_i_),shape=[-1,1]) + 1e-8\n \n self.p_i = tf.divide(self.prob_a_i, self.u_i_prob_a_i)\n self.p_i_ = tf.divide(self.prob_a_i_ , self.u_i_prob_a_i_)\n \n\n # take care of NaNs here, for importance sampling weights (might be an extra step)\n self.p_i = tf.where(tf.is_nan(self.p_i), tf.zeros_like(self.p_i), self.p_i)\n self.p_i_ = tf.where(tf.is_nan(self.p_i_), tf.zeros_like(self.p_i_), self.p_i_)\n\n self.c_i = tf.minimum(1. , tf.pow(self.p_i, 1.0/self.ACTION_DIM))\n \n \n # for verification about checking if params are getting synched\n self.local_actor_vars = self.actor_net.local_params()\n self.global_actor_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global_actor')\n \n self.local_critic_vars = self.critic_net.local_params()\n self.global_critic_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global_critic')\n \n \n # Sync ops from global\n self.sync_local_actor_op = self.actor_net.update_local_params_op('global_actor') # global actor\n self.sync_local_critic_op = self.critic_net.update_local_params_op('global_critic')\n \n # soft update the average network\n self.soft_update_average_actor_op = self.average_actor_net.soft_update_from_target_params('global_actor',\n self.FLAGS.tau)\n \n #Get gradients from local network using local losses\n \n g1 = tf.reshape(tf.gradients((self.log_prob_a_i * (self.q_opc - self.val_xi)),self.policy_xi_stats,\n name=self.name+\"g1_grads\", unconnected_gradients='zero'), shape = [-1,2*self.ACTION_DIM])\n g2 = (self.adv_xi_ai_ - self.val_xi) * tf.reshape(tf.gradients((self.log_prob_a_i_), \n self.policy_xi_stats, name=self.name+\"g2_grads\", unconnected_gradients='zero'), shape = [-1,2*self.ACTION_DIM])\n \n \n self.g = tf.minimum(self.c, self.p_i) * g1 + tf.nn.relu(1 - tf.divide(self.c , self.p_i_)) * g2\n \n \n self.k = tf.reshape(tf.gradients(tf.contrib.distributions.kl_divergence(self.average_policy_xi_dist, \n self.policy_xi_dist), self.policy_xi_stats, unconnected_gradients='zero'), shape = [-1,2*self.ACTION_DIM])\n \n \n \n self.kg = tf.reduce_sum( tf.multiply(self.g, self.k), 1, keep_dims=True)\n \n #print \"kg\", self.kg\n \n self.k2 = tf.reduce_sum( tf.multiply(self.k, self.k), 1, keep_dims=True)\n \n self.reg_g = self.g - tf.maximum(tf.zeros_like(self.g), tf.divide((self.kg - self.FLAGS.delta), self.k2) ) * self.k\n \n # take gradients wrt to the local params\n self.actor_grads = tf.gradients(self.policy_xi_stats, self.local_actor_vars, \n grad_ys= -self.reg_g, name=\"actor_grads\", 
unconnected_gradients='zero')\n \n \n #for ti,tj in zip(self.actor_grads, self.global_actor_vars):\n # print ti, \"\\n\", tj , \"\\n\", \"===========\"\n \n # apply local gradients to the global network\n self.actor_train_op = self.optimizer.apply_gradients(zip(self.actor_grads, self.global_actor_vars),\n global_step=tf.train.get_global_step())\n \n \n # critic loss function and updates\n \n # take gradient wrt to local variables\n self.critic_loss_1 = ((self.q_ret - self.adv_xi_ai) ** 2.0) / 2.0\n \n # for predicting 1-step a_i', p_i, p_i',\n self.v_target = tf.placeholder(dtype=tf.float32, shape = (None, 1))\n \n #self.v_trunc = tf.minimum(self.p_i, 1.0) * (self.q_ret - self.adv_xi_ai) + self.val_xi\n self.critic_loss_2 = ((self.v_target - self.val_xi) ** 2.0) / 2.0\n \n self.critic_loss = self.critic_loss_1 + self.critic_loss_2\n \n #Apply local gradients to global network\n \n self.critic_grads = tf.gradients(self.critic_loss, self.local_critic_vars)\n \n self.critic_train_op = self.optimizer.apply_gradients(zip(self.critic_grads, self.global_critic_vars),\n global_step=tf.train.get_global_step())\n \n # critic_summaries op\n critic_grads_summary = []\n print('-----------------------------------------------------------------')\n print('Create critic_grads_summary histogram ')\n for grad,var in zip(self.critic_grads, self.local_critic_vars):\n print('{} - {}'.format(var.name, grad))\n critic_grads_summary.append(tf.summary.histogram(var.name + '/gradient', grad))\n critic_grads_summary.append(tf.summary.histogram(var.name + '/weight', var))\n \n self.critic_summary_op = tf.summary.merge([\n tf.summary.scalar(self.name + \"_critc_mean_loss_Q\", tf.reduce_mean(self.critic_loss_1)),\n tf.summary.scalar(self.name + \"_critc_mean_loss_V\", tf.reduce_mean(self.critic_loss_2)),\n tf.summary.scalar(self.name + \"_critc_sum_loss_Q\", tf.reduce_sum(self.critic_loss_1)),\n tf.summary.scalar(self.name + \"_critc_sum_loss_V\", tf.reduce_sum(self.critic_loss_2)),\n tf.summary.scalar(self.name + \"_critc_mean_loss\", tf.reduce_mean(self.critic_loss)),\n tf.summary.scalar(self.name + \"_critc_sum_loss\", tf.reduce_sum(self.critic_loss)),\n tf.summary.histogram(self.name + \"_val_target\", self.v_target),\n tf.summary.histogram(self.name + \"_val_pred\", self.val_xi),\n tf.summary.histogram(self.name + \"_Q_pred\", self.adv_xi_ai),\n tf.summary.histogram(self.name + \"_Q_ret\", self.q_ret),\n tf.summary.histogram(self.name + \"_Q_opc\", self.q_opc),\n ] + critic_grads_summary)\n \n \n # actor summaries op\n\n actor_grads_summary = []\n print('-----------------------------------------------------------------')\n print('Create actor_grads_summary histogram ')\n for grad,var in zip(self.actor_grads, self.local_actor_vars):\n print('{} - {}'.format(var.name, grad))\n actor_grads_summary.append(tf.summary.histogram(var.name + '/gradient', grad))\n actor_grads_summary.append(tf.summary.histogram(var.name + '/weight', var))\n \n\n self.actor_summary_op = tf.summary.merge([\n tf.summary.scalar(self.name + \"_actor_mean_loss_reg_g\", tf.reduce_mean(self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_neg_mean_loss_reg_g\", tf.reduce_mean(-self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_sum_loss_reg_g\", tf.reduce_sum(self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_neg_sum_reg_g\", tf.reduce_sum(-self.reg_g)),\n tf.summary.scalar(self.name + \"_actor_sum_g\", tf.reduce_sum(self.g)),\n tf.summary.scalar(self.name + \"_actor_neg_sum_g\", tf.reduce_sum(-self.g)),\n tf.summary.scalar(self.name + 
\"_actor_mean_kl\", tf.reduce_mean(self.k)),\n tf.summary.scalar(self.name + \"_actor_sum_kl\", tf.reduce_sum(self.k)),\n tf.summary.histogram(self.name + \"_policy_stats\", self.policy_xi_stats),\n ] + actor_grads_summary )", "def optimizer_config(self):\r\n return {\r\n \"lr\": self.args.lr[0],\r\n \"momentum\": self.args.momentum,\r\n \"weight_decay\": self.args.weight_decay,\r\n }", "def adam_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n\n beta1 = config.get('beta1', 0.9)\n beta2 = config.get('beta2', 0.999)\n epsilon = config.get('epsilon', 1e-8)\n \n train_step = tf.train.AdamOptimizer(learning_rate, beta1, beta2, epsilon)\n\n return train_step", "def optimizer(self):\n return 'sgd'", "def dan(xm_train, ym_train, xm_dev, ym_dev, l_rate, n_epochs, m_b_size, n_h_layers, layers, k_prob, reg, costs, sess):\n\n x, y, keep_prob = init_ph(xm_train.shape[0], ym_train.shape[0])\n\n params = init_weights(n_h_layers, layers)\n\n z = fwd(x, params, keep_prob, n_h_layers)\n\n cost = compute_cost(z, y, reg, params, n_h_layers)\n\n global_step = tf.Variable(0, trainable=False)\n\n learning_rate = tf.train.exponential_decay(l_rate, global_step, 1000, 0.90)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, global_step=global_step)\n\n y_pred = tf.argmax(z, name='y_pred')\n\n y_true = tf.argmax(y, name='y_true')\n\n correct_prediction = tf.equal(y_pred, y_true)\n\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"), name='accuracy')\n\n init = tf.global_variables_initializer()\n\n num_minibatches = int(xm_train.shape[1] / m_b_size)\n\n sess.run(init)\n\n for epoch in range(n_epochs):\n epoch_cost = 0.\n\n minibatch_idxs = np.random.permutation(xm_train.shape[1])\n\n for i in range(num_minibatches):\n minibatch_x = np.take(\n xm_train,\n minibatch_idxs[i * m_b_size: (i + 1) * m_b_size],\n axis=1\n )\n\n # minibatch_x *= np.random.binomial([np.ones(minibatch_x.shape)], k_prob)[0] * (1.0 / k_prob)\n\n minibatch_y = np.take(\n ym_train,\n minibatch_idxs[i * m_b_size: (i + 1) * m_b_size],\n axis=1\n )\n\n _, minibatch_cost = sess.run(\n [optimizer, cost],\n feed_dict={\n x: minibatch_x,\n y: minibatch_y,\n keep_prob: k_prob\n }\n )\n\n epoch_cost += minibatch_cost / num_minibatches\n\n if epoch % 100 == 0:\n print('Cost after epoch %i: %f' % (epoch, epoch_cost))\n print('Train Accuracy:', accuracy.eval({x: xm_train, y: ym_train, keep_prob: 1.0}, session=sess))\n print('Dev Accuracy:', accuracy.eval({x: xm_dev, y: ym_dev, keep_prob: 1.0}, session=sess))\n print('Learning Rate:', learning_rate.eval(session=sess))\n print('')\n\n if epoch % 5 == 0:\n costs.append(epoch_cost)\n\n tr_acc = accuracy.eval({x: xm_train, y: ym_train, keep_prob: 1.0}, session=sess)\n dv_acc = accuracy.eval({x: xm_dev, y: ym_dev, keep_prob: 1.0}, session=sess)\n\n print('Final epoch')\n print('Train Accuracy:', tr_acc)\n print('Dev Accuracy:', dv_acc)\n\n # Save the model\n saver = tf.train.Saver()\n save_path = saver.save(sess, \"./trained/model.ckpt\")\n print(\"Model saved in path: %s\" % save_path)\n\n return accuracy, tr_acc, dv_acc, x, y, keep_prob, y_pred, y_true", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def build_generator(latent_dim=100):\n # The weight initialization and the slope are chosen to accord with the\n # Parameters in the paper. I only change padding when it seems neccesary to\n # to mantain adequate dimensons. 
\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.3\n \n inputs = keras.Input(shape=(1,1,100))\n # First convolutional layer\n x = Conv2DTranspose(\n 1024, \n kernel_size=(4,4), \n strides=1, \n kernel_initializer=weight_initializer,\n padding='valid',\n use_bias=False\n )(inputs)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Second convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 512,\n kernel_size = 4,\n strides = (2,2),\n padding = 'same',\n use_bias = False\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 256,\n kernel_size = 5,\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 128,\n kernel_size = (5,5),\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fifth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 3,\n kernel_size = (5,5),\n use_bias=False,\n strides = (2,2),\n padding = 'same',\n activation='tanh'\n )(x)\n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def __init__(self,\n weight_decay,\n global_step,\n max_matrix_size=768,\n gbar_decay=0.0,\n gbar_weight=1.0,\n mat_gbar_decay=1.0,\n mat_gbar_weight=1.0,\n learning_rate=1.0,\n svd_interval=1,\n precond_update_interval=1,\n epsilon=1e-4,\n alpha=0.5,\n use_iterative_root=False,\n use_locking=False,\n name=\"ShampooW\"):\n super(ShampooWOptimizer, self).__init__(\n weight_decay,\n global_step=global_step,\n max_matrix_size=max_matrix_size,\n gbar_decay=gbar_decay,\n gbar_weight=gbar_weight,\n mat_gbar_decay=mat_gbar_weight,\n learning_rate=learning_rate,\n svd_interval=svd_interval,\n precond_update_interval=precond_update_interval,\n epsilon=epsilon,\n alpha=alpha,\n use_iterative_root=use_iterative_root,\n use_locking=use_locking,\n name=name)", "def compile(self, learning_rate, momentum):\n # Optimizer object\n if self.config.ADAMW:\n from nets.adamw import AdamW\n optimizer = AdamW(lr=learning_rate, decay=0.001, weight_decay=self.config.WEIGHT_DECAY, \n clipnorm=self.config.GRADIENT_CLIP_NORM)\n else:\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n \n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n # ************************* NOTE for 2 label dataset \n if self.config.HAVE_LABEL2:\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_class_loss2\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n else:\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), 
tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n # print (self.keras_model.metrics_names)\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model._metrics_tensors.update({name: loss})\n # self.keras_model._compile_stateful_metrics_tensors.update({name: loss})\n # print (\"================\",self.keras_model._compile_stateful_metrics_tensors)", "def _build_optimizers(self):\r\n self._optimize_ops = []\r\n all_trainable_variables = tf.trainable_variables()\r\n all_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n all_reg_losses = tf.losses.get_regularization_losses()\r\n for spec in self._learning_schedule:\r\n optimize_ops = []\r\n update_ops = []\r\n loss_terms = spec['loss_terms_to_optimize']\r\n reg_losses = []\r\n assert isinstance(loss_terms, dict)\r\n for loss_term_key, prefixes in loss_terms.items():\r\n assert loss_term_key in self.loss_terms['train'].keys()\r\n variables_to_train = []\r\n for prefix in prefixes:\r\n variables_to_train += [\r\n v for v in all_trainable_variables\r\n if v.name.startswith(prefix)\r\n ]\r\n update_ops += [\r\n o for o in all_update_ops\r\n if o.name.startswith(prefix)\r\n ]\r\n reg_losses += [\r\n l for l in all_reg_losses\r\n if l.name.startswith(prefix)\r\n ]\r\n\r\n optimizer_class = tf.train.AdamOptimizer\r\n optimizer = optimizer_class(\r\n learning_rate=self.learning_rate_multiplier * spec['learning_rate'],\r\n # beta1=0.9,\r\n # beta2=0.999,\r\n )\r\n final_loss = self.loss_terms['train'][loss_term_key]\r\n if len(reg_losses) > 0:\r\n final_loss += tf.reduce_sum(reg_losses)\r\n with tf.control_dependencies(update_ops):\r\n gradients, variables = zip(*optimizer.compute_gradients(\r\n loss=final_loss,\r\n var_list=variables_to_train,\r\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N,\r\n ))\r\n # gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # TODO: generalize\r\n optimize_op = optimizer.apply_gradients(zip(gradients, variables))\r\n optimize_ops.append(optimize_op)\r\n self._optimize_ops.append(optimize_ops)\r\n logger.info('Built optimizer for: %s' % ', '.join(loss_terms.keys()))", "def _build_train_op(self):\n\n logits_flatten = tf.reshape(self.logits_up, [-1, self.num_classes])\n pred_flatten = tf.reshape(self.pred, [-1, self.num_classes])\n\n labels_gt = self.labels\n\n if self.ignore_class_bg:\n # ignore background labels: 255\n gt_labels_flatten = tf.reshape(labels_gt, [-1, ])\n indices = tf.squeeze(tf.where(tf.less_equal(gt_labels_flatten, self.num_classes - 1)), 1)\n remain_logits = tf.gather(logits_flatten, indices)\n remain_pred = tf.gather(pred_flatten, indices)\n remain_labels = tf.gather(gt_labels_flatten, indices)\n xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=remain_logits, labels=remain_labels)\n else:\n xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_up, labels=labels_gt)\n\n self.cls_loss = tf.reduce_mean(xent, name='xent') # xent.shape=[nIgnoredBgPixels]\n self.cost = self.cls_loss + self._decay()\n tf.summary.scalar('cost', self.cost)\n\n self.global_step = tf.Variable(0, 
name='global_step', trainable=False)\n self.learning_rate = tf.train.polynomial_decay(self.lrn_rate,\n self.global_step,\n self.lr_decay_step,\n end_learning_rate=self.lrn_rate_end,\n power=0.9)\n tf.summary.scalar('learning rate', self.learning_rate)\n\n tvars = tf.trainable_variables()\n\n if self.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n elif self.optimizer == 'mom':\n optimizer = tf.train.MomentumOptimizer(self.learning_rate, 0.9)\n elif self.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\n else:\n raise NameError(\"Unknown optimizer type %s!\" % self.optimizer)\n\n grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)\n var_lr_mult = {}\n for var in tvars:\n if var.op.name.find(r'fc_final_sketch46') > 0 and var.op.name.find(r'biases') > 0:\n var_lr_mult[var] = 20.\n elif var.op.name.find(r'fc_final_sketch46') > 0:\n var_lr_mult[var] = 10.\n else:\n var_lr_mult[var] = 1.\n grads_and_vars = [((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)\n for g, v in grads_and_vars]\n\n ## summary grads\n # for grad, grad_var in grads_and_vars:\n # print('>>>', grad_var.op.name)\n # if grad is None:\n # print('None grad')\n # # if grad is not None:\n # # tf.summary.histogram(grad_var.op.name + \"/gradient\", grad)\n\n apply_op = optimizer.apply_gradients(grads_and_vars,\n global_step=self.global_step, name='train_step')\n\n train_ops = [apply_op] + self._extra_train_ops\n self.train_step = tf.group(*train_ops)", "def getCnnOptimizerOp(self, global_step):\n # Define loss and optimizer\n optimizer = tf.train.AdamOptimizer(self.__learning_rate)\n return optimizer", "def setOptimizerParams(self,lr,momentum,decay):\n self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)", "def update_op(self, loss, learning_rate,var):\n #train_op = None\n ####### Implementation Here ######\n #pass\n train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss = loss,var_list = var )\n return train_op", "def optimizer_creator(model, config):\n return torch.optim.SGD(model.parameters(), lr=config.get(\"lr\", 1e-4))", "def optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n\n # freeze all convolution variables\n tvars = tf.trainable_variables()\n trainable_vars = [var for var in tvars if not(var.name.startswith('conv'))]\n\n #print(\"Trainable parameters are: \")\n #for var in trainable_vars:\n # print(var.name + \"\\n\")\n\n logits = tf.reshape(nn_last_layer, (-1, num_classes), name=\"logits\")\n pred = tf.nn.softmax(logits)\n output = tf.identity(pred, 'prediction')\n\n correct_label = tf.reshape(correct_label, (-1, num_classes))\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))\n\n tf.summary.scalar('cross_entropy_loss', cross_entropy_loss, collections=['batch'])\n # add regularization to the loss\n reg_losses = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n tf.summary.scalar('regularization loss', reg_losses, collections=['batch'])\n reg_constant = 0.01\n loss = cross_entropy_loss + reg_constant * reg_losses\n\n tf.summary.scalar('total loss', loss, collections=['batch'])\n\n prediction = tf.argmax(logits, 1)\n correct_label_flatten = tf.argmax(correct_label, 1)\n acc = tf.reduce_mean(tf.cast(tf.equal(prediction, correct_label_flatten), tf.float32))\n tf.summary.scalar('train_acc', acc, collections=['epoch_train'])\n tf.summary.scalar('validation_acc', acc, 
collections=['epoch_validate'])\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_operation = optimizer.minimize(cross_entropy_loss, var_list=trainable_vars)\n\n return logits, training_operation, loss", "def __init__(self, state_size, output_size, learning_rate, hidden_layer_sizes, name, source_nets=None,\n optimizer=tf.compat.v1.train.AdamOptimizer):\n self.state_size = state_size\n self.output_size = output_size\n self.learning_rate = learning_rate\n self.hidden_layer_sizes = hidden_layer_sizes\n self.name = name\n self.source_nets = source_nets\n\n with tf.compat.v1.variable_scope(name, reuse=tf.compat.v1.AUTO_REUSE):\n self.scope = tf.compat.v1.get_variable_scope()\n self.state = tf.compat.v1.placeholder(tf.compat.v1.float32, [None, self.state_size], name='state')\n self.target = tf.compat.v1.placeholder(tf.compat.v1.float32, name='target')\n self.optimizer = optimizer(learning_rate=self.learning_rate)\n self.output, self.loss, self.train_step = None, None, None\n\n # possibly unused\n self.action = tf.compat.v1.placeholder(tf.compat.v1.float32, [self.output_size], name='action')\n self.actions_distribution, self.sampled_action = None, None\n\n # build hidden layers\n self.Ws, self.bs, self.Zs, self.As = [], [], [], []\n if source_nets is not None:\n self.lateral_Ws, self.lateral_bs = [], []\n prev_layer_size, A = self.state_size, self.state\n for i, layer_size in enumerate(hidden_layer_sizes):\n W = tf.compat.v1.get_variable('W%d' % i, [prev_layer_size, layer_size],\n initializer=tf.compat.v1.initializers.glorot_uniform())\n b = tf.compat.v1.get_variable('b%d' % i, [layer_size],\n initializer=tf.compat.v1.zeros_initializer())\n Z = tf.compat.v1.add(tf.compat.v1.matmul(A, W), b)\n\n # in case of progressive net, add hidden output of source nets before activation\n if source_nets is not None:\n for j, source_net in enumerate(source_nets):\n source_Z = source_net.Zs[i]\n source_layer_size = source_net.hidden_layer_sizes[i]\n lateral_W = tf.compat.v1.get_variable(f'lateral_{j}_W%d' % i, [source_layer_size, layer_size],\n initializer=tf.compat.v1.initializers.glorot_uniform())\n lateral_b = tf.compat.v1.get_variable(f'lateral_{j}_b%d' % i, [layer_size],\n initializer=tf.compat.v1.zeros_initializer())\n lateral_Z = tf.compat.v1.add(tf.compat.v1.matmul(source_Z, lateral_W), lateral_b)\n Z = tf.compat.v1.add(Z, lateral_Z)\n\n self.lateral_Ws.append(lateral_W)\n self.lateral_bs.append(lateral_b)\n\n A = tf.compat.v1.nn.relu(Z)\n self.Ws.append(W)\n self.bs.append(b)\n self.Zs.append(Z)\n self.As.append(A)\n prev_layer_size = layer_size", "def target(total_epoch, filter_num, filter_len, num_dense_nodes):\n \n start = time.time()\n total_epoch = int(round(total_epoch))\n filter_num = int(round(filter_num))\n filter_len = int(round(filter_len))\n num_dense_nodes = int(round(num_dense_nodes))\n print(\"Epochs =\", total_epoch, \"| # Conv filters =\", filter_num, \"| Filter length =\", filter_len, \"| # Dense nodes =\", num_dense_nodes)\n\n # model specification\n model = Sequential()\n model.add(Convolution1D(input_dim=4, input_length=dna_bp_length, nb_filter=filter_num, filter_length=filter_len, activation=\"relu\", border_mode =\"same\"))\n model.add(MaxPooling1D(pool_length=dna_bp_length))\n model.add(Flatten())\n model.add(BatchNormalization())\n model.add(Dense(input_dim=filter_num,output_dim=num_dense_nodes))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization())\n model.add(Dense(output_dim=num_classes))\n model.add(Activation(\"softmax\"))\n 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\"accuracy\"])\n print(model.summary())\n \n max_val_acc = 0.0\n max_acc_pair = 0.0\n num_chunks = 6 \n \n epoch_train_acc = np.zeros((total_epoch,num_chunks))\n epoch_val_acc = np.zeros((total_epoch,1))\n\n # train the model\n for e in range(total_epoch):\n print(\"Epoch =\", e+1, \"out of\", total_epoch)\n for f in range(num_chunks-1):\n X_train = np.load(\"/mnt/data\"+str(f)+\".npy\")\n y_train = np.load(\"/mnt/labels\"+str(f)+\".npy\")\n history = model.fit(X_train, y_train, batch_size = 8, \\\n validation_split=0.0, nb_epoch=1, verbose=1, class_weight=cl_weight)\n epoch_train_acc[e,f] = history.history['acc'][0]\n \n # train final chunk and do validation\n X_train = np.load(\"/mnt/data\"+str(num_chunks-1)+\".npy\")\n y_train = np.load(\"/mnt/labels\"+str(num_chunks-1)+\".npy\")\n history = model.fit(X_train, y_train, batch_size = 8, \\\n validation_data=(val_dna_seqs_onehot, val_pi_labels_onehot), nb_epoch=1, verbose=1, class_weight=cl_weight)\n epoch_train_acc[e,num_chunks-1] = history.history['acc'][0]\n epoch_val_acc[e,0] = history.history['val_acc'][0]\n\n # record max validation accuracy\n if history.history['val_acc'][0] > max_val_acc:\n max_val_acc = history.history['val_acc'][0]\n max_acc_pair = history.history['acc'][0]\n \n # save network stats\n print(\"Epoch training accuracy\")\n print(epoch_train_acc)\n print(\"Mean epoch training accuracy\")\n print(np.transpose(np.mean(epoch_train_acc, axis=1)))\n end = time.time()\n np.save(str(int(end))+'conv'+str(filter_num)+'x'+str(filter_len)+'dense'+str(num_dense_nodes)+'time'+str(int(end-start))+'_mean_train_acc.out', np.transpose(np.mean(epoch_train_acc, axis=1)))\n print(\"Epoch validation accuracy\" )\n print(epoch_val_acc)\n np.save(str(int(end))+'conv'+str(filter_num)+'x'+str(filter_len)+'dense'+str(num_dense_nodes)+'time'+str(int(end-start))+'_epoch_val_acc.out', epoch_val_acc, end-start)\n \n return max_val_acc/(end-start)", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def get_optimizer(args, net):\n if args.backbone_lr > 0.0:\n base_params = []\n resnet_params = []\n resnet_name = []\n resnet_name.append('layer0')\n resnet_name.append('layer1')\n #resnet_name.append('layer2')\n #resnet_name.append('layer3')\n #resnet_name.append('layer4')\n len_resnet = len(resnet_name)\n else:\n param_groups = net.parameters()\n\n if args.backbone_lr > 0.0:\n for name, param in net.named_parameters():\n is_resnet = False\n for i in range(len_resnet):\n if resnet_name[i] in name:\n resnet_params.append(param)\n # param.requires_grad=False\n print(\"resnet_name\", name)\n is_resnet = True\n break\n if not is_resnet:\n base_params.append(param)\n\n if args.sgd:\n if args.backbone_lr > 0.0:\n optimizer = optim.SGD([\n {'params': base_params},\n {'params': resnet_params, 'lr':args.backbone_lr}\n ],\n lr=args.lr,\n weight_decay=5e-4, #args.weight_decay,\n momentum=args.momentum,\n nesterov=False)\n else:\n optimizer = optim.SGD(param_groups,\n lr=args.lr,\n weight_decay=5e-4, #args.weight_decay,\n momentum=args.momentum,\n nesterov=False)\n else:\n raise ValueError('Not a valid optimizer')\n\n if args.lr_schedule == 'scl-poly':\n if cfg.REDUCE_BORDER_ITER == -1:\n raise ValueError('ERROR Cannot Do Scale Poly')\n\n rescale_thresh = cfg.REDUCE_BORDER_ITER\n scale_value = args.rescale\n lambda1 = lambda iteration: \\\n math.pow(1 - iteration / args.max_iter,\n args.poly_exp) if iteration < rescale_thresh 
else scale_value * math.pow(\n 1 - (iteration - rescale_thresh) / (args.max_iter - rescale_thresh),\n args.repoly)\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)\n elif args.lr_schedule == 'poly':\n lambda1 = lambda iteration: math.pow(1 - iteration / args.max_iter, args.poly_exp)\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)\n else:\n raise ValueError('unknown lr schedule {}'.format(args.lr_schedule))\n\n return optimizer, scheduler", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def model(X_train, Y_train, X_dev, Y_dev, numOutputNodes, learning_rate = 0.003,\n iterations = 2000, minibatch_size = 16, layer1 = 25, beta = 0, dropout = 1.0, istanh1 = False, batchnorm = True, print_cost = True, is_charge = False):\n \n # reset all variables to allow the network to be trained multiple times with different hyperparameters\n sess = tf.Session()\n tf.reset_default_graph()\n\n tf.set_random_seed(1) # to keep consistent results\n seed = 1 # to keep consistent results\n (n_x, m) = X_train.shape # n_x : input size (the other dimension is the number of examples in the train set)\n n_y = Y_train.shape[0] # n_y : output size\n costs = [] # holds data for graphing\n dev_costs = []\n \n # Create Placeholders of shape (n_x, n_y)\n X, Y = create_placeholders(n_x, n_y)\n parameters = initialize_parameters(layer1, 1, regression = True) # Initialize parameters, with one hidden layer\n training = tf.placeholder_with_default(False, shape=(), name='training') # Create a boolean to use for implementing batch norm and dropout correctly\n\n # Forward propagation: Build the forward propagation in the tensorflow graph\n Z3 = forward_propagation(X, parameters, training, istanh1, False, batchnorm, dropout, regression = True)\n\n # Cost function: Add 
cost function to tensorflow graph\n cost = compute_reg_cost(Z3, Y, parameters, beta)\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # allows for prediction at test time to work with batch normalization; allows for updating of global mean and variance\n with tf.control_dependencies(update_ops):\n # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\n # Initialize all the variables\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n # Calculate the correct predictions\n correct_prediction = tf.less_equal(tf.abs(tf.divide(tf.subtract(Z3, Y), Y)), tf.fill([1,1], 0.05)) \n # define one measure of accuracy by counting a prediction as correct if it's within 5% of the true value\n \n # Calculate the mean absolute percentage error of the predictions\n MAPE = tf.scalar_mul(100, tf.reduce_mean(tf.abs(tf.divide(tf.subtract(Z3, Y), Y))))\n\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n # Start the session to compute the tensorflow graph\n with tf.Session() as sess:\n sess.run(init)\n\n # Print the hyperparameters for this particular model\n print('Learning Rate: %s, Mini-Batch Size: %d, Beta: %s, %d Nodes in Hidden Layer, %d Output Nodes, %d Iterations, %s Dropout Prob, Hidden Layer Tanh: %s, Batch Norm: %s' \\\n % (str(learning_rate).rstrip('0'), minibatch_size, str(beta).rstrip('0'), layer1, numOutputNodes, iterations, str(dropout).rstrip('0'), istanh1, batchnorm))\n \n for epoch in range(iterations):\n epoch_cost = 0. # Defines a cost related to an epoch\n seed = seed + 1\n minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)\n num_minibatches = len(minibatches) \n\n for minibatch in minibatches:\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n \n # Run the session on one minibatch, and add the cost to the epoch cost\n _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y, training: True})\n epoch_cost += minibatch_cost / num_minibatches\n \n # save the training and cross-validation cost every 5 epochs\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n dev_cost = sess.run(cost, feed_dict = {X: X_dev, Y: Y_dev, training: False})\n dev_costs.append(dev_cost)\n\n # print the cost after every 200 epochs\n if print_cost == True and epoch % 200 == 0: # used during testing to ensure gradient descent is working properly\n train_accuracy = accuracy.eval({X: X_train, Y: Y_train, training: False})\n train_mape = MAPE.eval({X: X_train, Y: Y_train, training: False})\n print(\"Training cost after iteration %i: %f, accuracy: %f, MAPE: %f %%\" % (epoch, epoch_cost, train_accuracy, train_mape))\n \n dev_cost = sess.run(cost, feed_dict={X:X_dev, Y: Y_dev, training: False})\n dev_accuracy = accuracy.eval({X: X_dev, Y: Y_dev, training: False})\n dev_mape = MAPE.eval({X: X_dev, Y: Y_dev, training: False})\n print(\"Dev cost after iteration %i: %f, accuracy: %f, MAPE: %f %%\" % (epoch, dev_cost, dev_accuracy, dev_mape))\n\n # # Plot the cost\n # if print_cost:\n # iter_num = np.arange(iterations / 5) * 5\n # plt.plot(iter_num, np.squeeze(costs), label = 'training')\n # plt.plot(iter_num, np.squeeze(dev_costs), label = 'cross-validation')\n # plt.ylabel('cost')\n # plt.xlabel('iterations')\n # # plt.ylim(top = 0.01, bottom = 0.002) # y range used to plot for averaged spectra\n # plt.ylim(top = 0.0075, bottom = 0.001) # y range used to plot 
for training on charges\n # plt.title('Cost vs. Iterations')\n # plt.legend()\n # plt.show()\n\n # Save the parameters in a variable\n parameters = sess.run(parameters)\n if is_charge:\n saver.save(sess, \"./charge_reg_models/charge_regression_model_{}_{}_{}_{}.ckpt\".format(learning_rate, iterations, layer1, beta))\n else:\n saver.save(sess, \"./reg_models/regression_model_{}_{}_{}_{}.ckpt\".format(learning_rate, iterations, layer1, beta))\n\n train_acc = accuracy.eval({X: X_train, Y: Y_train, training: False})\n dev_acc = accuracy.eval({X: X_dev, Y: Y_dev, training: False})\n \n mape_train = MAPE.eval({X: X_train, Y: Y_train, training: False})\n mape_dev = MAPE.eval({X: X_dev, Y: Y_dev, training: False})\n\n accs = [train_acc, dev_acc, mape_train, mape_dev]\n\n print(\"Train Accuracy:\", train_acc, \"; MAPE:\", mape_train)\n print(\"Dev Accuracy:\", dev_acc, \"; MAPE:\", mape_dev)\n\n return accs, parameters", "def pgd_optimizer(self, sess, X, y, optimization_step, num_iter, loss, delta, last = False, featurized_X = None):\n feed_dict = None\n if not last:\n feed_dict = {self.x: X, self.y: y}\n sess.run(tf.initialize_variables([delta]), feed_dict = feed_dict)\n else:\n feed_dict = {self.x : X, self.featurizations: featurized_X, self.y: y}\n sess.run(tf.initialize_variables([delta]), feed_dict = feed_dict)\n feed_dict = {self.featurizations: featurized_X, self.y: y}\n\n for i in range(num_iter):\n print(\"iteration: %d\"%i)\n sess.run(optimization_step, feed_dict = feed_dict)\n loss_adv = sess.run(loss, feed_dict = feed_dict)\n print(\"loss %f\" %loss_adv)\n return True", "def _add_train_op(self):\n self._lr_rate = tf.maximum(\n self._hps.min_lr, # min_lr_rate.\n tf.train.exponential_decay(self._hps.lr, self.global_step, 30000, 0.98))\n \n \n # Take gradients of the trainable variables w.r.t. 
the loss function to minimize\n loss_to_minimize = self._total_loss if self._hps.coverage else self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n\n # Clip the gradients\n with tf.device(self._get_gpu(self._num_gpus-1)):\n grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n if self._hps.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)\n\n elif self._hps.optimizer == 'adam': \n # Adam\n optimizer = tf.train.AdamOptimizer()\n \n elif self._hps.optimizer == 'sgd':\n # SGD\n optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)\n tf.summary.scalar('learning rate', self._lr_rate)\n \n else:\n raise Exception('Invalid optimizer: ', self._hps.optimizer)\n\n with tf.device(self._get_gpu(self._num_gpus-1)):\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=self.global_step, name='train_step')", "def CycleGAN(g_conv_dim=64, d_conv_dim=64, n_res_blocks=6):\n \n # Instantiate generators\n G_XtoY = Generator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)\n G_YtoX = Generator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)\n # Instantiate patch discriminators\n Dp_X = PatchDiscriminator(conv_dim=d_conv_dim)\n Dp_Y = PatchDiscriminator(conv_dim=d_conv_dim)\n # Instantiate global discriminators\n Dg_X = GlobalDiscriminator(conv_dim=d_conv_dim)\n Dg_Y = GlobalDiscriminator(conv_dim=d_conv_dim)\n\n # move models to GPU, if available\n cuda_available = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if cuda_available else \"cpu\")\n\n device = torch.device(device)\n G_XtoY.to(device)\n G_YtoX.to(device)\n Dp_X.to(device)\n Dp_Y.to(device)\n Dg_X.to(device)\n Dg_Y.to(device)\n\n print('Using {}.'.format(\"GPU\" if cuda_available else \"CPU\"))\n return G_XtoY, G_YtoX, Dp_X, Dp_Y, Dg_X, Dg_Y", "def _create_train_op(self):\n self.lr = self.learning_rate\n # global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.constant(value=self.learning_rate, shape=[], dtype=tf.float32)\n learning_rate =tf.train.exponential_decay(learning_rate,self.global_step,2*self.num_warm_up,0.96,staircase=True,name=\"exponential_decay\")\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if self.num_warm_up:\n global_steps_int = tf.cast(self.global_step, tf.int32)\n warmup_steps_int = tf.constant(self.num_warm_up, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = self.learning_rate * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n self.current_learning_rate = learning_rate\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.lr)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.lr)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif self.optim_type == \"bert\":\n self.optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9,\n beta_2=0.999, epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n\n self.logger.info(\"applying optimize %s\" % self.optim_type)\n if self.clip_weight:\n # clip_weight\n tvars = tf.trainable_variables()\n grads = tf.gradients(self.loss, tvars)\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.max_norm_grad)\n grad_var_pairs = zip(grads, tvars)\n train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad', global_step=self.global_step)\n new_global_step = self.global_step + 1\n train_op = tf.group(train_op, [self.global_step.assign(new_global_step)])\n self.train_op = train_op\n else:\n self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)", "def _build_train_op(self):\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n\n replay_chosen_qs = []\n for i in range(len(self.gammas)):\n replay_chosen_q = tf.reduce_sum(\n self._replay_net_outputs.q_values[i] * replay_action_one_hot,\n reduction_indices=1,\n name='replay_chosen_q_{}'.format(i))\n replay_chosen_qs.append(replay_chosen_q)\n\n targets = self._build_target_q_op()\n loss = 0.\n\n for i, (target,\n replay_chosen_q) in enumerate(zip(targets, replay_chosen_qs)):\n gamma_loss = tf.losses.huber_loss(\n tf.stop_gradient(target),\n replay_chosen_q,\n reduction=tf.losses.Reduction.NONE)\n\n loss += gamma_loss\n if self.summary_writer is not None:\n tf.summary.scalar('Losses/GammaLoss_{}'.format(i),\n tf.reduce_mean(gamma_loss))\n\n # Divide by the number of gammas to preserve scale.\n loss = loss / self.number_of_gammas\n\n if self.summary_writer is not None:\n with tf.variable_scope('Losses'):\n tf.summary.scalar('HuberLoss', tf.reduce_mean(loss))\n\n def clip_if_not_none(grad, clip_norm=5.):\n \"\"\"Clip the gradient only if not None.\"\"\"\n if grad is None:\n return grad\n return tf.clip_by_norm(grad, clip_norm)\n\n if self.gradient_clipping_norm is not None:\n # Clip gradients to test stability.\n grads_and_vars = self.optimizer.compute_gradients(tf.reduce_mean(loss))\n clipped_gradients = [\n (clip_if_not_none(grad, clip_norm=self.gradient_clipping_norm), var)\n for grad, var in 
grads_and_vars\n ]\n\n return self.optimizer.apply_gradients(clipped_gradients)\n else:\n return self.optimizer.minimize(tf.reduce_mean(loss))", "def optimize_parameters(self) -> None:\n self.forward() # compute fake images: G(A)\n # update discriminator\n self.set_requires_grad([self._discriminator_module], True) # enable backward for D\n self._discriminator_optimizer.zero_grad() # set D's gradients to zero\n self.backward_discriminator() # calculate gradients for D\n self._discriminator_optimizer.step() # update D's weights\n # update generator\n self.set_requires_grad([self._discriminator_module], False) # D requires no gradients when optimizing G\n self._generator_optimizer.zero_grad() # set G's gradients to zero\n self.backward_generator() # calculate gradients for G\n self._generator_optimizer.step() # update G's weights\n return", "def __init__(\n self, \n dim_feat_raw, \n dim_feat_smooth, \n dim_label_raw, \n dim_label_smooth, \n arch_gnn, \n aug_feat,\n num_ensemble, \n train_params\n ):\n super().__init__()\n self.mulhead = 1\n self.num_layers = arch_gnn[\"num_layers\"]\n self.dropout, self.dropedge = train_params[\"dropout\"], train_params['dropedge']\n self.mulhead = int(arch_gnn[\"heads\"]) # only useful for GAT\n\n self.branch_sharing = arch_gnn['branch_sharing'] # only for ensemble\n\n self.type_feature_augment = aug_feat\n assert dim_feat_raw <= dim_feat_smooth, \"smoothened feature cannot have smaller shape than the original one\"\n # NOTE: dim_label_raw may be larger than dim_label_smooth ==> label is not used as input\n self.num_classes = dim_label_raw\n self.dim_label_in = dim_label_smooth\n self.dim_feat_in = dim_feat_smooth\n self.dim_hidden = arch_gnn['dim']\n # build the model below\n dim, act = arch_gnn['dim'], arch_gnn['act']\n self.aug_layers, self.conv_layers, self.res_pool_layers = [], [], []\n for i in range(num_ensemble):\n # feat aug\n if len(self.type_feature_augment) > 0:\n self.aug_layers.append(nn.ModuleList(\n nn.Linear(_dim, self.dim_feat_in) for _, _dim in self.type_feature_augment\n ))\n # graph convs\n convs = []\n if i == 0 or not self.branch_sharing:\n for j in range(arch_gnn['num_layers']):\n cls_gconv = DeepGNN.NAME2CLS[arch_gnn['aggr']]\n dim_in = (self.dim_feat_in + self.dim_label_in) if j == 0 else dim\n convs.append(cls_gconv(dim_in, dim, dropout=self.dropout, act=act, mulhead=self.mulhead))\n self.conv_layers.append(nn.Sequential(*convs))\n else: # i > 0 and branch_sharing\n self.conv_layers.append(self.conv_layers[-1])\n # skip-pooling layer\n type_res = arch_gnn['residue'].lower()\n type_pool = arch_gnn['pooling'].split('-')[0].lower()\n cls_res_pool = layers.ResPool\n args_pool = {}\n if type_pool == 'sort':\n args_pool['k'] = int(arch_gnn['pooling'].split('-')[1])\n self.res_pool_layers.append(\n cls_res_pool(dim, dim, arch_gnn['num_layers'], type_res, type_pool,\n dropout=self.dropout, act=act, args_pool=args_pool\n ))\n if len(self.aug_layers) > 0:\n self.aug_layers = nn.ModuleList(self.aug_layers)\n self.conv_layers = nn.ModuleList(self.conv_layers)\n self.res_pool_layers = nn.ModuleList(self.res_pool_layers)\n # ------- ensembler + classifier -------\n if num_ensemble == 1:\n self.ensembler = layers.EnsembleDummy()\n else:\n self.ensembler = layers.EnsembleAggregator(dim, dim, num_ensemble, dropout=self.dropout, \n type_dropout=train_params[\"ensemble_dropout\"], act=arch_gnn[\"ensemble_act\"])\n self.classifier = DeepGNN.NAME2CLS['mlp'](dim, self.num_classes, act='I', dropout=0.)\n # ---- optimizer, etc. 
----\n self.lr = train_params[\"lr\"]\n self.sigmoid_loss = arch_gnn[\"loss\"] == \"sigmoid\"\n self.loss, self.opt_op = 0, None\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n self.num_ensemble = num_ensemble", "def opt_step(latent_values, real_coding, loss_weight, mse, \n generator, optimizer, ntimes, ninput):\n with tf.GradientTape() as tape:\n tape.watch(latent_values)\n # results from generator\n gen_output = generator(latent_values, training=False)\n loss = mse_loss(mse, real_coding, gen_output[:, :(ntimes - 1), :, :], \n loss_weight, ninput)\n\n # gradient of the loss ws to the input\n gradient = tape.gradient(loss, latent_values)\n # applies gradients to the input\n optimizer.apply_gradients(zip([gradient], [latent_values]))\n\n return loss", "def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n pass", "def _get_optimizer(self):\n raise NotImplementedError", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DX() # calculate gradients for D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n self.current_pred = np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def optimize_model(optimizer, policy_net, target_net, memory_batch):\n state_batch, action_batch, reward_batch, next_state_batch, done_batch = memory_batch\n state_batch =state_batch.to(device, torch.float32)\n action_batch = action_batch.to(device, torch.int64).view(-1,1)\n reward_batch = reward_batch.to(device, torch.float32)\n next_state_batch = next_state_batch.to(device, torch.float32)\n done_batch = done_batch.to(device, torch.float32)\n\n # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n # columns of actions taken\n state_action_values = policy_net(state_batch).gather(1, action_batch)\n\n # Compute 
V(s_{t+1}) for all next states.\n with torch.no_grad():\n next_state_action_values = target_net(next_state_batch)\n next_state_values = next_state_action_values.max(1)[0]\n next_state_values = next_state_values * (1 - done_batch) # no reward if this episode is done.\n # Compute the expected Q values\n expected_state_action_values = (next_state_values * gamma) + reward_batch\n expected_state_action_values = expected_state_action_values.unsqueeze(1)\n\n # Compute Huber loss\n assert expected_state_action_values.requires_grad == False\n assert state_action_values.requires_grad == True\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def _build_algorithm(self):\n self.optimizer = tf.train.AdamOptimizer(self._lr, epsilon=1.5e-8)\n trainable_variables = tf.trainable_variables(\"main/qnet\")\n\n # Compute the state value.\n batch_size = tf.shape(self._observation)[0]\n action_index = tf.stack([tf.range(batch_size), self._action], axis=1)\n action_q = tf.gather_nd(self._qvals, action_index)\n assert_shape(action_q, [None])\n\n # Compute back up.\n ave_q = tf.add_n(self._target_qvals) / self._n_net\n assert_shape(tf.reduce_max(ave_q, axis=1), [None])\n q_backup = tf.stop_gradient(self._reward + self._discount * (1 - self._done) * tf.reduce_max(ave_q, axis=1))\n\n # Compute loss and optimize the object.\n loss = tf.reduce_mean(tf.squared_difference(q_backup, action_q)) # 损失值。\n self._train_op = self.optimizer.minimize(loss, var_list=trainable_variables)\n\n # Update target network.\n update_target_operation = []\n for i in reversed(range(1, self._n_net)): # i=0表示最近的模型。\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(f\"target_{i}/qnet\", f\"target_{i-1}/qnet\"))\n\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(\"target_0/qnet\", \"main/qnet\"))\n\n self.update_target_op = update_target_operation\n self._log_op = {\"loss\": loss}", "def define(self, optimizer = Adam(lr=1e-5)): \n \n self.optimizer = optimizer\n\n model = Sequential()\n\n #Layer 1\n model.add(Conv2D( filters = 96, \n kernel_size = (11,11), \n strides = 4, \n padding = 'same', \n activation = 'relu', \n input_shape = (224, 224, 3), \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) # overlapping pooling\n #Layer 2\n model.add(Conv2D( filters = 256, \n kernel_size = (5,5), \n strides = 1, \n padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) \n #Layer 3\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', kernel_initializer = 'he_normal'))\n #Layer 4\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n 
strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 5\n model.add(Conv2D( filters = 256, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 6\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None))\n \n #Layer 7\n model.add(Flatten())\n \n #Layer 8\n model.add(Dense( units = 4096, activation = 'relu'))\n model.add(Dense( units = 1024, activation = 'relu'))\n model.add(Dense( units = 512, activation = 'relu'))\n model.add(Dense( units = 256, activation = 'relu'))\n model.add(Dense( units = 128, activation = 'relu'))\n \n #Layer end\n model.add(Dense( units = 3, activation = 'softmax'))\n model.summary()\n \n self.model = model", "def test_adam_standalone_with_augmentation(self):\n tf.reset_default_graph()\n\n v = tf.Variable([1., 2., 3.])\n obj = tf.reduce_sum(tf.pow(v, 2))\n\n v1, obj1 = vectorize_model([v], obj, augment=2)\n\n iterations = 100\n lr = .1\n\n adam_dict = AdamOptimizer.create(v1, lr=lr, loss=obj1, w_is_state=True)\n # assign_ops = v1.assign(adam_dict)\n\n # print(assign_ops)\n\n with tf.Session().as_default() as ss:\n tf.global_variables_initializer().run()\n for _ in range(iterations):\n print(ss.run(adam_dict.dynamics))\n ss.run(adam_dict.assign_ops)\n adam_dict.increase_global_step()\n res = v.eval()\n print(v1.tensor.eval())\n\n print(res)\n\n tf_adam = tf.train.AdamOptimizer(learning_rate=lr).minimize(obj, var_list=[v])\n\n with tf.Session().as_default() as ss:\n tf.global_variables_initializer().run()\n for _ in range(iterations):\n ss.run(tf_adam)\n res2 = v.eval()\n\n print(res2)\n\n self.assertLess(np.linalg.norm(res - res2), 1.e-5)", "def __init__(self,ls,activations = [tf.nn.tanh, tf.nn.tanh, None], sess = None, RL = False, lr = 1e-2):\n self.ls = ls\n if sess == None:\n self.sess = self.tf_reset()\n else:\n self.sess = sess\n self.activations = activations\n self.input_ph = tf.placeholder(dtype=tf.float32, shape=[None, ls[0]]) # batch-size by state size\n self.output_ph = tf.placeholder(dtype=tf.float32, shape=[None, ls[-1]]) # action space size\n self.W_dict = {}\n self.b_dict = {}\n for i in range(len(ls)-1):\n self.W_dict[i] = tf.get_variable(name='W'+str(i), shape=[ls[i], ls[i+1]], initializer=tf.contrib.layers.xavier_initializer())\n self.b_dict[i] = tf.get_variable(name='b'+str(i), shape=[ls[i+1]], initializer=tf.constant_initializer(0.))\n\n\n self.layer = self.input_ph\n print(tf.shape(self.layer))\n\n\n for i in range(len(self.activations)):\n self.layer = tf.matmul(self.layer, self.W_dict[i]) + self.b_dict[i]\n print(tf.shape(self.layer))\n if self.activations[i] is not None:\n self.layer = self.activations[i](self.layer)\n self.output_pred = self.layer\n\n if RL == True: \n with tf.name_scope('reward_holder'):\n self.reward_holder = tf.placeholder(shape=[None],dtype=tf.float32)\n \n with tf.name_scope('get_resp_outs'):\n self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32, name = 'action_holder')\n \n self.indexes = tf.range(0, tf.shape(self.output_pred)[0]) * tf.shape(self.output_pred)[1] + self.action_holder\n\n self.responsible_outputs = tf.gather(tf.reshape(self.output_pred, [-1]), self.indexes, name = 'responsible_outputs')\n # out of the output vector, this will pull out the indexes\n # But i still don't understand indexes.\n\n # i feel like instead of going thru all of this, you could have just saved the actual outputs. 
I think I'll try that.\n # then for responsible outputs, you'd do tf.gather(outputs, action_holder) oh maybe it's not different than this. \n # Maybe that's exactly what they're doing, bc action_holder is a scaler number. IDK.\n with tf.name_scope('loss'):\n self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.reward_holder) #becuase reward_holder value \n # doesn't directly change as you change the Weights, this is equivalent to multiplying the gradient by the reward.\n # when you take the gradient, you're solving for d(log*A)/dW = d(log_p)/dW * d(log_p*A)/d(log_p) = A*d(log_p)/dW. so it's equivalent to mult gradient\n # by the reward function\n tvars = tf.trainable_variables()\n\n with tf.name_scope('update'):\n # self.train_step = tf.train.RMSPropOptimizer(learning_rate = lr, decay = 0.99).minimize(self.loss)\n self.train_step = tf.train.AdamOptimizer().minimize(self.loss)\n self.init = tf.global_variables_initializer()", "def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,\n vocab_size, embed_size,hidden_size, is_training,decoder_sent_length=6,\n initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001):\n # set hyperparamter\n self.num_classes = num_classes\n self.batch_size = batch_size\n self.sequence_length = sequence_length\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.is_training = is_training\n self.learning_rate = tf.Variable(learning_rate, trainable=False, name=\"learning_rate\")\n self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)\n self.initializer = initializer\n self.decoder_sent_length=decoder_sent_length\n self.hidden_size = hidden_size\n self.clip_gradients=clip_gradients\n self.l2_lambda=l2_lambda\n\n self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name=\"input_x\") #x\n self.decoder_input = tf.placeholder(tf.int32, [None, self.decoder_sent_length],name=\"decoder_input\") #y, but shift\n self.input_y_label = tf.placeholder(tf.int32, [None, self.decoder_sent_length], name=\"input_y_label\") #y, but shift\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n self.global_step = tf.Variable(0, trainable=False, name=\"Global_Step\")\n self.epoch_step = tf.Variable(0, trainable=False, name=\"Epoch_Step\")\n self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))\n self.decay_steps, self.decay_rate = decay_steps, decay_rate\n\n self.instantiate_weights()\n self.logits = self.inference() #logits shape:[batch_size,decoder_sent_length,self.num_classes]\n\n self.predictions = tf.argmax(self.logits, axis=2, name=\"predictions\")\n self.accuracy = tf.constant(0.5) # fuke accuracy. (you can calcuate accuracy outside of graph using method calculate_accuracy(...) 
in train.py)\n if not is_training:\n return\n self.loss_val = self.loss_seq2seq()\n self.train_op = self.train()", "def __init__(self, gpu_ids='0', isTrain=False, checkpoints_dir='./checkpoints', name='experiment_name', continue_train=False, model='cycle_gan'):\n \n assert(not isTrain)\n BaseModel.__init__(self, gpu_ids=gpu_ids, isTrain=isTrain, checkpoints_dir=checkpoints_dir, name=name, continue_train=continue_train, verbose=False)\n\n self.input_nc = 3\n self.output_nc = 3\n self.ngf = 64 # num of gen filters in the last conv layer\n self.ndf = 64 # num of discriminator filters in the first conv layer'\n self.netG = 'resnet_9blocks' # specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]\n self.norm = 'instance' # instance normalization or batch normalization [instance | batch | none]\n self.no_dropout = True\n self.init_type = 'normal' # network initialization [normal | xavier | kaiming | orthogonal]\n self.init_gain = 0.02\n self.netD = 'basic' # specify discriminator architecture [basic | n_layers | pixel]\n self.n_layers_D = 3 # only used if netD==n_layers\n self.pool_size = 50 # the size of image buffer that stores previously generated images\n self.lr = 0.0002\n self.beta1 = 0.5 # momentum term of adam\n self.gan_mode = 'lsgan' # the type of GAN objective. [vanilla| lsgan | wgangp]\n self.model_suffix = ''\n\n self.loss_names = []\n self.visual_names = ['real', 'fake']\n self.model_names = ['G' + self.model_suffix] # only generator is needed.\n self.netG = networks.define_G(self.input_nc, self.output_nc, self.ngf, self.netG,\n self.norm, not self.no_dropout, self.init_type, self.init_gain, self.gpu_ids)\n\n setattr(self, 'netG' + self.model_suffix, self.netG) # store netG in self.", "def tensorflow_optimization(m):\n\n fusing.fuse_Transpose_into_Constant(m.graph)\n fusing.fuse_MatMul_and_Add_into_Gemm(m.graph)\n other.topological_sort(m.graph)\n\n m = other.polish_model(m)\n\n # constant folding\n replacing.replace_shape_with_constant(m.graph)\n\n # constant_folding\n m = other.inference_shapes(m)\n while constant_folding.constant_folding(m.graph):\n logging.debug(\"After constant folding jobs.\")\n other.topological_sort(m.graph)\n while len(m.graph.value_info) != 0:\n m.graph.value_info.pop()\n\n m = other.inference_shapes(m)\n replacing.replace_shape_with_constant(m.graph)\n other.topological_sort(m.graph)\n m = tf_pattern_match(m)\n m = optimizer.optimize(m, [\"eliminate_deadend\"])\n\n eliminating.eliminate_consecutive_reshape(m.graph)\n eliminating.eliminate_Squeeze_before_Reshape(m.graph)\n other.topological_sort(m.graph)\n return m", "def _optimize(self):\n # Retrieve all trainable variables\n train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n # Compute the gradient (return a pair of variable and their respective gradient)\n grads = self.optimizer.compute_gradients(loss=self.loss, var_list=train_variables)\n self.train_dis = self.optimizer.apply_gradients(grads, global_step=self.global_step)", "def tensor_network_aug(inputs, states, output_size, rank_vals, bias, bias_start=0.0):\n # each coordinate of hidden state is independent- parallel\n num_orders = len(rank_vals)+1\n num_lags = len(states)\n batch_size = tf.shape(inputs)[0]\n state_size = states[0].get_shape()[1].value #hidden layer size\n inp_size = inputs.get_shape()[1].value\n total_state_size = (inp_size + state_size * num_lags + 1 )\n\n mat_dims = np.ones((num_orders,)) * total_state_size\n mat_ranks = np.concatenate(([1], rank_vals, [output_size]))\n mat_ps 
= np.cumsum(np.concatenate(([0], mat_ranks[:-1] * mat_dims * mat_ranks[1:])),dtype=np.int32)\n mat_size = mat_ps[-1]\n mat = vs.get_variable(\"weights\", mat_size) # h_z x h_z... x output_size\n\n states = (inputs,) + states # concatenate the [x, h] \n \n states_tensor = nest.flatten(states)\n #total_inputs = [inputs]\n states_vector = tf.concat(states, 1)\n states_vector = tf.concat( [states_vector, tf.ones([batch_size, 1])], 1)\n \"\"\"form high order state tensor\"\"\"\n states_tensor = states_vector\n for order in range(num_orders-1):\n states_tensor = _outer_product(batch_size, states_tensor, states_vector)\n \n # states_tensor= tf.reshape(states_tensor, [-1,total_state_size**num_orders] )\n\n cores = []\n for i in range(num_orders):\n # Fetch the weights of factor A^i from our big serialized variable weights_h.\n mat_core = tf.slice(mat, [mat_ps[i]], [mat_ps[i + 1] - mat_ps[i]])\n mat_core = tf.reshape(mat_core, [mat_ranks[i], total_state_size, mat_ranks[i + 1]]) \n cores.append(mat_core)\n \n res = tensor_train_contraction(states_tensor, cores)\n if not bias:\n return res\n biases = vs.get_variable(\"biases\", [output_size])\n return nn_ops.bias_add(res,biases)", "def __init__(self,\n sess,\n output_shape,\n processing_dtype=tf.float32,\n conditional_input_shapes=None,\n noise_shape=(100,),\n generator_network_fn=gen_lib.mnist_generator_gan,\n discriminator_network_fn=gen_lib.mnist_discriminator_gan,\n tf_device='/cpu:*',\n max_tf_checkpoints_to_keep=4,\n g_optimizer=tf.train.AdamOptimizer(),\n d_optimizer=tf.train.AdamOptimizer(),\n k=1,\n weights_clip=0.01,\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n assert weights_clip > 0\n self.weights_clip = weights_clip\n gan.VanillaGAN.__init__(self,\n sess,\n output_shape,\n processing_dtype=processing_dtype,\n conditional_input_shapes=conditional_input_shapes,\n noise_shape=noise_shape,\n generator_network_fn=generator_network_fn,\n discriminator_network_fn=discriminator_network_fn,\n tf_device=tf_device,\n max_tf_checkpoints_to_keep=max_tf_checkpoints_to_keep,\n g_optimizer=g_optimizer,\n d_optimizer=d_optimizer,\n k=k,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency,\n allow_partial_reload=allow_partial_reload)\n tf.logging.info('\\t weights_clip: %d', weights_clip)", "def optimize(self):\n self.output = self.net.forward(Variable(self.source))\n self.optimizer.zero_grad()\n self.loss = self.loss_function(self.output, Variable(self.target))\n self.loss.backward()\n self.optimizer.step()", "def __init__(self, state_dim, action_dim, learning_rate, weight_decay):\n self.dynamics_net = ForwardModel(state_dim, action_dim)\n self.rewards_net = RewardModel(state_dim, action_dim)\n self.done_net = RewardModel(state_dim, action_dim)\n\n self.dyn_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.reward_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.done_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)", "def run(args):\n\n #Get input values\n n_feat = list(map(int, args.n_feat))[0]\n hidden = list(map(int, args.hidden))[0]\n latent = int(args.latent)\n data_path = args.i[0]\n LOGDIR = args.o\n n_layer = args.n_layers\n drop_rate = args.dp_rate\n\n # setup graph\n g = tf.Graph()\n with g.as_default():\n\n ## Setup placeholders and one hot encode input\n with tf.variable_scope('inputs', reuse=True):\n x_data = 
tf.placeholder(tf.float32, [None, n_feat], name=\"x_data\")\n x_onehot = tf.one_hot(tf.cast(x_data, tf.int32),3,dtype=tf.float32)\n x_flat = tf.reshape(x_onehot, [-1, 3*n_feat])\n is_training = tf.placeholder(tf.bool)\n beta = tf.placeholder(tf.float32, [1,], name=\"Beta\")\n\n #Encoder\n with tf.name_scope('encoder'):\n en = encoder(x_flat,hidden,n_layer,tf.nn.relu,drop_rate,is_training)\n\n #Latent layers\n with tf.name_scope('latent_space'):\n z_mean = fc(en, latent, scope='enc_fc4_mu', activation_fn=None) # Linear activation\n z_log_sigma = fc(en, latent, scope='enc_fc4_sigma', activation_fn=tf.nn.softplus) # softplus activation\n\n # Sample from gaussian distribution\n z = sample_z(z_mean, z_log_sigma)\n\n #Decoder\n with tf.name_scope('decoder'):\n de = decoder(z, hidden, n_layer, tf.nn.relu,drop_rate,is_training)\n\n # get flat reconstruction and reshape back to genotype format with argmax\n with tf.name_scope('output'):\n x_hat = fc(de, 3*n_feat, scope='dec_fc4', activation_fn=None) #linear activation\n x_hat = tf.reshape(x_hat,[-1, n_feat,3])\n x_decoded = tf.cast(tf.argmax(x_hat,axis=-1),tf.int64)\n\n # Loss functions\n with tf.name_scope(\"cross_entropy\"):\n cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=x_hat, labels=x_onehot))\n recon_loss = tf.reduce_mean(cross_entropy)\n tf.summary.scalar(\"cross_entropy\", recon_loss)\n\n with tf.name_scope(\"KL_divergence\"):\n KL_divergence = -0.5 * tf.reduce_sum(1 + z_log_sigma - tf.square(z_mean) - tf.exp(z_log_sigma), axis=1)\n latent_loss = tf.reduce_mean(KL_divergence)\n tf.summary.scalar(\"KL_divergence\", latent_loss)\n\n with tf.name_scope(\"total_loss\"):\n total_loss = tf.reduce_mean(recon_loss + tf.reduce_mean(tf.multiply(KL_divergence, beta)))\n tf.summary.scalar(\"total_loss\", total_loss)\n\n # Train optimizer\n with tf.name_scope(\"train\"):\n train_step = tf.train.AdamOptimizer(learning_rate=args.lrate).minimize(total_loss)\n\n # save summaries\n saver = tf.train.Saver()\n\n # initializer\n init = tf.global_variables_initializer()\n\n # prepare lists for collecting data\n epoch_dict = {\"CE\":[], \"KLd\": [], \"loss\": []}\n\n # open handle forsaving loss\n loss_file = \"%s/loss.tab\" % LOGDIR\n if os.path.exists(loss_file):\n os.remove(loss_file)\n fh_log = open(loss_file, \"a\")\n\n fh_log.write(\"Epoch\\tLoss\\tKL\\tCE\\n\")\n\n ## Run session ##\n with tf.Session(graph=g, config=tf.ConfigProto(log_device_placement=True)) as sess:\n #sess.run()\n sess.run(init)\n\n beta_, to_add_ = init_warmup(args.warmup)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(LOGDIR + '/train', sess.graph)\n\n ## Get genotype data\n data=get_data(data_path)\n ran=30\n\n # run epochs\n for epoch in range(args.epochs):\n\n # prepare list for collecting data for minibatches\n mb_dict = {\"CE\":[], \"KLd\": [], \"loss\": []}\n\n # training\n data2 = data.iloc[:,0:n_feat]\n for iter in range(ran):\n D = data2.sample(n=args.batch,axis=0)\n data2=data2.drop(D.index)\n batch=np.array(D)\n _, vaecost, cre, KLd, summary = sess.run([train_step, total_loss, cross_entropy, KL_divergence, merged], feed_dict={x_data: batch, beta: beta_, is_training: True})\n mb_dict[\"loss\"].append(vaecost)\n mb_dict[\"CE\"].append(cre)\n mb_dict[\"KLd\"].append(KLd)\n\n # summaries for every epoch\n epoch_dict = summaries(epoch_dict, mb_dict)\n\n # add to tensorboard\n train_writer.add_summary(summary, epoch)\n\n # after epoch add information to epoch lists and write out to file\n report(LOGDIR, epoch, epoch_dict, 
saver, sess, fh_log)\n\n # add to beta\n beta_ = beta_ + to_add_\n if beta_ > 1:\n beta_ = np.array([1,])\n\n # after session\n fh_log.close()\n\n ## get latent representation and save reconstructions\n la_dict = {\"CE\":[], \"KLd\": [], \"loss\": [],}\n\n latent_file = \"%s/latent.representation.tab\" % LOGDIR\n if os.path.exists(latent_file):\n os.remove(latent_file)\n open_file_1 = open(latent_file, 'ab')\n\n genotype_file = \"%s/genotype.reconstruction.tab\" % LOGDIR\n if os.path.exists(genotype_file):\n os.remove(genotype_file)\n open_file_2 = open(genotype_file, 'ab')\n\n labels_file = \"%s/labels.reconstruction.tab\" % LOGDIR\n if os.path.exists(labels_file):\n os.remove(labels_file)\n open_file_3 = open(labels_file, 'ab')\n\n # final pass\n for iter in range(1):\n D = data.iloc[0:,:]\n ind = D.index\n #drop those individuals after use\n data=data.drop(ind)\n batch_test=np.array(D.iloc[:,0:n_feat])\n vaecost, cre, KLd, mu, x_reconstruction = sess.run([total_loss, cross_entropy, KL_divergence, z_mean,x_decoded], feed_dict={x_data: batch_test,beta: beta_, is_training: False})\n la_dict[\"loss\"].append(vaecost)\n la_dict[\"CE\"].append(cre)\n la_dict[\"KLd\"].append(KLd)\n np.savetxt(open_file_1, mu, fmt=\"%.3f\", delimiter=\"\\t\")\n np.savetxt(open_file_2, x_reconstruction, delimiter=\"\\t\")\n np.savetxt(open_file_3, list(ind), delimiter=\"\\t\",fmt=\"%s\")\n\n # close\n open_file_1.close()\n open_file_2.close()\n open_file_3.close()\n\n # print loss to screen\n print (\"Final model loss: %f; KLd: %f; CE %f; \" % (np.mean(la_dict[\"loss\"]), np.mean(la_dict[\"KLd\"]), np.mean(la_dict[\"CE\"])))", "def __init__(self, dim_input, \n ALPHA = 0.5,\n LAMBDA = 1.0,\n KAPPA = 1.0,\n OPTIM = 'Adam',\n LEARN_RATE = 0.01,\n per_split_feats = 300):\n \n print(\"Building computational graph for survival NCA.\")\n #pUtils.Log_and_print(\"Building computational graph for survival NCA.\") \n \n assert per_split_feats < dim_input\n \n # set up instace attributes\n self.dim_input = dim_input\n self.ALPHA = ALPHA\n self.LAMBDA = LAMBDA\n self.KAPPA = KAPPA\n self.OPTIM = OPTIM\n self.LEARN_RATE = LEARN_RATE\n self.per_split_feats = per_split_feats\n \n # clear lurking tensors\n tf.reset_default_graph()\n \n #pUtils.Log_and_print(\"Adding placeholders.\")\n self.add_placeholders()\n \n #pUtils.Log_and_print(\"Adding linear feature transform.\")\n self.add_linear_transform()\n \n #pUtils.Log_and_print(\"Adding regularized weighted log likelihood.\")\n self.add_cost()\n \n #pUtils.Log_and_print(\"Adding optimizer.\")\n self.add_optimizer()\n \n #pUtils.Log_and_print(\"Finished building graph.\")", "def __init__(self, env, args):\n\n super(Agent_PG,self).__init__(env)\n self.env = env\n\n # Define Agent Model...\n # Hyper-parameters\n self.lr = args.lr # learning rate\n self.bz = args.bz # batch size\n self.episodes = args.eps # total episodes(epochs)\n self.gamma = args.gamma\n self.freq = args.freq\n \n self.action_size = 3#env.get_action_space().n\n self.hidden_dim = 200\n\n self.model = tf.Graph()\n with self.model.as_default():\n # Network Architecture\n self.state_in = tf.placeholder(shape=[None, 80, 80, 1], dtype=tf.float32, name='state_in')\n \n #init1 = tf.truncated_normal_initializer(0, stddev=1./np.sqrt(80*80), dtype=tf.float32)\n #init2 = tf.truncated_normal_initializer(0, stddev=1./np.sqrt(self.hidden_dim), dtype=tf.float32)\n init = tf.contrib.layers.xavier_initializer(uniform=False)\n \n self.conv = tf.layers.conv2d(self.state_in, 32, kernel_size=4, strides=(2,2), padding='same', 
kernel_initializer=init,\n activation=tf.nn.relu)\n self.conv = tf.layers.conv2d(self.conv, 64, kernel_size=4, strides=(2,2), padding='same', kernel_initializer=init,\n activation=tf.nn.relu)\n self.conv = tf.layers.conv2d(self.conv, 64, kernel_size=4, strides=(2,2), padding='same', kernel_initializer=init, \n activation=tf.nn.relu)\n #self.conv = tf.layers.max_pooling2d(self.conv, 2, strides=2)\n #print(self.conv.get_shape())\n\n self.hidden = tf.contrib.layers.flatten(self.conv)\n self.hidden = tf.layers.dense(self.hidden, self.hidden_dim, kernel_initializer=init, \n activation=tf.nn.relu)\n self.hidden = tf.layers.dense(self.hidden, self.hidden_dim, kernel_initializer=init, \n activation=tf.nn.relu)\n self.output = tf.layers.dense(self.hidden, self.action_size, kernel_initializer=init,\n activation=tf.nn.softmax)\n \n #self.chosen_action = tf.argmax(self.output, 1)\n\n self.reward_holder = tf.placeholder(shape=[None,1], dtype=tf.float32, name='reward')\n self.action_holder = tf.placeholder(shape=[None,self.action_size], dtype=tf.float32, name='action')\n \n self.loss = tf.nn.l2_loss(self.action_holder - self.output)\n #tf.nn.softmax_cross_entropy_with_logits(\n # labels=self.action_onehot, logits=self.output, name=\"cross_entropy\")\n #self.loss = -tf.reduce_sum(tf.multiply(self.reward_holder, self.cross_entropy, name=\"rewards\"))\n #self.loss = -tf.reduce_sum(tf.log(tf.clip_by_value(self.action_dist, 1e-10, 1.0))*self.reward_holder)\n \n tvars = tf.trainable_variables()\n #self.gradient_holders = []\n #for idx, var in enumerate(tvars):\n # placeholder = tf.placeholder(tf.float32, name=str(idx)+'_holder')\n # self.gradient_holders.append(placeholder)\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr, decay=0.99) \n self.gradients = optimizer.compute_gradients(self.loss, tvars, grad_loss=self.reward_holder)\n self.optim = optimizer.apply_gradients(self.gradients)\n #self.optim = optimizer.minimize(self.loss)\n \n if args.test_pg: \n self.model_path = os.path.join('./models/pg_models-1200')\n self.saver = tf.train.Saver()", "def prepare_evaluation(self):\n # this is called after the optimizer has discretized the graph\n cells = [self.edges[2, 3].op, self.edges[4, 5].op, self.edges[6, 7].op]\n\n self._expand()\n\n channels = [64, 128, 256]\n factor = 4\n\n for cell, c in zip(cells, channels):\n for _, _, data in cell.edges.data():\n data.op.update_nodes(\n lambda node, in_edges, out_edges: _set_comb_op_channels(\n node, in_edges, out_edges, c=c\n ),\n single_instances=False,\n )\n\n self.edges[1, 2].set(\"op\", ops.Stem(channels[0]))\n self.edges[2, 3].set(\"op\", cells[0].copy())\n self.edges[3, 4].set(\n \"op\",\n ops.SepConv(channels[0], channels[0], kernel_size=3, stride=1, padding=1),\n )\n self.edges[4, 5].set(\"op\", cells[0].copy())\n self.edges[5, 6].set(\n \"op\",\n ops.SepConv(channels[0], channels[1], kernel_size=3, stride=2, padding=1),\n )\n self.edges[6, 7].set(\"op\", cells[1].copy())\n self.edges[7, 8].set(\n \"op\",\n ops.SepConv(channels[1], channels[1], kernel_size=3, stride=1, padding=1),\n )\n self.edges[8, 9].set(\"op\", cells[1].copy())\n self.edges[9, 10].set(\n \"op\",\n ops.SepConv(channels[1], channels[2], kernel_size=3, stride=2, padding=1),\n )\n self.edges[10, 11].set(\"op\", cells[2].copy())\n self.edges[11, 12].set(\n \"op\",\n ops.SepConv(channels[2], channels[2], kernel_size=3, stride=1, padding=1),\n )\n self.edges[12, 13].set(\"op\", cells[2].copy())\n self.edges[13, 14].set(\n \"op\",\n ops.Sequential(\n ops.SepConv(\n channels[-1], 
channels[-1], kernel_size=3, stride=1, padding=1\n ),\n nn.AdaptiveAvgPool2d(1),\n nn.Flatten(),\n nn.Linear(channels[-1], 10),\n ),\n )\n\n self.update_edges(\n update_func=lambda edge: _increase_channels(edge, factor),\n scope=self.OPTIMIZER_SCOPE,\n private_edge_data=True,\n )", "def train(g1,\n g2,\n critic,\n loss_fn,\n learning_rate,\n batch_size=TRAIN_BATCH_SIZE,\n n_iters=15000,\n n_evals=15,\n compute_jacobian=False,\n noise_std=0.0,\n data_dimensions=DIMS//2,\n n_iter=1,\n loss_name='InfoNCE',\n ):\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, \"X1 and X2 shapes must agree to add noise!\"\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n\n # Compute the representations.\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n # Compute the Jacobian of g1 if needed.\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n\n # Optimizer setup.\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n\n # Subgraph for eval (add noise to input if necessary)\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))\n codes = g1(data_ph_noisy)\n\n training_losses, testing_losses, classification_accuracies, iters, sigmas \\\n = [], [], [], [], []\n # Main training loop.\n for iter_n in range(n_iters):\n # Evaluate the model performance.\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(\n get_testing_loss(x_test, session, loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\"{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}\".format(\\\n n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))\n # Run one optimization step.\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n\n return Results(iterations=iters,\n training_losses=training_losses,\n testing_losses=testing_losses,\n classification_accuracies=classification_accuracies,\n singular_values=sigmas)", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 
- sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def __init__(self, m, n, dim, num_iterations, eta=0.5, sigma=None):\n\n self._m = m\n self._n = n\n self._neighbourhood = []\n self._topography = []\n self._num_iterations = int(num_iterations)\n self._learned = False\n self.abnormal_dist = 0\n\n if sigma is None:\n sigma = max(m, n) / 2.0 # Constant radius\n else:\n sigma = float(sigma)\n\n # Inicializace grafu\n self._graph = tf.Graph()\n\n with self._graph.as_default():\n # vahy jednotlivych neuronu jsou nahodne nastavene -- matice m X n kde na kazde pozici je\n # 1-D pole velikosti dimenze vstup. 
dat\n self._W = tf.Variable(tf.random_normal([m * n, dim], seed=5))\n\n # rozlozeni gridu - pole m X n kde jsou pozice neuronu\n self._topography = tf.constant(np.array(list(self._neuron_location(m, n))))\n\n # Placeholder pro vstupni data\n self._X = tf.placeholder('float', [dim])\n\n # Placeholder pro pocet iteraci\n self._iter = tf.placeholder('float')\n\n # Vypocet BMU - spocita euklidovu vzdalenost mezi vstupnim vektorem a kazdym neuronem gridu (jeho vahou)\n # a vrati index index toho neuronu, ktery ma nejmensi vzdalenost\n d = tf.sqrt(tf.reduce_sum(tf.pow(self._W - tf.stack([self._X for i in range(m * n)]), 2), 1))\n self.WTU_idx = tf.argmin(d, 0)\n\n # vrati lokaci neuronu na zaklade jeho indexu\n slice_start = tf.pad(tf.reshape(self.WTU_idx, [1]), np.array([[0, 1]]))\n self.WTU_loc = tf.reshape(tf.slice(self._topography, slice_start, tf.constant(np.array([1, 2]))), [2])\n self.bd2 = self.WTU_loc\n\n # Zmena hodnot sigma a eta podle aktualni iterace\n learning_rate = 1 - self._iter / self._num_iterations\n _eta_new = eta * learning_rate\n _sigma_new = sigma * learning_rate\n\n # Neighbourhood funkce ktera generuje vektor s upravenou learning rate pro vsechny neurony na zaklade aktualni iterace a BMU\n distance_square = tf.reduce_sum(tf.pow(tf.subtract(self._topography, tf.stack([self.WTU_loc for i in range(m * n)])), 2), 1)\n neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(distance_square, 'float32'), tf.pow(_sigma_new, 2))))\n\n # vynasobeni learning rate s fci sousedu\n # Operace je pak pouzita k aktualizaci vektoru vah jednotlivych neuronu na zaklade vstupu\n eta_into_gamma = tf.multiply(_eta_new, neighbourhood_func)\n\n # uprava vah na zaklade nove vypoctenych\n # nove vypoctene vahy musi byt upraveny na spravny shape\n weight_multiplier = tf.stack(\n [tf.tile(tf.slice(eta_into_gamma, np.array([i]), np.array([1])), [dim]) for i in range(m * n)])\n delta_W = tf.multiply(weight_multiplier, tf.subtract(tf.stack([self._X for i in range(m * n)]), self._W))\n new_W = self._W + delta_W\n self._training = tf.assign(self._W, new_W)\n\n # Inicializace vsech promennych\n init = tf.global_variables_initializer()\n self._sess = tf.Session()\n self._sess.run(init)\n self._saver = tf.train.Saver()", "def _build_backward_graph(self):\n\n print('[*] Building optimization problem.')\n with tf.variable_scope('optimization'):\n for t in range(0, self.sequence_length):\n print_progress(float(t+1) / self.sequence_length)\n\n # loss is a binary crossentropy for each timestep\n self.loss += self.bce(self.targets[t], self.outputs[t])\n\n self.opt = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)\n g = self.opt.compute_gradients(self.loss)\n clipped_g = [(tf.clip_by_value(grad, self.min_grad, self.max_grad), var) for grad, var in g]\n self.opt_step = self.opt.apply_gradients(clipped_g)", "def build_optimizer(self, loss):\n self.decay_epoch = tf.Variable(0, trainable=False)\n decay_learning_rate = tf.train.exponential_decay(self.learning_rate, self.decay_epoch, 1, 0.9)\n optimizer = tf.train.AdamOptimizer(decay_learning_rate)\n gradient_pair = optimizer.compute_gradients(loss)\n clip_gradient_pair = []\n for grad, var in gradient_pair:\n grad = tf.clip_by_value(grad, -self.grad_clip, self.grad_clip)\n clip_gradient_pair.append((grad, var))\n optimizer = optimizer.apply_gradients(clip_gradient_pair)\n return optimizer, decay_learning_rate", "def run():\n \n # pass your training data here, a list of training batch, [batch1, batch2], batch=(64,32,32,1) float32.\n train_data_lst = None\n 
train_label_lst = None\n\n bufsize = 0 \n\n tf.reset_default_graph() \n with tf.Session() as sess:\n global_step = tf.train.get_or_create_global_step()\n batch = tf.Variable(0, dtype=tf.int32)\n epoch_counter = tf.Variable(0, dtype=tf.int32)\n\n train_data_node = tf.placeholder(data_type(),shape=(BATCH_SIZE, PATCH_SIZE[0], PATCH_SIZE[1], NUM_CHANNELS))\n train_label_node = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1))\n\n net = QualityNet.QualityNet(train_data_node, NUM_CHANNELS, SEED=SEED)\n net.build_graph()\n logits = net.forward(train_data_node, train=True)\n\n loss = tf.divide(tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(logits, train_label_node)))), BATCH_SIZE)\n var_list = net.parameters\n epoch_inc_op = tf.assign(epoch_counter, epoch_counter+1)\n\n learning_rate = tf.train.exponential_decay(\n 0.01,\n epoch_counter,\n DECAY_EPOCH,\n 0.1,\n staircase=True)\n\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9, use_nesterov=True)\n\n gvs = optimizer.compute_gradients(loss,var_list)\n capped_gvs = [(tf.clip_by_norm(gv[0], 1), gv[1]) for gv in gvs]\n train_op = optimizer.apply_gradients(capped_gvs)\n\n saver = tf.train.Saver(var_list, max_to_keep=1)\n tf.global_variables_initializer().run()\n\n for epoch in range(NUM_EPOCHS):\n epoch_loss = 0\n\n for train_data, train_labels in zip(train_data_lst, train_label_lst):\n\n feed_dict = {train_data_node: train_data,\n train_label_node: train_labels}\n loss_val = sess.run([train_op, loss], feed_dict=feed_dict)\n epoch_loss += loss_val[-1]\n\n sess.run(epoch_inc_op)\n print (\"Epoch loss:\", epoch_loss)\n saver.save(sess, os.path.join(MODEL_DIR, 'model_'+str(epoch+1)))", "def BuildTrainGraph(self):\n # Replace this with an actual training op\n self.train_step_ = None\n\n # Replace this with an actual loss function\n self.train_loss_ = None\n\n #### YOUR CODE HERE ####\n # See hints in instructions!\n\n # Define approximate loss function.\n # Note: self.softmax_ns (i.e. 
k=200) is already defined; use that as the\n # number of samples.\n # Loss computation (sampled, for training)\n #print(self.W_out_.get_shape())\n #print(self.b_out_.get_shape())\n #print(self.outputs_.get_shape())\n #print(tf.reshape(self.outputs_, [-1,self.H]).get_shape())\n #print(tf.reshape(self.outputs_, [self.batch_size_*self.max_time_,self.H]).get_shape())\n #print(self.x_.get_shape())\n #print(tf.reshape(self.x_, [-1, self.W_out_.get_shape()[-1]]).get_shape())\n #print(self.target_y_.get_shape())\n #print(tf.reshape(self.target_y_, [self.batch_size_*self.max_time_,]).get_shape())\n \n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n #labels=tf.reshape(self.target_y_, \n #[self.batch_size_*self.max_time_,1]),\n #inputs=tf.reshape(self.outputs_, \n #[self.batch_size_*self.max_time_,self.H]), \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n #partition_strategy=\"div\" ???\n \n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n #labels=self.target_y_,\n #inputs=tf.reshape(self.outputs_, [-1,self.W_out_.get_shape()[0]]), \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=self.W_out_, biases=self.b_out_, \n #labels=self.target_y_,\n #inputs=tf.reshape(self.x_, [-1, self.W_out_.get_shape()[-1]]), \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n #labels=tf.expand_dims(self.target_y_, 1), inputs=self.x_, \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n with tf.name_scope(\"training_loss_function\"):\n per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n labels=tf.reshape(self.target_y_, \n [self.batch_size_*self.max_time_,1]),\n inputs=tf.reshape(self.outputs_, \n [self.batch_size_*self.max_time_,self.H]), \n num_sampled=self.softmax_ns, num_classes=self.V, \n name=\"per_example_sampled_softmax_loss\")\n #partition_strategy=\"div\" ???\n self.train_loss_ = tf.reduce_mean(per_example_train_loss_, name=\"sampled_softmax_loss\")\n \n #optimizer_ = tf.train.AdamOptimizer()\n #gradient clipping: tf.clip_by_global_norm\n\n\n\n # Define optimizer and training op\n #tvars = tf.trainable_variables()\n #grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), self.max_grad_norm)\n \n #optimizer_ = tf.train.AdamOptimizer(learning_rate=self.learning_rate_)\n #gradients, v = zip(*optimizer_.compute_gradients(self.train_loss_))\n #gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm_)\n #self.train_step_ = optimizer_.apply_gradients(zip(gradients, v))\n \n #self.train_step_ = optimizer_.apply_gradients(zip(grads, tvars))\n #gradient clipping: tf.clip_by_global_norm, self.max_grad_norm\n #self.train_step_ = optimizer_.minimize(self.train_loss_)\n with tf.name_scope(\"optimizer_and_training_op\"):\n optimizer_ = tf.train.AdamOptimizer(learning_rate=self.learning_rate_)\n gradients, v = zip(*optimizer_.compute_gradients(self.train_loss_))\n gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm_)\n self.train_step_ = optimizer_.apply_gradients(zip(gradients, v))\n\n\n\n #### END(YOUR CODE) ####", "def train_step(\n 
config,\n unused_model, # NOT USED\n state,\n unused_opt,\n learning_rate_fn,\n batch,\n rng,\n *unused_args,\n **unused_kwargs,\n):\n step = state.step + 1\n lr = learning_rate_fn(step)\n\n key, rng = jax.random.split(rng)\n rays, pixels = instant_ngp_utils.random_ray_batch(\n key, (config.trainer.per_device_num_rays,), batch\n )\n\n def loss_fn(vox):\n rgb_est, _, _, coarse_den, _, weights, t = instant_ngp_utils.render_rays(\n rays, vox, rng, config\n )\n loss_color_l2 = jnp.mean(jnp.square(rgb_est - pixels))\n loss_color_huber = jnp.mean(huber_loss(rgb_est, pixels))\n loss_distortion = config.trainer.distortion_loss_strength * jnp.mean(\n lossfun_distortion(t, weights)\n )\n loss_density = config.trainer.density_regularization * jnp.mean(\n jnp.square(coarse_den)\n )\n loss = loss_color_huber + loss_density + loss_distortion\n stats = {\n \"loss_color_l2\": loss_color_l2,\n \"loss_color_huber\": loss_color_huber,\n \"loss_density\": loss_density,\n \"loss_distortion\": loss_distortion,\n \"loss\": loss,\n }\n return loss, stats\n\n # Get gradient function, then evaluate it with current parameters\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (loss, output), grad = grad_fn(state.params)\n if config.get(\"multi\"):\n # Compute average gradient across multiple workers.\n grad = jax.lax.pmean(grad, axis_name=\"batch\")\n state = state.apply_gradients(grads=grad)\n\n mse = output[\"loss_color_l2\"]\n if config.get(\"multi\"):\n grad = jax.lax.pmean(mse, axis_name=\"batch\")\n psnr = image_metrics.compute_psnr(mse=mse)\n if config.get(\"multi\"):\n stats = {k: jax.lax.pmean(v, axis_name=\"batch\") for k, v in output.items()}\n metrics_update = TrainMetrics.gather_from_model_output(\n **stats, learning_rate=lr, psnr=psnr\n )\n else:\n metrics_update = TrainMetrics.single_from_model_output(\n **output, learning_rate=lr, psnr=psnr\n )\n return state, metrics_update, {}", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n tf.set_random_seed(42)\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # Parameters\n input_dim = 3 * 32 * 32\n activation_fn, dropout_rate, weight_initializer, weight_regularizer, n_classes, optimizer, batch_size, max_steps, \\\n log_dir, data_dir = _parse_flags(\n FLAGS)\n\n # dataset\n cifar10 = cifar10_utils.get_cifar10(data_dir=data_dir)\n\n # Session\n tf.reset_default_graph()\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.99, allow_growth=True)\n session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Placeholders for images, labels input.\n X = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='inputs')\n y = tf.placeholder(dtype=tf.int32, shape=[None, n_classes], name='labels')\n\n # init network\n net = MLP(n_hidden=dnn_hidden_units, n_classes=n_classes, is_training=True,\n activation_fn=activation_fn, dropout_rate=dropout_rate,\n weight_initializer=weight_initializer,\n weight_regularizer=weight_regularizer)\n\n # Trainings ops\n global_step = tf.Variable(0, trainable=False, name='global_step')\n logits_op = net.inference(X)\n train_flags = {'optimizer': optimizer, 'global_step': global_step, 
'grad_clipping': FLAGS.grad_clipping}\n loss_op = net.loss(logits_op, y)\n accuracy_op = net.accuracy(logits_op, y)\n train_op = net.train_step(loss_op, train_flags)\n confusion_matrix_op = net.confusion_matrix(logits=logits_op, labels=y)\n train_loss = train_accuracy = test_accuracy = test_loss = 0.\n\n # utility ops\n summary_op = tf.summary.merge_all()\n write_logs = FLAGS.log_dir is not None\n save_model = True\n\n if write_logs:\n train_log_path = os.path.join(log_dir, '{}_train'.format(FLAGS.model_name))\n test_log_path = os.path.join(log_dir, '{}_test'.format(FLAGS.model_name))\n _ensure_path_exists(train_log_path)\n _ensure_path_exists(test_log_path)\n train_log_writer = tf.summary.FileWriter('{}_train/'.format(train_log_path), graph=session.graph)\n test_log_writer = tf.summary.FileWriter('{}_test/'.format(test_log_path), graph=session.graph)\n\n # Initialize variables\n init_op = tf.global_variables_initializer()\n local_init_op = tf.local_variables_initializer()\n session.run(fetches=[init_op, local_init_op])\n\n # track losses\n stats = defaultdict(list)\n\n # loop over steps\n for _step in range(FLAGS.max_steps):\n\n # get batch of data\n X_train, y_train = cifar10.train.next_batch(batch_size)\n X_train = np.reshape(X_train, (batch_size, -1))\n # feed to model\n train_feed = {X: X_train, y: y_train, net.training_mode: True}\n fetches = [train_op, loss_op, accuracy_op]\n\n # Training set\n if _step % 13 == 0 and write_logs: # write summary\n fetches += [summary_op]\n _, train_loss, train_accuracy, train_summary = session.run(fetches=fetches, feed_dict=train_feed)\n train_log_writer.add_summary(train_summary, _step)\n else:\n _, train_loss, train_accuracy = session.run(fetches=fetches, feed_dict=train_feed)\n\n if _step % 10 == 0:\n print('Ep.{}: train_loss:{:+.4f}, train_accuracy:{:+.4f}'.format(_step, train_loss, train_accuracy))\n stats = _update_stats(stats, train_loss=train_loss, train_accuracy=train_accuracy)\n\n # Sanity check\n if np.isnan(train_loss):\n print('Warning: training loss is NaN.. 
')\n break\n\n # Test set evaluation\n if (_step + 1) % 100 == 0:\n X_test, y_test = cifar10.test.images, cifar10.test.labels\n X_test = np.reshape(X_test, [X_test.shape[0], -1])\n test_feed = {X: X_test, y: y_test, net.training_mode: False}\n test_loss, test_accuracy, test_logits, test_confusion_matrix, test_summary = session.run(\n fetches=[loss_op, accuracy_op, logits_op,\n confusion_matrix_op, summary_op],\n feed_dict=test_feed)\n\n if write_logs:\n test_log_writer.add_summary(test_summary, _step)\n\n stats = _update_stats(stats, test_loss=test_loss, test_accuracy=test_accuracy,\n test_confusion_matrix=test_confusion_matrix)\n print('==> Ep.{}: test_loss:{:+.4f}, test_accuracy:{:+.4f}'.format(_step, test_loss, test_accuracy))\n print('==> Confusion Matrix on test set \\n {} \\n'.format(test_confusion_matrix))\n\n if _step > 1000 and test_accuracy < 0.25: # hopeless trials\n save_model = False\n break\n\n # Early stopping: if the last test accuracy is not above the mean of prev 10 epochs, stop\n delta = 1e-4 # accuracy is in decimals\n if _step > 1000:\n window = stats['test_accuracy'][-10:]\n window_accuracy = sum(window) / len(window)\n\n if abs(test_accuracy - window_accuracy) < delta:\n print(\n '\\n==> EARLY STOPPING with accuracy {} and moving-window mean accuracy {} \\n'.format(test_accuracy,\n window_accuracy))\n\n # save model\n if write_logs:\n train_log_writer.close()\n test_log_writer.close()\n\n if save_model:\n save_dir = os.path.join(FLAGS.save_path, FLAGS.model_name)\n saver = tf.train.Saver()\n _ensure_path_exists(save_dir)\n saver.save(session, save_path=os.path.join(save_dir, 'model.ckpt'))\n\n # save results for easy plotting\n results_dir = os.path.relpath('./results')\n _ensure_path_exists(results_dir)\n with open(os.path.join(results_dir, '{}.pkl'.format(FLAGS.model_name)), 'wb') as f:\n pickle.dump(stats, f)\n\n\n #######################\n # END OF YOUR CODE #\n #######################", "def one_step_gd(self, batch):\n\n # get target values yj\n targets = self.get_target(batch)\n phi_input = np.vstack(batch[0])\n masks = self.get_masks(batch[1])\n dummy_targets = targets.max(axis=1)\n\n X = [phi_input, targets, masks]\n Y = [dummy_targets, targets]\n\n # update main network with one step of gradient descent\n # self.Qmodel.fit(X, Y, batch_size=len(X))\n # pdb.set_trace()\n metrics = self.train_model.train_on_batch(X, Y)\n\n # every fixed number of steps, update target network\n self.c_count += 1\n # print(self.c_count, self.c)\n\n if self.c_count == self.c:\n # if self.verbose:\n # print('* Target network updated')\n\n # update target network to be equal the main network\n self.update_target_network()\n\n # reset counter\n self.c_count = 0\n\n return metrics[0]", "def create_model_optimizer(net,alpha):\n optimizer = chainer.optimizers.Adam(alpha=alpha)\n optimizer.setup(net)\n return optimizer", "def train(self):\n learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps,self.decay_rate, staircase=True)\n self.learning_rate_=learning_rate\n #noise_std_dev = tf.constant(0.3) / (tf.sqrt(tf.cast(tf.constant(1) + self.global_step, tf.float32))) #gradient_noise_scale=noise_std_dev\n train_op = tf_contrib.layers.optimize_loss(self.loss_val, global_step=self.global_step,\n learning_rate=learning_rate, optimizer=\"Adam\",clip_gradients=self.clip_gradients)\n return train_op", "def __init__(self, Lambda, epsilon, hidden_dims, alpha = 0.1):\n self.SAVE_STEP_NUM = 100000 # modulus of steps to save (100,000 currently)\n 
self.SAVE_DIREC = \"./models/\"\n self.SAVE_FILE = self.SAVE_DIREC + \"checkers-model.ckpt\"\n self.Lambda = Lambda\n self.alpha = alpha\n self.epsilon = epsilon\n self.hidden_dims = hidden_dims\n self.old_state = None\n self.old_reward = 0\n self.first_step = True\n\n # sets up the network\n tf.reset_default_graph()\n\n # create model\n self.inputs1, self.Qout, self.weights, self.biases = self.build_mlp(hidden_dims)\n self.init = tf.global_variables_initializer()\n\n # placeholder for old Q value for training\n self.placeholder_q = tf.placeholder(tf.float32, shape=[1, 1])\n\n self.train_step = 0\n self.sess = tf.Session()\n self.saver = tf.train.Saver()\n # restore saved graph + variables - https://www.tensorflow.org/programmers_guide/saved_model\n file = tf.train.latest_checkpoint(self.SAVE_DIREC)\n if file is not None:\n print(\"Loading model from %s\" % file)\n self.saver = tf.train.import_meta_graph(self.SAVE_FILE + \".meta\")\n self.saver.restore(self.sess, file)\n\n # result of next Q values used in Bellman update equation\n self.loss = tf.reduce_sum(tf.square(self.placeholder_q - self.Qout))\n self.trainer = tf.train.GradientDescentOptimizer(alpha)\n self.updateModel = self.trainer.minimize(self.loss)\n\n # finalize structure\n tf.get_default_graph().finalize()", "def optimize(self):\n \n # converting from batch to local quantities\n if self.dispersion == \"gene-batch\":\n local_dispersion = tf.matmul(self.batch, tf.exp(self.px_r))\n else: \n local_dispersion = tf.exp(self.px_r)\n \n if self.library_mode == \"numeric\":\n local_l_mean = self.library_size_mean\n local_l_var = self.library_size_var\n else:\n local_l_mean = tf.matmul(self.batch, self.library_size_mean)\n local_l_var = tf.matmul(self.batch, self.library_size_var)\n \n \n # VAE loss\n if self.zi:\n recon = log_zinb_positive(self.expression, self.px_rate, local_dispersion, \\\n self.px_dropout)\n else:\n recon = log_nb_positive(self.expression, self.px_rate, local_dispersion)\n \n kl_gauss_z = 0.5 * tf.reduce_sum(\\\n tf.square(self.qz_m) + self.qz_v - tf.log(1e-8 + self.qz_v) - 1, 1)\n\n if self.scalings:\n kl_gauss_l = 0.5 * tf.reduce_sum(\\\n tf.square(self.ql_m - local_l_mean) / local_l_var \\\n + self.ql_v / local_l_var \\\n + tf.log(1e-8 + local_l_var) - tf.log(1e-8 + self.ql_v) - 1, 1)\n \n if self.scalings:\n self.ELBO_gau = tf.reduce_mean(recon - self.kl_scale * kl_gauss_z - kl_gauss_l)\n else:\n self.ELBO_gau = tf.reduce_mean(recon - self.kl_scale * kl_gauss_z)\n \n # MMD loss\n if self.apply_mmd:\n self.mmd = mmd_objective(self.z, self.batch_ind, self.num_batches)\n self.loss = - self.ELBO_gau + self.mmd_scale * self.mmd\n \n else:\n self.loss = - self.ELBO_gau\n \n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n optimizer = self.optimize_algo\n with tf.control_dependencies(update_ops):\n self.train_step = optimizer.minimize(self.loss)", "def optimization(err_acc, learning_rate):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n tvars = tf.trainable_variables()\n grads = tf.gradients(err_acc, tvars)\n tg_pairs = [(tf.clip_by_value(k[0], -100, 100), k[1]) for k in zip(grads, tvars) if k[0] is not None]\n train_op = optimizer.apply_gradients(tg_pairs)\n return train_op", "def optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n # TODO: Implement function\n\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n correct_label = tf.reshape(correct_label, (-1, num_classes))\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, 
labels = correct_label))\n tf.summary.scalar(\"Training Loss\", cross_entropy_loss) \n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08)\n\n # IOU as loss function - ** does not give correct iou. Need to figure out. So not using as loss function anymore\n iou_logits = tf.reshape(nn_last_layer, [-1])\n iou_labels = tf.reshape(correct_label, [-1])\n intersection = tf.reduce_sum(tf.multiply(logits, correct_label))\n union = tf.reduce_sum(tf.subtract(tf.add(logits, correct_label), tf.multiply(logits, correct_label)))\n iou = tf.div(intersection, union)\n # iou_loss = tf.subtract(tf.constant(1.0, dtype=tf.float32), iou)\n\n # iou, iou_op = tf.metrics.mean_iou(correct_label, logits, num_classes)\n # iou_loss = tf.subtract(tf.constant(1.0, dtype=tf.float32), iou)\n\n # train_op = optimizer.minimize(iou_loss)\n train_op = optimizer.minimize(cross_entropy_loss)\n\n # return logits, train_op, iou_loss\n return logits, train_op, cross_entropy_loss, iou", "def _build_train_ops(train_params):\n global_step = tf.get_variable('global_step', shape=[], dtype='int32',\n initializer=tf.constant_initializer(0), trainable=False)\n #global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection(tf.GraphKeys.LOSSES)\n if len(loss) == 0:\n raise RuntimeError(\"No losses found in losses collection\")\n loss = tf.add_n(loss, name=\"loss\")\n\n if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:\n # Add any summaries client stored in SUMMARIES\n summary_tensor = tf.summary.merge([[tf.summary.tensor_summary(\"loss\", loss)] +\n tf.get_collection(tf.GraphKeys.SUMMARIES)])\n else:\n summary_tensor = tf.summary.tensor_summary(\"loss\", loss)\n\n train_objective = loss\n\n regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if len(regularizers) > 0:\n regularization_loss = tf.add_n(regularizers, name=\"regularization_loss\")\n if train_params.regularization_weight is not None:\n train_objective = train_objective + regularization_loss * train_params.regularization_weight\n else:\n train_objective = train_objective + regularization_loss\n else:\n regularization_loss = None\n\n opt = train_params.opt.get()\n opt = hvd.DistributedOptimizer(opt)\n #train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)\n train_opt = opt.minimize(train_objective, global_step=global_step)\n\n if train_params.ema is not None:\n ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)\n ema_op = ema.apply(tf.trainable_variables())\n with tf.control_dependencies([train_opt]):\n # Run the old training op, then update the averages.\n train_opt = tf.group(ema_op)\n else:\n ema = None\n\n # Any collections starting with \"monitor\" are also added as summaries\n to_monitor = {}\n for col in tf.get_default_graph().get_all_collection_keys():\n if col.startswith(\"monitor\"):\n v = tf.get_collection(col)\n if len(v) > 0:\n print(\"Monitoring: \" + col)\n v = tf.add_n(v)\n to_monitor[col] = v\n\n if len(to_monitor) > 0:\n monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name=\"MonitorEMA\",\n zero_debias=True)\n train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))\n summary_tensor = tf.summary.merge(\n [tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +\n [summary_tensor])\n\n # EMA for the loss and what we monitoring\n if train_params.loss_ema is not None:\n loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, 
name=\"LossEMA\", zero_debias=True)\n\n if regularization_loss is None:\n ema_op = loss_ema.apply([loss])\n train_opt = tf.group(train_opt, ema_op)\n ema_var = loss_ema.average(loss)\n summary_tensor = tf.summary.merge([tf.summary.scalar(\"training-ema/loss\", ema_var), summary_tensor])\n else:\n to_track = [loss, train_objective, regularization_loss]\n ema_op = loss_ema.apply(to_track)\n train_opt = tf.group(train_opt, ema_op)\n tensor_vars = [\n tf.summary.scalar(\"training-ema/loss\", loss_ema.average(loss)),\n tf.summary.scalar(\"training-ema/objective\", loss_ema.average(train_objective)),\n tf.summary.scalar(\"training-ema/regularization-loss\",\n loss_ema.average(regularization_loss))\n ]\n summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])\n\n return loss, summary_tensor, train_opt, global_step, ema", "def __init__(self,ls,activations = [tf.nn.tanh, tf.nn.tanh, None], sess = None, RL = False, lr = 1e-2, reg_scale = 0.1):\n self.ls = ls\n if sess == None:\n self.sess = self.tf_reset()\n else:\n self.sess = sess\n self.activations = activations\n self.input_ph = tf.placeholder(dtype=tf.float32, shape=[None, ls[0]], name = 'msh_input_placeholder') # batch-size by state size\n self.output_ph = tf.placeholder(dtype=tf.float32, shape=[None, ls[-1]]) # action space size\n\n self.fc1 = tf.contrib.layers.fully_connected(self.input_ph, ls[1],\n weights_regularizer=tf.contrib.layers.l2_regularizer(reg_scale),\n activation_fn=activations[0])\n\n self.fc2 = tf.contrib.layers.fully_connected(self.fc1, ls[2],\n weights_regularizer=tf.contrib.layers.l2_regularizer(reg_scale),\n activation_fn=activations[1])\n\n self.output_pred = tf.contrib.layers.fully_connected(self.fc2, ls[-1],\n weights_regularizer=tf.contrib.layers.l2_regularizer(reg_scale),\n activation_fn=activations[-1])\n\n\n # self.W_dict = {}\n # self.b_dict = {}\n # for i in range(len(ls)-1):\n # self.W_dict[i] = tf.get_variable(name='W'+str(i), shape=[ls[i], ls[i+1]], initializer=tf.contrib.layers.xavier_initializer())\n # self.b_dict[i] = tf.get_variable(name='b'+str(i), shape=[ls[i+1]], initializer=tf.constant_initializer(0.))\n\n\n # self.layer = self.input_ph\n # print(tf.shape(self.layer))\n\n\n # for i in range(len(self.activations)):\n # self.layer = tf.matmul(self.layer, self.W_dict[i]) + self.b_dict[i]\n # print(tf.shape(self.layer))\n # if self.activations[i] is not None:\n # self.layer = self.activations[i](self.layer)\n # self.output_pred = self.layer\n\n if RL == True: \n with tf.name_scope('reward_holder'):\n self.reward_holder = tf.placeholder(shape=[None],dtype=tf.float32)\n \n with tf.name_scope('get_resp_outs'):\n self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32, name = 'action_holder')\n \n self.indexes = tf.range(0, tf.shape(self.output_pred)[0]) * tf.shape(self.output_pred)[1] + self.action_holder\n\n self.responsible_outputs = tf.gather(tf.reshape(self.output_pred, [-1]), self.indexes, name = 'responsible_outputs')\n # out of the output vector, this will pull out the indexes\n # But i still don't understand indexes.\n\n # i feel like instead of going thru all of this, you could have just saved the actual outputs. I think I'll try that.\n # then for responsible outputs, you'd do tf.gather(outputs, action_holder) oh maybe it's not different than this. \n # Maybe that's exactly what they're doing, bc action_holder is a scaler number. 
IDK.\n with tf.name_scope('loss'):\n self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.reward_holder) #becuase reward_holder value \n # doesn't directly change as you change the Weights, this is equivalent to multiplying the gradient by the reward.\n # when you take the gradient, you're solving for d(log*A)/dW = d(log_p)/dW * d(log_p*A)/d(log_p) = A*d(log_p)/dW. so it's equivalent to mult gradient\n # by the reward function\n tvars = tf.trainable_variables()\n\n with tf.name_scope('update'):\n # self.train_step = tf.train.RMSPropOptimizer(learning_rate = lr, decay = 0.99).minimize(self.loss)\n self.train_step = tf.train.AdamOptimizer().minimize(self.loss)\n self.init = tf.global_variables_initializer()", "def __init__(self, state_size, action_size, LR_ACTOR=.0001, LR_CRITIC=.001, noise_magnitude=.1, BUFFER_SIZE=1000000, BATCH_SIZE=32, GAMMA=.99, TAU=.001):\n # state_size - shape of the oberved state\n # action_size - shape of the actions taken by the actor\n # LR_ACTOR - learning rate for the actor network\n # LR_CRITIC - learning rate for the critic network\n # noise_magnitude - the magnitude for the GaussianNoiseProcess\n # BUFFER_SIZE - the number of action/observation pairs to be kept in the buffer memory\n # BATCH_SIZE - the number of random observations pulled for the network to learn on\n # GAMMA - discount factor for the learning process\n # TAU - update rate for target networks\n self.state_size = state_size\n self.action_size = action_size\n self.lr_actor = LR_ACTOR\n self.lr_critic = LR_CRITIC\n self.noise_magnitude = noise_magnitude\n self.buffer_size = BUFFER_SIZE\n self.batch_size = BATCH_SIZE\n self.gamma = GAMMA\n self.tau = TAU\n self.episode = 0\n self.training_time = 0\n self.updating_time = 0\n self.selecting_time = 0\n # Actor Network (w/ Target Network)\n\n self.actor_local = actor_network(state_size, action_size)\n self.actor_target = actor_network(state_size, action_size)\n # setting weights to be the same\n self.actor_target.set_weights(self.actor_local.get_weights())\n self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_actor)\n\n # Critic Network (w/ Target Network)\n self.critic_local = critic_network(state_size, action_size)\n self.critic_target = critic_network(state_size, action_size)\n self.critic_target.set_weights(self.critic_local.get_weights())\n self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_critic)\n\n # Noise process\n self.noise = GaussianNoiseProcess(self.noise_magnitude, self.action_size)\n # self.noise = OUNoise(self.action_size)\n # Replay memory\n self.memory = ReplayBuffer(action_size, self.buffer_size, self.batch_size)", "def compile_optimizer(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg.learning_rate)\n\n return optimizer", "def build_net(graph, training=True, validation=False):\n\n with graph.as_default(): \n x = tf.placeholder(tf.float32, [None] + resize_shape, 'x')\n # TODO: use len(labels_map)\n y = tf.placeholder(tf.int32, [None, 17], 'y')\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n keep_prob_fc1 = tf.placeholder(tf.float32, name='keep_prob_fc1')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n # Create Input Pipeline for Train, Validation and Test Sets\n if training:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[:index_split_train_val],\n labels=labels_onehot_list[:index_split_train_val],\n 
batch_size=batch_size,\n n_epochs=n_epochs,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training,\n randomize=True)\n elif validation:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[index_split_train_val:],\n labels=labels_onehot_list[index_split_train_val:],\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training) \n else:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=test_image_paths,\n labels=test_onehot_list,\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training)\n\n Ws = []\n \n current_input = x\n\n for layer_i, n_output in enumerate(n_filters):\n with tf.variable_scope('layer{}'.format(layer_i)):\n # 2D Convolutional Layer with batch normalization and relu\n h, W = utils.conv2d(x=current_input,\n n_output=n_output,\n k_h=filter_sizes[layer_i],\n k_w=filter_sizes[layer_i])\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, 'relu' + str(layer_i))\n\n # Apply Max Pooling Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.max_pool(value=h,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Apply Dropout Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.dropout(h, keep_prob)\n\n Ws.append(W)\n current_input = h\n\n h = utils.linear(current_input, fc_size, name='fc_t')[0]\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, name='fc_t/relu')\n h = tf.nn.dropout(h, keep_prob_fc1)\n\n logits = utils.linear(h, len(labels_map), name='fc_t2')[0]\n h = tf.nn.sigmoid(logits, 'fc_t2')\n\n # must be the same type as logits\n y_float = tf.cast(y, tf.float32)\n\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=y_float)\n loss = tf.reduce_mean(cross_entropy)\n\n if training:\n # update moving_mean and moving_variance so it will be available at inference time\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n else:\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n saver = tf.train.Saver()\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n return batch, batch_labels, batch_image_paths, init, x, y, phase_train, keep_prob, keep_prob_fc1, learning_rate, h, loss, optimizer, saver", "def architecture(lq, img=None, mode=None):\r\n\r\n phase = mode == tf.estimator.ModeKeys.TRAIN #phase is true during training\r\n concat_axis = 3\r\n\r\n ##Reusable blocks\r\n\r\n def conv_block(input, filters, phase=phase):\r\n \"\"\"\r\n Convolution -> batch normalisation -> leaky relu\r\n phase defaults to true, meaning that the network is being trained\r\n \"\"\"\r\n\r\n conv_block = tf.layers.conv2d(\r\n inputs=input,\r\n filters=filters,\r\n kernel_size=3,\r\n padding=\"SAME\",\r\n activation=tf.nn.relu)\r\n\r\n #conv_block = tf.contrib.layers.batch_norm(\r\n # conv_block, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n #conv_block = tf.nn.leaky_relu(\r\n # features=conv_block,\r\n # alpha=0.2)\r\n #conv_block = tf.nn.relu(conv_block)\r\n\r\n return conv_block\r\n\r\n def aspp_block(input, phase=phase):\r\n \"\"\"\r\n Atrous spatial pyramid pooling\r\n phase defaults to true, meaning that the network is 
being trained\r\n \"\"\"\r\n\r\n #Convolutions at multiple rates\r\n conv1x1 = tf.layers.conv2d(\r\n inputs=input,\r\n filters=aspp_filters,\r\n kernel_size=1,\r\n padding=\"same\",\r\n activation=tf.nn.relu,\r\n name=\"1x1\")\r\n #conv1x1 = tf.contrib.layers.batch_norm(\r\n # conv1x1, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n conv3x3_rateSmall = tf.layers.conv2d(\r\n inputs=input,\r\n filters=aspp_filters,\r\n kernel_size=3,\r\n padding=\"same\",\r\n dilation_rate=aspp_rateSmall,\r\n activation=tf.nn.relu,\r\n name=\"lowRate\")\r\n #conv3x3_rateSmall = tf.contrib.layers.batch_norm(\r\n # conv3x3_rateSmall, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n conv3x3_rateMedium = tf.layers.conv2d(\r\n inputs=input,\r\n filters=aspp_filters,\r\n kernel_size=3,\r\n padding=\"same\",\r\n dilation_rate=aspp_rateMedium,\r\n activation=tf.nn.relu,\r\n name=\"mediumRate\")\r\n #conv3x3_rateMedium = tf.contrib.layers.batch_norm(\r\n # conv3x3_rateMedium, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n conv3x3_rateLarge = tf.layers.conv2d(\r\n inputs=input,\r\n filters=aspp_filters,\r\n kernel_size=3,\r\n padding=\"same\",\r\n dilation_rate=aspp_rateLarge,\r\n activation=tf.nn.relu,\r\n name=\"highRate\")\r\n #conv3x3_rateLarge = tf.contrib.layers.batch_norm(\r\n # conv3x3_rateLarge, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n #Image-level features\r\n pooling = tf.nn.pool(\r\n input=input,\r\n window_shape=(2,2),\r\n pooling_type=\"AVG\",\r\n padding=\"SAME\",\r\n strides=(2, 2))\r\n #Use 1x1 convolutions to project into a feature space the same size as the atrous convolutions'\r\n pooling = tf.layers.conv2d(\r\n inputs=pooling,\r\n filters=aspp_filters,\r\n kernel_size=1,\r\n padding=\"SAME\",\r\n name=\"imageLevel\")\r\n pooling = tf.image.resize_images(pooling, [64, 64])\r\n #pooling = tf.contrib.layers.batch_norm(\r\n # pooling,\r\n # center=True, scale=True,\r\n # is_training=phase)\r\n\r\n #Concatenate the atrous and image-level pooling features\r\n concatenation = tf.concat(\r\n values=[conv1x1, conv3x3_rateSmall, conv3x3_rateMedium, conv3x3_rateLarge, pooling],\r\n axis=concat_axis)\r\n\r\n #Reduce the number of channels\r\n reduced = tf.layers.conv2d( #Not sure if this is the correct way to reshape...\r\n inputs=concatenation,\r\n filters=aspp_filters,\r\n kernel_size=1,\r\n padding=\"SAME\")\r\n\r\n return reduced\r\n\r\n\r\n def strided_conv_block(input, filters, stride, rate=1, phase=phase):\r\n \r\n return slim.separable_convolution2d(\r\n inputs=input,\r\n num_outputs=filters,\r\n kernel_size=3,\r\n depth_multiplier=1,\r\n stride=stride,\r\n padding='SAME',\r\n data_format='NHWC',\r\n rate=rate,\r\n activation_fn=tf.nn.relu,\r\n normalizer_fn=None,\r\n normalizer_params=None,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n weights_regularizer=None,\r\n biases_initializer=tf.zeros_initializer(),\r\n biases_regularizer=None,\r\n reuse=None,\r\n variables_collections=None,\r\n outputs_collections=None,\r\n trainable=True,\r\n scope=None)\r\n\r\n\r\n def deconv_block(input, filters, phase=phase):\r\n '''Transpositionally convolute a feature space to upsample it'''\r\n \r\n deconv_block = tf.layers.conv2d_transpose(\r\n inputs=input,\r\n filters=filters,\r\n kernel_size=3,\r\n strides=2,\r\n padding=\"SAME\",\r\n activation=tf.nn.relu)\r\n\r\n #deconv_block = tf.contrib.layers.batch_norm(\r\n # deconv_block, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n #deconv_block = 
tf.nn.leaky_relu(\r\n # features=deconv_block,\r\n # alpha=0.2)\r\n #deconv_block = tf.nn.relu(deconv_block)\r\n\r\n return deconv_block\r\n\r\n '''Model building'''\r\n input_layer = tf.reshape(lq, [-1, cropsize, cropsize, channels])\r\n\r\n #Encoding block 0\r\n cnn0 = conv_block(\r\n input=input_layer, \r\n filters=features0)\r\n cnn0_last = conv_block(\r\n input=cnn0, \r\n filters=features0)\r\n cnn0_strided = strided_conv_block(\r\n input=cnn0_last,\r\n filters=features0,\r\n stride=2)\r\n\r\n #Encoding block 1\r\n cnn1 = conv_block(\r\n input=cnn0_strided, \r\n filters=features1)\r\n cnn1_last = conv_block(\r\n input=cnn1, \r\n filters=features1)\r\n cnn1_strided = strided_conv_block(\r\n input=cnn1_last,\r\n filters=features1,\r\n stride=2)\r\n\r\n #Encoding block 2\r\n cnn2 = conv_block(\r\n input=cnn1_strided,\r\n filters=features2)\r\n cnn2_last = conv_block(\r\n input=cnn2,\r\n filters=features2)\r\n cnn2_strided = strided_conv_block(\r\n input=cnn2_last,\r\n filters=features2,\r\n stride=2)\r\n\r\n #Encoding block 3\r\n cnn3 = conv_block(\r\n input=cnn2_strided,\r\n filters=features3)\r\n cnn3 = conv_block(\r\n input=cnn3,\r\n filters=features3)\r\n cnn3_last = conv_block(\r\n input=cnn3,\r\n filters=features3)\r\n cnn3_strided = strided_conv_block(\r\n input=cnn3_last,\r\n filters=features3,\r\n stride=2)\r\n\r\n #Encoding block 4\r\n cnn4 = conv_block(\r\n input=cnn3_strided,\r\n filters=features4)\r\n cnn4 = conv_block(\r\n input=cnn4,\r\n filters=features4)\r\n cnn4_last = conv_block(\r\n input=cnn4,\r\n filters=features4)\r\n\r\n #cnn4_strided = split_separable_conv2d(\r\n # inputs=cnn4_last,\r\n # filters=features4,\r\n # rate=2,\r\n # stride=2)\r\n\r\n #Prepare for aspp\r\n aspp_input = strided_conv_block(\r\n input=cnn4_last,\r\n filters=features4,\r\n stride=1,\r\n rate=2)\r\n aspp_input = conv_block(\r\n input=aspp_input,\r\n filters=features4)\r\n\r\n ##Atrous spatial pyramid pooling\r\n aspp = aspp_block(aspp_input)\r\n\r\n #Upsample the semantics by a factor of 4\r\n #upsampled_aspp = tf.image.resize_bilinear(\r\n # images=aspp,\r\n # tf.shape(aspp)[1:3],\r\n # align_corners=True)\r\n\r\n #Decoding block 1 (deepest)\r\n deconv4 = conv_block(aspp, features4)\r\n deconv4 = conv_block(deconv4, features4)\r\n deconv4 = conv_block(deconv4, features4)\r\n \r\n #Decoding block 2\r\n deconv4to3 = deconv_block(deconv4, features4)\r\n concat3 = tf.concat(\r\n values=[deconv4to3, cnn3_last],\r\n axis=concat_axis)\r\n deconv3 = conv_block(concat3, features3)\r\n deconv3 = conv_block(deconv3, features3)\r\n deconv3 = conv_block(deconv3, features3)\r\n\r\n #Decoding block 3\r\n deconv3to2 = deconv_block(deconv3, features3)\r\n concat2 = tf.concat(\r\n values=[deconv3to2, cnn2_last],\r\n axis=concat_axis)\r\n deconv2 = conv_block(concat2, features2)\r\n deconv2 = conv_block(deconv2, features2)\r\n \r\n #Decoding block 4\r\n deconv2to1 = deconv_block(deconv2, features2)\r\n concat1 = tf.concat(\r\n values=[deconv2to1, cnn1_last],\r\n axis=concat_axis)\r\n deconv1 = conv_block(concat1, features1)\r\n deconv1 = conv_block(deconv1, features1)\r\n\r\n #Decoding block 5\r\n deconv1to0 = deconv_block(deconv1, features1)\r\n concat0 = tf.concat(\r\n values=[deconv1to0, cnn0_last],\r\n axis=concat_axis)\r\n deconv0 = conv_block(concat0, features0)\r\n deconv0 = conv_block(deconv0, features0)\r\n\r\n #Create final image with 1x1 convolutions\r\n deconv_final = tf.layers.conv2d_transpose(\r\n inputs=deconv0,\r\n filters=1,\r\n kernel_size=3,\r\n padding=\"SAME\",\r\n 
activation=tf.nn.relu)\r\n\r\n\r\n #Residually connect the input to the output\r\n output = deconv_final#+input_layer\r\n\r\n #Image values will be between 0 and 1\r\n output = tf.clip_by_value(\r\n output,\r\n clip_value_min=0,\r\n clip_value_max=1)\r\n\r\n if phase: #Calculate loss during training\r\n ground_truth = tf.reshape(img, [-1, cropsize, cropsize, channels])\r\n loss = 1.0-tf_ssim(output, ground_truth)#cropsize*cropsize*tf.reduce_mean(tf.squared_difference(output, ground_truth))\r\n \r\n #tf.log(cropsize*cropsize*tf.reduce_mean(tf.squared_difference(output, ground_truth))+1)\r\n #tf.summary.histogram(\"loss\", loss)\r\n else:\r\n loss = -1\r\n\r\n return loss, output", "def __gpu_task__(\r\n self, batch_size=64, is_training=False, data_batch=None,\r\n opt_op=None, code_batch=None):\r\n if is_training:\r\n # sample new data, [batch_size*2, height, weight, channels]\r\n if self.sample_same_class:\r\n code_batch = self.sample_codes(batch_size, code_y=data_batch['y'], name='code_tr')\r\n else:\r\n code_batch = self.sample_codes(batch_size, name='code_tr')\r\n gen_batch = self.Gen(code_batch, is_training=is_training)\r\n dis_out = self.Dis(self.concat_two_batches(data_batch, gen_batch), is_training=True)\r\n s_x, s_gen = tf.split(dis_out['x'], num_or_size_splits=2, axis=0)\r\n\r\n # loss function\r\n gan_losses = GANLoss(self.do_summary)\r\n if self.loss_type in {'rep', 'rmb'}:\r\n loss_gen, loss_dis = gan_losses.apply(\r\n s_gen, s_x, self.loss_type, batch_size=batch_size, d=self.score_size,\r\n rep_weights=self.rep_weights)\r\n else:\r\n loss_gen, loss_dis = gan_losses.apply(\r\n s_gen, s_x, self.loss_type, batch_size=batch_size, d=self.score_size)\r\n\r\n # form loss list\r\n # sigma = [layer.sigma for layer in self.Dis.net.layers]\r\n # kernel_norm = tf.squeeze(self.Dis.net.layers[-1].ops['kernel'].kernel_norm[1])\r\n loss_list = [loss_gen, loss_dis]\r\n self.loss_names = '<loss_gen>, <loss_dis>'\r\n\r\n # compute gradient\r\n # grads is a list of (gradient, variable) tuples\r\n # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n # with tf.control_dependencies(update_ops):\r\n vars_dis = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \"dis\")\r\n grads_dis = opt_op[0].compute_gradients(loss_dis, var_list=vars_dis)\r\n vars_gen = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \"gen\")\r\n grads_gen = opt_op[1].compute_gradients(loss_gen, var_list=vars_gen)\r\n grads_list = [grads_dis, grads_gen]\r\n\r\n # summary op is always pinned to CPU\r\n # add summary to loss and intermediate variables\r\n if self.do_summary:\r\n tf.summary.histogram('x/x', data_batch['x'])\r\n tf.summary.histogram('x/x_gen', gen_batch['x'])\r\n tf.summary.histogram('x/sx', s_x)\r\n tf.summary.histogram('x/sg', s_gen)\r\n g_x = tf.reshape(tf.gradients(s_x, data_batch['x'])[0], [batch_size, -1])\r\n g_x_norm = tf.norm(g_x, ord=2, axis=1)\r\n tf.summary.histogram('x/g_x_norm', g_x_norm)\r\n g_gen = tf.reshape(tf.gradients(s_gen, gen_batch['x'])[0], [batch_size, -1])\r\n g_gen_norm = tf.norm(g_gen, ord=2, axis=1)\r\n tf.summary.histogram('x/g_gen_norm', g_gen_norm)\r\n self.Gen.net.add_summary('kernel_norm')\r\n self.Dis.net.add_summary('kernel_norm')\r\n\r\n return grads_list, loss_list\r\n else:\r\n if code_batch is None:\r\n code_batch = self.sample_codes(batch_size, name='code_te')\r\n # generate new images\r\n gen_batch = self.Gen(code_batch, is_training=is_training)\r\n return gen_batch", "def train(self):\n #learning_rate = tf.train.exponential_decay(self.learning_rate, 
self.global_step, self.decay_steps,self.decay_rate, staircase=True) #去掉decay_steps\n train_op = tf.contrib.layers.optimize_loss(self.losses, global_step=self.global_step, learning_rate=self.learning_rate, optimizer=\"Adam\")\n return train_op", "def train(self): \n self.current_step = 0\n self.log = log_setup(self.args)\n self.current_gamma = self.args.initial_gamma\n with tf.Session(graph = self.computation_graph) as session:\n self.init.run()\n print(\"Model Initialized.\")\n for repetition in range(0, self.args.epochs):\n\n random.shuffle(self.nodes)\n self.optimization_time = 0 \n self.average_loss = 0\n\n epoch_printer(repetition)\n for i in tqdm(range(int(len(self.edges)/self.args.batch_size))):\n self.current_step = self.current_step + 1\n self.current_gamma = gamma_incrementer(self.current_step, self.args.initial_gamma, self.current_gamma, self.true_step_size)\n feed_dict = self.feed_dict_generator(self.edges[i*self.args.batch_size:(i+1)*self.args.batch_size], self.current_step, self.current_gamma)\n start = time.time()\n _, loss = session.run([self.train_op , self.loss], feed_dict=feed_dict)\n end = time.time()\n self.optimization_time = self.optimization_time + (end-start)\n self.average_loss = self.average_loss + loss\n\n print(\"\")\n self.average_loss = self.average_loss/self.vocab_size\n self.final_embeddings = self.factorization_layer.embedding_matrix.eval()\n if \"CODE\" in self.args.model: \n self.c_means = self.cluster_layer.cluster_means.eval()\n self.modularity_score, assignments = neural_modularity_calculator(self.graph, self.final_embeddings, self.c_means)\n else:\n self.modularity_score, assignments = classical_modularity_calculator(self.graph, self.final_embeddings, self.args)\n self.log = log_updater(self.log, repetition, self.average_loss, self.optimization_time, self.modularity_score)\n tab_printer(self.log)\n if \"CODE\" in self.args.model: \n initiate_dump_grafcode(self.log, assignments, self.args, self.final_embeddings, self.c_means)\n else:\n initiate_dump_graf(self.log, assignments, self.args, self.final_embeddings)", "def compile_network(model, optimizer):\n compile_network_model(model, optimizer, categorical_crossentropy)", "def train(generator,\n discriminator,\n dataset,\n batch_size=128,\n epochs = 10,\n print_freq = 50,\n start_epoch = 0,\n checkpoint_dir = '',\n checkpoint = None,\n progression_images_dir = '',\n save_img_freq=400,\n lat_dim = 100\n \n):\n\n # HYPERPARAMETERS\n lr = 0.0002\n beta_1 = 0.5\n gen_optim = tf.keras.optimizers.Adam(lr = lr,beta_1 = beta_1)\n dis_optim = tf.keras.optimizers.Adam(lr = lr, beta_1 = beta_1)\n checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\n checkpoint = tf.train.Checkpoint(\n generator=generator, \n discriminator=discriminator\n )\n cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n # TRAINING IMAGES USED TO KEEP TRACK OF PROGRESSION THROUGHOUT THE EPOCHS\n tf.random.set_seed(123)\n prog_images = tf.random.normal([16, 1, 1, lat_dim])\n\n G_loss_history = []\n D_loss_history = []\n\n for epoch in range(start_epoch, epochs):\n # This will be later used to print things to the terminal \n start = time.time()\n batch_step = 0\n for image_batch in dataset:\n batch_step +=1 \n latent_vars = tf.random.normal([batch_size, 1, 1, lat_dim])\n \n # We maximize the log(D(x)) + log(1 - D(G(X))\n # Here we take the gradients in two different steps. \n # this is exactly what is done in the original gan paper. Moreover we compute the loss using \n # Cross entropy for its simplicity. 
We see that there is no label smoothing here. \n with tf.GradientTape() as disc_tape :\n real_logits = discriminator(image_batch, training=True)\n fake_images = generator(latent_vars, training=True)\n fake_images = fake_images * 127.5 + 127.5\n fake_logits = discriminator(fake_images, training=True)\n \n log_DX_loss = cross_entropy(tf.ones_like(real_logits), real_logits)\n log_DXG_loss = cross_entropy(tf.zeros_like(fake_logits), fake_logits)\n \n D_loss = log_DX_loss + log_DXG_loss\n grad_D = disc_tape.gradient(D_loss, discriminator.trainable_variables)\n dis_optim.apply_gradients(zip(grad_D, discriminator.trainable_variables))\n\n with tf.GradientTape() as gen_tape:\n fake_images = generator(latent_vars, training=True)\n # This additional multiplication is done to obtain a real image. \n fake_images = fake_images * 127.5 + 127.5\n fake_logits = discriminator(fake_images, training=True)\n G_loss = cross_entropy(tf.ones_like(fake_logits), fake_logits)\n\n grad_G = gen_tape.gradient(G_loss, generator.trainable_variables)\n gen_optim.apply_gradients(zip(grad_G, generator.trainable_variables))\n\n G_loss_history.append(G_loss.numpy())\n D_loss_history.append(D_loss.numpy())\n \n # We save the images just to make sure that there is progress. \n # Given that the loss is not like a normal loss this is one of the more effective ways to monitor training. \n if batch_step % print_freq == 0: \n print('[{}] - disc loss {:.4f} - gen_loss{:.4f} - {}/{}'.format(epoch ,D_loss, G_loss, batch_step, len(dataset)))\n if batch_step % save_img_freq == 0:\n fake_images = generator(prog_images, training=False) * 127.5 + 127.5\n save_location = os.path.join(progression_images_dir, 'progression_image_{}_{}'.format(epoch, batch_step))\n make_grid(fake_images, len(fake_images), save_location) \n\n #Epoch checkpoints and saving of progress images\n checkpoint.save(file_prefix=checkpoint_prefix)\n print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n\n \n return (G_loss_history, D_loss_history)", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += 
accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################" ]
[ "0.6959912", "0.6959529", "0.6903491", "0.68603384", "0.6580812", "0.6557089", "0.65515715", "0.6538013", "0.64774996", "0.64496166", "0.6435693", "0.6410382", "0.63923454", "0.6380889", "0.63773113", "0.6373982", "0.6361317", "0.63536835", "0.6332485", "0.631817", "0.6312744", "0.6280184", "0.62489074", "0.62467456", "0.6236155", "0.62327886", "0.62279856", "0.62214494", "0.62117565", "0.6206684", "0.620208", "0.619409", "0.61863256", "0.6184746", "0.6171284", "0.6169253", "0.61688745", "0.6167079", "0.6163774", "0.61603576", "0.61549866", "0.6154723", "0.61478406", "0.61452705", "0.6143368", "0.61430985", "0.61342394", "0.610266", "0.609991", "0.6099306", "0.60979617", "0.6097332", "0.6096451", "0.60943294", "0.60924727", "0.60881233", "0.6087814", "0.60806984", "0.6071761", "0.6071743", "0.6068489", "0.6067567", "0.6060981", "0.60526365", "0.60506374", "0.604692", "0.6046687", "0.60432374", "0.6042092", "0.6039208", "0.60352594", "0.6024411", "0.602382", "0.6022476", "0.60174626", "0.6016053", "0.60120445", "0.601188", "0.60109955", "0.60090804", "0.60086215", "0.6006461", "0.59964824", "0.5990395", "0.598589", "0.5982161", "0.5981707", "0.5979979", "0.5973423", "0.5971009", "0.5967132", "0.59631085", "0.59615946", "0.5956255", "0.5953408", "0.5949968", "0.59443176", "0.59428567", "0.59393436", "0.593892" ]
0.626492
22
Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs) and a linearly decaying rate that goes to zero over the next 100k steps
def make_optimizer(loss, variables, name='Adam'):
    global_step = tf.Variable(0, trainable=False, name='global_step')
    starter_learning_rate = self.opt.lr
    end_learning_rate = 0.0
    start_decay_step = self.opt.niter
    decay_steps = self.opt.niter_decay
    beta1 = self.opt.beta1
    learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),
                              tf.train.polynomial_decay(starter_learning_rate,
                                                        global_step - start_decay_step,
                                                        decay_steps, end_learning_rate,
                                                        power=1.0),
                              starter_learning_rate))
    learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)
                     .minimize(loss, global_step=global_step, var_list=variables))
    return learning_step
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adam_optimizer(loss, initial_lr, decay_step, decay_rate):\n global_step = tf.train.get_or_create_global_step()\n lr = tf.train.exponential_decay(initial_lr, global_step, decay_step,\n decay_rate, staircase=True)\n\n optim = tf.train.AdamOptimizer(learning_rate=lr)\n global_step = tf.train.get_or_create_global_step()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.control_dependencies(update_ops):\n train_op = optim.minimize(loss, global_step=global_step)\n\n return train_op", "def adjust_learning_rate(optimizer):\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0\n group['step'] += 1\n\n group['lr'] = args.lr / (1 + group['step'] * args.lr_decay)", "def admm_adjust_learning_rate(optimizer,epoch,config):\n \"\"\"\n For admm, the learning rate change is periodic.\n When epoch is dividable by admm_epoch, the learning rate is reset\n to the original one, and decay every 3 epoch (as the default \n admm epoch is 9)\n\n \"\"\"\n admm_epoch = config.admm_epoch\n lr = None\n if epoch % admm_epoch == 0:\n lr = config.lr\n else:\n admm_epoch_offset = epoch%admm_epoch\n\n admm_step = admm_epoch/3 # roughly every 1/3 admm_epoch. \n \n lr = config.lr *(0.1 ** (admm_epoch_offset//admm_step))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adam_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n\n beta1 = config.get('beta1', 0.9)\n beta2 = config.get('beta2', 0.999)\n epsilon = config.get('epsilon', 1e-8)\n \n train_step = tf.train.AdamOptimizer(learning_rate, beta1, beta2, epsilon)\n\n return train_step", "def adjust_learning_rate_adam(optimizer, epoch):\n \n boundary = [args.epochs//5*4]\n lr = args.lr * 0.2 ** int(bisect.bisect_left(boundary, epoch))\n print('Learning rate: %f'%lr)\n #print(epoch, lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n \n return lr", "def train_adam(total_loss, global_step):\n # Variables that affect learning rate.\n num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n #decay_steps = 2000\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n #opt = tf.train.GradientDescentOptimizer(lr)\n opt = tf.train.AdamOptimizer()\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.summary.histogram(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n return train_op", "def adjust_learning_rate(epoch, opt, optimizer):\n steps = 
np.sum(epoch >= np.asarray(opt.lr_decay_epochs))\n if steps > 0:\n new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)\n optimizer.set_learning_rate(new_lr)", "def adjust_learning_rate(optimizer):\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n group['lr'] = args.lr * (\n 1.0 - float(group['step']) * float(args.batch_size) / (args.n_triplets * float(args.epochs)))\n return", "def adjust_learning_rate(optimizer, decay=0.1):\n for param_group in optimizer.param_groups:\n param_group['lr'] = decay * param_group['lr']", "def adjust_learning_rate(epoch, opt, optimizer):\n steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))\n if steps > 0:\n new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr", "def make_optimizer(loss, variables, name='Adam', lrcoef=1):\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = self.learning_rate * lrcoef\n beta1 = self.beta1\n beta2 = self.beta2\n if not self.exponential_decay:\n end_learning_rate = self.learning_rate * 0.01 * lrcoef\n start_decay_step = 10000\n decay_steps = 140000\n learning_rate = (\n tf.where(\n tf.greater_equal(global_step, start_decay_step),\n tf.train.polynomial_decay(starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate,\n power=1.0),\n starter_learning_rate\n )\n\n )\n else:\n decay_rate = self.decay_rate\n start_decay_step = 10000\n decay_steps = 140000\n learning_rate = (\n tf.where(\n tf.greater_equal(global_step, start_decay_step),\n tf.train.exponential_decay(starter_learning_rate, global_step - start_decay_step,\n decay_steps, decay_rate),\n starter_learning_rate\n )\n\n )\n tf.summary.scalar('learning_rate/{}'.format(name), learning_rate)\n\n learning_step = (\n tf.train.AdamOptimizer(learning_rate, beta1=beta1, beta2=beta2, name=name)\n .minimize(loss, global_step=global_step, var_list=variables)\n )\n return learning_step", "def adjust_learning_rate(epoch, learn_rate, decay_step, decay_rate, optimizer):\n steps = np.sum(epoch > np.asarray(decay_step))\n if steps > 0:\n new_lr = learn_rate * (decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr", "def adjust_learning_rate_D(start_lr, optimizer, epoch):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.3 ** (epoch // args.lr_decay))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, args, epoch):\n\tlr = args.learning_rate * (0.1 ** (epoch // args.lr_decay_step))\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr'] = lr", "def learning_rate_decaying(optimizer, rate):\n lr = get_learning_rate(optimizer) * rate\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr", "def adjust_learning_rate_GAN(optimizer, epoch):\r\n lrD = opt.lrD * (0.1 ** (epoch // opt.step))\r\n return lrD", "def decay_learning_rate(initial_learning_rate, i, n_iterations):\n return initial_learning_rate * np.exp(-i / n_iterations)", "def adjust_learning_rate(optimizer, lr, step, args):\n # decay = 0.1**(sum(epoch >= np.array(lr_steps)))\n lr = lr * (0.95**(step//args.lr_decay_every))\n print(\"current learning rate: {:.6f}\".format(lr))\n param_group = optimizer.param_groups\n for i in 
range(len(param_group)):\n param_group[i]['lr'] = lr\n\n return optimizer", "def ft_adjust_learning_rate(optimizer, intial_lr, epoch, lr_steps):\n decay = 0.3 ** (sum(epoch >= np.array(lr_steps)))\n lr = intial_lr * decay\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(cfg, optimizer):\n for idx, group in enumerate(optimizer.param_groups):\n init_lr = cfg.TRAINING.LR\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n\n group['lr'] = init_lr * (\n 1.0 - float(group['step']) * float(cfg.TRAINING.BATCH_SIZE) /\n (cfg.TRAINING.N_TRIPLETS * float(cfg.TRAINING.EPOCHS)))\n return", "def eager_decay_rate():\n learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(\n np.pi *\n (tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps\n ) / float(total_steps - warmup_steps - hold_base_rate_steps)))\n if hold_base_rate_steps > 0:\n learning_rate = tf.where(\n global_step > warmup_steps + hold_base_rate_steps,\n learning_rate, learning_rate_base)\n if warmup_steps > 0:\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * tf.cast(global_step,\n tf.float32) + warmup_learning_rate\n learning_rate = tf.where(global_step < warmup_steps, warmup_rate,\n learning_rate)\n return tf.where(global_step > total_steps, 0.0, learning_rate,\n name='learning_rate')", "def adjust_learning_rate(optimizer, epoch, lr_steps):\n decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))\n lr = args.start_lr * decay\n decay = args.weight_decay\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n param_group['weight_decay'] = decay", "def adjust_learning_rate(lr, optimizer, epoch, decay_epoch=30):\n lr = lr * (0.1 ** (epoch // decay_epoch))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (args.expo ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args, step):\n lr = args.lr * (0.1 ** (epoch // step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n warmup_epoch = -1\n if epoch <= warmup_epoch:\n lr = 1e-6 + (initial_lr-1e-6) * iteration / (epoch_size * warmup_epoch)\n else:\n lr = initial_lr * (gamma ** (step_index))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def lr_decay(config, optimizer: optim.Optimizer, epoch: int) -> optim.Optimizer:\n lr = config.learning_rate / (1 + config.lr_decay * (epoch - 1))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print('learning rate is set to: ', lr)\n return optimizer", "def adjust_learning_rate(optimizer, epoch, step, opt):\n _lr_decay_epoch = [# 150,\n 
int(4/8*opt.n_epochs),\n # int(5/8*opt.n_epochs),\n int(6/8*opt.n_epochs),\n int(7/8*opt.n_epochs),\n opt.n_epochs]\n if epoch in _lr_decay_epoch and step == 0:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * opt._lr_decay", "def adjust_learning_rate(optimizer, epoch, args):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = 0.5 * (0.1 ** (epoch // 100))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.5 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opt, epoch):\n d, e = args.lr_decay\n lr = args.lr * (d ** -(epoch // e))\n for param_group in opt.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, decay, lrate):\n lr = lrate * (0.1 ** (epoch // decay))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n if epoch < 0:\n lr = 1e-6 + (args.lr - 1e-6) * iteration / (epoch_size * 5)\n else:\n lr = args.lr * (gamma ** (step_index))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n if epoch < 0:\n lr = 1e-6 + (args.lr - 1e-6) * iteration / (epoch_size * 5)\n else:\n lr = args.lr * (gamma ** (step_index))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n global_step = tf.compat.v1.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.compat.v1.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.compat.v1.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op", "def adjust_learning_rate(lr, optimizer, lr_decay, epoch):\n\n if epoch >= lr_decay[0]:\n lr = lr * 0.1\n if epoch >= lr_decay[1]:\n lr = lr * 0.01\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 100))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def create_optimizer(init_lr, num_train_steps, num_warmup_steps):\n # Implements linear decay of the learning rate.\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr,\n decay_steps=num_train_steps,\n end_learning_rate=0.0)\n if num_warmup_steps:\n learning_rate_fn = WarmUp(initial_learning_rate=init_lr,\n decay_schedule_fn=learning_rate_fn,\n warmup_steps=num_warmup_steps)\n optimizer = AdamWeightDecay(\n learning_rate=learning_rate_fn,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[LAYER_NORM_NAME, 'bias'])\n return optimizer", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if args.cos: # cosine lr schedule\n lr *= 0.5 * (1. 
+ math.cos(math.pi * epoch / args.epochs))\n else: # stepwise lr schedule\n for milestone in args.schedule:\n lr *= 0.1 if epoch >= milestone else 1.\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def eager_decay_rate():\n learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(\n np.pi *\n (tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps\n ) / float(total_steps - warmup_steps - hold_base_rate_steps)))\n if hold_base_rate_steps > 0:\n learning_rate = tf.where(\n global_step > warmup_steps + hold_base_rate_steps,\n learning_rate, learning_rate_base)\n if warmup_steps > 0:\n if learning_rate_base < warmup_learning_rate:\n raise ValueError('learning_rate_base must be larger or equal to '\n 'warmup_learning_rate.')\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * tf.cast(global_step,\n tf.float32) + warmup_learning_rate\n learning_rate = tf.where(global_step < warmup_steps, warmup_rate,\n learning_rate)\n return tf.where(global_step > total_steps, 0.0, learning_rate,\n name='learning_rate')", "def adjust_learning_rate(optimizer, epoch, lr_steps):\r\n decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))\r\n lr = args.lr * decay\r\n decay = args.weight_decay\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr * param_group['lr_mult']\r\n param_group['weight_decay'] = decay * param_group['decay_mult']", "def adjust_learning_rate(optimizer, epoch):\n lr = opt.lr * (0.5 ** (epoch // opt.step))\n return lr", "def adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Comes from pytorch demo\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n \"\"\"if staircase == True\n decayed_learning_rate = learning_rate /\n (1 + decay_rate * floor(global_step / decay_step)))\"\"\"\n return tf.train.inverse_time_decay(\n alpha, global_step, decay_step, decay_rate, staircase=True)", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1**(epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, batch):\n lr = learning_rate\n for i in range(len(steps)):\n scale = scales[i] if i < len(scales) else 1\n if batch >= steps[i]:\n lr = lr * scale\n if batch == steps[i]:\n break\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr / batch_size\n return lr", "def pt_adjust_learning_rate(epoch, opt, optimizer):\n # if epoch < 2:\n # for param_group in optimizer.param_groups:\n # param_group['lr'] = 1e-7\n # return 0\n # print(epoch)\n # print(np.asarray(opt.pt_lr_decay_epochs))\n steps = np.sum(epoch > np.asarray(opt.pt_lr_decay_epochs))\n if steps > 0:\n new_lr = opt.pt_learning_rate * (opt.pt_lr_decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr", "def adjust_learning_rate(optimizer, epoch):\r\n lr = 0.001 * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\r\n warmup_epoch = -1\r\n if epoch <= warmup_epoch:\r\n lr = 1e-6 + (initial_lr-1e-6) * iteration / (epoch_size * warmup_epoch)\r\n else:\r\n lr = initial_lr * (gamma ** (step_index))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n return lr", "def adjust_learning_rate(opts, optimizer, epoch):\n lr = opts.lr * 
(0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opt, optimizer, epoch):\r\n lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, step):\n global lr\n lr = lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = ln * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"learning rate\",lr)", "def adjust_learning_rate(self, optimizer, epoch, args):\n lr = args.learning_rate * (0.1 ** (epoch // 30))\n # print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opt, optimizer, epoch):\n epoch = copy.deepcopy(epoch)\n lr = opt.maxlr\n wd = opt.weightDecay\n if opt.learningratescheduler == 'imagenetscheduler':\n if epoch >= 1 and epoch <= 18:\n lr = 1e-3\n wd = 5e-5\n elif epoch >= 19 and epoch <= 29:\n lr = 5e-4\n wd = 5e-5\n elif epoch >= 30 and epoch <= 43:\n lr = 1e-4\n wd = 0\n elif epoch >= 44 and epoch <= 52:\n lr = 5e-5\n wd = 0\n elif epoch >= 53:\n lr = 2e-5\n wd = 0\n if opt.optimType=='sgd':\n lr *= 10\n opt.lr = lr\n opt.weightDecay = wd\n if opt.learningratescheduler == 'decayscheduler':\n while epoch >= opt.decayinterval:\n lr = lr/opt.decaylevel\n epoch = epoch - opt.decayinterval\n lr = max(lr,opt.minlr)\n opt.lr = lr\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n param_group['weight_decay'] = wd", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(epoch, learning_rate, lr_decay_epochs, optimizer):\r\n steps = np.sum(epoch > np.asarray(lr_decay_epochs))\r\n if steps > 0:\r\n new_lr = learning_rate * (lr_decay_rate ** steps)\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = new_lr", "def adjust_learning_rate(optimizer, epoch):\n lr = LEARNING_RATE * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if 20 < epoch <= 30:\n lr = 0.0001\n elif 30 < epoch :\n lr = 0.00001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"learning rate -> {}\\n\".format(lr))", "def learning_rate_decay(config, global_step):\n warmup_steps = tf.to_float(config.train.learning_rate_warmup_steps)\n global_step = tf.to_float(global_step)\n return config.hidden_units ** -0.5 * tf.minimum(\n (global_step + 1.0) * warmup_steps ** -1.5, (global_step + 1.0) ** -0.5)", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * 
iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // args.lr_drop))\n print('lr= '+str(lr), flush=True)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * ((1 - 0.015) ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate_and_learning_taks(optimizer, epoch, args):\n if epoch >= args.step2: \n lr = args.lr * 0.01\n elif epoch >= args.step1:\n lr = args.lr * 0.1\n else:\n lr = args.lr\n \n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Return training classes\n return range(len(args.dataset))", "def adam_optimizer() -> mt.HyperparameterConfig:\n # Create a dict of hyperparameters and their feasible ranges\n hyperparam_dict = {\n # ADAM optimization hyperparameters\n # Ref: Kingma, Diederik P., and Jimmy Ba.\n # \"Adam: A method for stochastic optimization.\" (2014).\n # log10 of alpha1 := 1 - beta1\n 'log_alpha1': -1.5,\n # log10 of alpha2 := 1 - beta2\n 'log_alpha2': -2.1,\n # log10 of epsilon\n 'log_epsilon': -7.,\n # Other hyperparameters\n # Minimum value for the voxel weights in the summed xentropy op\n 'weight_floor': 0.01,\n\n # Exponential learning rate decay hyperparams\n # log10 of the learning rate\n 'log_learning_rate': -3.,\n # log10 of the number of Decay steps\n 'log_decay_steps': 3.4,\n # Exponential decay rate\n 'exponential_decay_rate': 0.75\n }\n\n return mt.HyperparameterConfig(hyperparam_dict)", "def setOptimizerParams(self,lr,momentum,decay):\n self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.4 ** (epoch // 4))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def __init__(self,\n weight_decay,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n use_locking=False,\n name=\"AdamW\"):\n super(AdamWOptimizer, self).__init__(\n weight_decay,\n learning_rate=learning_rate,\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n use_locking=use_locking,\n name=name)", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n epoc_number = int(global_step / decay_step)\n alpha /= (1 + decay_rate * epoc_number)\n return alpha", "def adjust_learning_rate(optimizer, epoch):\n initial_lr = args.lr\n if epoch <= 150:\n lr = initial_lr\n elif epoch <=225:\n lr = initial_lr/10\n else:\n lr = initial_lr/100\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"=\"*100)\n print('At epoch:',epoch,\" lr is:\",lr)", "def adjust_learning_rate(lr, lr_decay_steps, optimizer, epoch, lr_decay_rate=0.1):\n steps = list(map(int, lr_decay_steps.split(',')))\n for milestone in steps:\n lr *= lr_decay_rate if epoch >= milestone else 1.\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, step):\n lr = args.lr * (0.8 ** step)\n print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, 
epoch):\n \n boundary = [args.epochs//2,args.epochs//4*3,args.epochs]\n lr = args.lr * 0.1 ** int(bisect.bisect_left(boundary, epoch))\n print('Learning rate: %f'%lr)\n #print(epoch, lr, bisect.bisect_left(boundary, epoch))\n # lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr", "def train_adam(self, x, y, epochs=2000, x_test=None, y_test=None, learning_rate=0.1, val_freq=1000, log_freq=1000,\n verbose=1):\n\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n epoch_loss = tf.keras.metrics.Mean(name='epoch_loss')\n if verbose:\n logging.info(f'Start ADAM optimization')\n\n for epoch in range(1, epochs + 1):\n loss = self.train_step(x, y)\n # Track progress\n epoch_loss.update_state(loss) # Add current batch loss\n\n self.epoch_callback(epoch, epoch_loss.result(), epochs, x_test, y_test, val_freq, log_freq,\n verbose)", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def adjust_learning_rate(lr, decay, optimizer, cur_epoch, every_n_epochs):\n new_lr = lr * (decay ** (cur_epoch // every_n_epochs))\n\n # if cur_epoch % every_n_epochs == 0:\n # new_lr = lr * 0.1\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr", "def adjust_learning_rate(optimizer, gamma, step):\r\n lr = args.lr * (gamma ** (step))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(lr, optimizer, epoch):\n lr = lr * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def optimize(self):\n print(colored('starting optimization with ADAM...', 'cyan'))\n self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.args.lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min',\n factor=self.args.lr_factor,\n threshold=self.args.lr_thresh,\n patience=self.args.lr_patience)\n # stop after no improvements greater than a certain percentage of the previous loss\n stopper = u.EarlyStopping(patience=self.args.earlystop_patience,\n min_delta=self.args.earlystop_min_delta,\n percentage=True)\n start = time()\n for j in range(self.args.epochs):\n self.optimizer.zero_grad()\n loss = self.optimization_loop()\n self.optimizer.step()\n if self.args.reduce_lr:\n scheduler.step(loss)\n if stopper.step(loss): # stopper is computed on loss, as we don't have any validation metrics\n break\n \n self.elapsed = time() - start\n print(colored(u.sec2time(self.elapsed), 'yellow'))", "def adjust_learning_rate(optimizer, epoch):\n lr = hyper.lr * (0.5 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def update_lr(optimizer, lr, epoch, max_epochs, exponent=0.9):\n optimizer.param_groups[0]['lr'] = lr * (1 - epoch / max_epochs)**exponent", "def adjust_learning_rate(optimizer, epoch):\n lr = learning_rate * (0.88 ** (epoch))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, lr):\n lr = lr * ((1 - 0.015) ** epoch)\n print('learning rate : {}'.format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(initial_lr, optimizer, epoch, every_epoch):\n lr = initial_lr * (0.1 ** (epoch // every_epoch))\n for param_group in optimizer.param_groups:\n 
param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args_lr, epoch_adjust):\n lr = args_lr * (0.1 ** (epoch // epoch_adjust))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lr_decay(step):\n return(alpha / (1 + decay_rate * step))", "def adjust_learning_rate(start_lr, optimizer, epoch, total_epoch_num):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n if epoch==total_epoch_num:\n lr = lr * 0.3\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr" ]
[ "0.7543031", "0.73133004", "0.724946", "0.7170338", "0.7165979", "0.7088052", "0.7046958", "0.6981401", "0.6972685", "0.69598085", "0.6947667", "0.69352144", "0.69040656", "0.6877185", "0.68620074", "0.6840973", "0.68289465", "0.6801931", "0.67928517", "0.6781422", "0.67728466", "0.6767964", "0.676635", "0.67537814", "0.6749773", "0.6749773", "0.6749773", "0.6745417", "0.67452586", "0.6743608", "0.6743034", "0.67396677", "0.6729645", "0.6725869", "0.6724744", "0.67224646", "0.6718511", "0.6712468", "0.6709722", "0.6709722", "0.669776", "0.66924363", "0.6683016", "0.668152", "0.66806483", "0.6677462", "0.6676163", "0.66733974", "0.6671261", "0.66699386", "0.6666383", "0.66629076", "0.6662211", "0.6661614", "0.6659071", "0.6656737", "0.66538066", "0.6652353", "0.66509396", "0.66495013", "0.66409403", "0.6637355", "0.6637355", "0.6637355", "0.6633356", "0.6626417", "0.6622022", "0.66140455", "0.66083306", "0.6607863", "0.6607863", "0.6607863", "0.6607863", "0.66025573", "0.66007715", "0.6598164", "0.6597995", "0.6597321", "0.65972644", "0.6595294", "0.6595042", "0.65906227", "0.65885895", "0.6586305", "0.65831596", "0.6580682", "0.6579654", "0.6579654", "0.6578689", "0.6565511", "0.6562997", "0.65603834", "0.6557933", "0.6552106", "0.655177", "0.6550036", "0.6549804", "0.6549511", "0.65481883", "0.65458155" ]
0.68411076
15
Insert an object into the Property Sheet to display it. The object must implement the GetPropList interface. This interface returns a dictionary whose keys are the group labels and whose values are dictionaries mapping property labels to property value objects.
def SetPropObject(self, obj):
    if self.propObj is not None:
        # deregister notification
        if hasattr(self.propObj, "DropPropsNotify"):
            self.propObj.DropPropsNotify()
    if hasattr(obj, "GetPropList"):
        self.propObj = obj
        self.UpdateGrid()
    else:
        wx.MessageBox(_("Can't display properties for %s, because it hasn't GetPropList interface") % obj.__class__.__name__,
                      _("Sampo Framework"),
                      wx.OK | wx.CENTER | wx.ICON_ERROR)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def define_group_properties(self):\n\n # PropertyGroup\n self.propertygroup['debug']['x86'] = get_propertygroup(\n 'debug', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['debug']['x64'] = get_propertygroup(\n 'debug', 'x64', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x86'] = get_propertygroup(\n 'release', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x64'] = get_propertygroup(\n 'release', 'x64', ' and @Label=\"Configuration\"'\n )\n\n # ItemDefinitionGroup\n self.definitiongroups['debug']['x86'] = get_definitiongroup('debug', 'x86')\n self.definitiongroups['debug']['x64'] = get_definitiongroup('debug', 'x64')\n self.definitiongroups['release']['x86'] = get_definitiongroup('release', 'x86')\n self.definitiongroups['release']['x64'] = get_definitiongroup('release', 'x64')", "def custom_props():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Custom Properties', level=1)\r\n\r\n customproperties = get_qlik_sense.get_customprop()\r\n num_of_customproperties = len(customproperties)\r\n table = document.add_table(rows=num_of_customproperties+1, cols=3)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'choice values'\r\n row.cells[2].text = 'object types'\r\n\r\n for customproperty in range(num_of_customproperties):\r\n row = table.rows[customproperty+1]\r\n row.cells[0].text = str(customproperties[customproperty][0])\r\n row.cells[1].text = ', '.join(customproperties[customproperty][1])\r\n row.cells[2].text = ', '.join(customproperties[customproperty][2])\r\n document.add_page_break()", "def properties(self,prop):\r\n # The particulars of how they are stored and manipulated (e.g., do\r\n # we want an inventory internally) is not settled. I've used a\r\n # property dictionary for now.\r\n #\r\n # How these properties interact with a user defined style file is\r\n # even less clear.\r\n\r\n # Properties defined by plot\r\n self.xbox.set_text(r\"$%s$\" % prop[\"xlabel\"])\r\n self.ybox.set_text(r\"$%s$\" % prop[\"ylabel\"])\r\n self.tbox.set_text(r\"$%s$\" % prop[\"title\"])\r\n\r\n # Properties defined by user\r\n #self.axes.grid(True)\r", "def propertyGroup(self, p_int): # real signature unknown; restored from __doc__\n return \"\"", "def properties(self,prop):\n # The particulars of how they are stored and manipulated (e.g., do \n # we want an inventory internally) is not settled. 
I've used a\n # property dictionary for now.\n #\n # How these properties interact with a user defined style file is\n # even less clear.\n\n # Properties defined by plot\n self.subplot.set_xlabel(r\"$%s$\" % prop[\"xlabel\"])\n self.subplot.set_ylabel(r\"$%s$\" % prop[\"ylabel\"])\n self.subplot.set_title(prop[\"title\"])\n\n # Properties defined by user\n #self.axes.grid(True)", "def pack(self):\n data = {\n 'name': self._name,\n 'piece': self._piece,\n 'pos': self._pos,\n 'cash': self._cash,\n 'properties': []\n }\n\n for i in self._properties:\n data['properties'].append({'name': i.name, 'value': i.value})\n\n return data", "def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)", "def add_object(self, name, env, contentnode):\n props = PropertyDefinition(name, env.docname)\n props.gather(contentnode)\n self.data['objects'][props.key] = props\n self.data['all_objects'][props.key] = props\n return props", "def create_propositions(self, props):\n\n for prop in props:\n Proposition(label=html.escape(prop), poll=self).save()", "def add_property(self, _, obj_prop_pair):\n (obj, prop) = obj_prop_pair\n if prop is None:\n name = self.get_new_obj_name(obj.properties, prefix='Unnamed Property')\n prop = odml.Property(name=name, dtype='string')\n # The default value part should be put in odML core library\n prop.values = [dtypes.default_values('string')]\n create_pseudo_values([prop])\n else:\n prefix = prop.name\n name = self.get_new_obj_name(obj.properties, prefix=prefix)\n prop = prop.clone()\n prop.name = name\n create_pseudo_values([prop])\n\n cmd = commands.AppendValue(obj=obj, val=prop)\n self.execute(cmd)", "def transform_property_info_list(se, prop_list, output_type):\n props = [{\"description\": _prop.get(\"description\"),\n \"domain\": transform_schemaclasses_lst(se,\n _prop.get(\"domain\"),\n output_type),\n \"range\": transform_schemaclasses_lst(se,\n _prop.get(\"range\"),\n output_type),\n \"curie\": se.cls_converter.get_curie(_prop.get(\"uri\")),\n \"label\": se.cls_converter.get_label(_prop.get(\"uri\")),\n \"uri\": _prop.get(\"uri\"),\n \"object\": se.get_property(_prop.get(\"uri\"))} for _prop in prop_list]\n return props", "def as_dict(self):\n rv = {\n 'id': self.id,\n 'name': self.name,\n 'contributes': self.contributes,\n 'hint': self.hint,\n 'values': [],\n }\n for value in self.values:\n if isinstance(value, GroupingComponent):\n rv['values'].append(value.as_dict())\n else:\n # this basically assumes that a value is only a primitive\n # and never an object or list. 
This should be okay\n # because we verify this.\n rv['values'].append(value)\n return rv", "def add_value(self, _, obj_value_pair):\n (obj, val) = obj_value_pair\n new_val = value_model.Value(obj)\n\n # Add new empty PseudoValue to the Properties PseudoValue list\n cmd = commands.AppendValue(obj=obj.pseudo_values, val=new_val)\n self.execute(cmd)\n\n # Update the empty new PseudoValue with the actual value.\n if val:\n new_val.pseudo_values = val\n\n # Reset the view to make sure the changes are properly displayed.\n self.reset_value_view(None)", "def _props_grouper(props, pgs):\n if not props:\n return props\n #props = sorted([\n #_ if _.strip().endswith(\";\")\n #and not _.strip().endswith(\"*/\") and not _.strip().endswith(\"/*\")\n #else _.rstrip() + \";\\n\" for _ in props])\n props_pg = zip(map(lambda prop: _prioritify(prop, pgs), props), props)\n props_pg = sorted(props_pg, key=lambda item: item[0][1])\n props_by_groups = map(\n lambda item: list(item[1]),\n itertools.groupby(props_pg, key=lambda item: item[0][1]))\n props_by_groups = map(lambda item: sorted(\n item, key=lambda item: item[0][0]), props_by_groups)\n props = []\n for group in props_by_groups:\n group = map(lambda item: item[1], group)\n props += group\n props += ['\\n']\n props.pop()\n return props", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def process_property(prop):\n output = {}\n output['Property'] = prop['PropertyAddress']\n output['Sale date'] = convert_date(prop['DateSold'])\n output['Sale price'] = convert_prices(prop['SalePrice'])\n output['Rates value'] = convert_prices(prop['CapitalValue'])\n return output", "def property_groups(self):\n from hubspot3.property_groups import PropertyGroupsClient\n\n return PropertyGroupsClient(**self.auth, **self.options)", "def save( self, propList= None ):\n if propList is None:\n # props which do not begin with underscore\n propList= [ aKey \n for aKey in self.__dict__.keys() \n if aKey.split(\"_\")[0].strip() != \"\" ]\n \n elif type( propList ) == type( str() ):\n propList= [ propList ]\n \n data= [ getattr( self, aProp ) \n if aProp in self.__dict__ \n else warnings.warn( \"\\\"\" + aProp + \"\\\" not able to be saved, not a property\" )\n for aProp in propList ]\n \n self.prefObj.save( group= self.prefGroup, name= propList, data= data )", "def write_properties(props):\n root = Element('{%s}coreProperties' % COREPROPS_NS)\n for attr in (\"creator\", \"title\", \"description\", \"subject\", \"identifier\",\n \"language\"):\n SubElement(root, '{%s}%s' % (DCORE_NS, attr)).text = getattr(props, attr)\n\n for attr in (\"created\", \"modified\"):\n value = datetime_to_W3CDTF(getattr(props, attr))\n SubElement(root, '{%s}%s' % (DCTERMS_NS, attr),\n {'{%s}type' % XSI_NS:'%s:W3CDTF' % DCTERMS_PREFIX}).text = value\n\n for attr in (\"lastModifiedBy\", \"category\", \"contentStatus\", \"version\",\n \"revision\", \"keywords\"):\n SubElement(root, '{%s}%s' % (COREPROPS_NS, attr)).text = getattr(props, attr)\n\n if props.lastPrinted is not None:\n SubElement(root, \"{%s}lastPrinted\" % COREPROPS_NS).text = datetime_to_W3CDTF(props.lastPrinted\n )\n return tostring(root)", "def _create_properties_table(font, format, base):\n propstrings = bytearray()\n xlfd_props = create_xlfd_properties(font)\n xlfd_props['FONT'] = create_xlfd_name(xlfd_props)\n props = []\n props_struct = base.Struct(**_PROPS)\n for key, value in xlfd_props.items():\n prop = 
props_struct(\n name_offset=len(propstrings),\n isStringProp=isinstance(value, str),\n )\n propstrings += key.encode('ascii', 'replace') + b'\\0'\n if prop.isStringProp:\n prop.value = len(propstrings)\n value = from_quoted_string(value)\n propstrings += value.encode('ascii', 'replace') + b'\\0'\n else:\n prop.value = int(value)\n props.append(prop)\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(base.uint32(len(props)))\n + bytes((props_struct * len(props))(*props))\n # pad to next int32 boundary\n + bytes(0 if len(props)&3 == 0 else 4-(len(props)&3))\n + bytes(base.uint32(len(propstrings)))\n + bytes(propstrings)\n )\n return table_bytes, format", "def add_properties(self, p: dict):\n # TODO: Deprecate in favour of directly updating or adding metadata\n for k, v in p.items():\n units = v.pop(\"units\", None)\n try:\n try:\n n, i = self._properties.get_name_and_index(k)\n except ValueError:\n msg = (\n f\"The property name {k} in property metadata is not a recognized \"\n \"standard property name defined in this PropertySet. Please refer \"\n \"to IDAES standard names in the IDAES documentation. You can use \"\n \"the define_custom_properties() rather than the add_properties() \"\n \"method to define metadata for this property. You can also use a \"\n \"different property set by calling the define_property_set() method.\"\n )\n deprecation_warning(\n msg=msg, logger=_log, version=\"2.0.0\", remove_in=\"3.0.0\"\n )\n n = k\n i = None\n getattr(self._properties, n)[i].update_property(**v)\n except AttributeError:\n # TODO: Deprecate this and make it raise an exception if an unknown property is encountered\n # Force users to explicitly declare new/custom properties\n self._properties.define_property(name=k, **v, units=units)", "def put_prop(self, obj_type, obj_id, prop_name, value):\n ierr = exolib.py_expp(self.exoid, obj_type, obj_id, prop_name, value)\n if ierr:\n raise ExodusIIWriterError(\"Error putting prop\")", "def set_label_props(self, props):\n _api.check_isinstance(dict, props=props)\n props = _expand_text_props(props)\n for text, prop in zip(self.labels, props):\n text.update(prop)", "def set_label_props(self, props):\n _api.check_isinstance(dict, props=props)\n props = _expand_text_props(props)\n for text, prop in zip(self.labels, props):\n text.update(prop)", "def get_properties():", "def put_prop(self, obj_type, obj_id, prop_name, value):\n ierr = exolib.py_expp(self.exoid, obj_type, obj_id, prop_name, value)\n if ierr:\n raise ExodusIIWriterError(\"Error putting prop value\")", "def update_values (self, property_list, dest_dict, overwrite_init=False ):\r\n for property in property_list:\r\n visible = True\r\n for attr in property[\"attrs\"]:\r\n if attr[\"type\"] == \"private\":\r\n if attr[\"value\"].lower() == 'true':\r\n visible = False\r\n\r\n if not visible:\r\n continue\r\n\r\n value = {} # Value dict, format: {value, dirty}\r\n value_list = [] # List for multiple values\r\n init_value_list = []\r\n init_value = {}\r\n # Add all the values (object with multiple getters) \r\n # to a list of values\r\n for getter in property['getters']:\r\n getter_name = getter['name']\r\n getter_value = self.execute_getter( getter_name, self.object )\r\n if str(getter_value).startswith('('):\r\n python_value = self.__get_python_format(\r\n property['params'], \r\n getter_value \r\n )\r\n else:\r\n python_value = [getter_value]\r\n\r\n value_list.append(python_value)\r\n if overwrite_init:\r\n init_value_list.append(python_value)\r\n\r\n value['value'] = value_list # Property 
value\r\n value['dirty'] = False # Must save or not\r\n value['inheritance'] = self.mode # Used for revert\r\n \r\n if overwrite_init:\r\n init_value['value'] = init_value_list\r\n init_value['dirty'] = False \r\n init_value['inheritance'] = self.mode\r\n \r\n dest_dict[property['name']] = value\r\n \r\n if overwrite_init:\r\n self.init_values[property['name']] = init_value", "def add_property(self, property):\n # check input data type\n if not isinstance(property, dict):\n raise TypeError\n # check data types of keys in dict\n if not all([isinstance(key, str) for key in property.keys()]):\n raise TypeError\n # check that values are lists of strings\n for key, value in property.items():\n if isinstance(value, str):\n property[key] = [value]\n elif not isinstance(value, list):\n raise TypeError\n else:\n if not all([isinstance(val, str) for val in value]):\n raise TypeError\n\n # add properties to the dict\n for key, value in property.items():\n if key not in self.properties:\n self.properties[key] = value\n else:\n self.properties[key] += value\n self.properties[key] = list(set(self.properties[key]))", "def load(self):\n for prop in self.properties:\n try:\n value = getattr(self, prop)\n self._prop_dict[prop] = value\n except AttributeError as ate:\n pass", "def create_plot_panel_props(prop_map):\n props = {}\n for k1, v in prop_map.items():\n v = {'edgecolor' : v.get('color', None),\n 'facecolor' : 'none',\n 'linewidth' : v.get('width', None),\n 'alpha' : v.get('alpha', None)}\n for k2, _ in prop_map.items():\n if (k1, k2) not in props:\n props[k1, k2] = v\n if (k2, k1) not in props:\n props[k2, k1] = v\n return props", "def get_prop_labels(self, input):\n ob_labels = label(input, connectivity=2)\n\n prop = ['label', 'centroid', 'orientation', 'major_axis_length', 'minor_axis_length', 'bbox', 'area',\n 'coords', 'image', 'bbox_area']\n props_table = regionprops_table(ob_labels, properties=prop)\n\n return pd.DataFrame(props_table)", "def _create_groups(properties, alias_dictionary):\n # We first convert properties into a dictionary structure. Each dictionary\n # represents a group. The None key corresponds to the fields directly stored\n # on that group. 
The other keys map from group name to another dictionary.\n # For example:\n # {\n # None: [field1, field2, ...]\n # 'groupA': { None: [field3] },\n # 'groupB': {\n # None: [],\n # 'groupC': { None: [field4] },\n # },\n # }\n #\n # We then recursively convert this dictionary into a tree of Groups.\n # TODO(shend): Skip the first step by changing Group attributes into methods.\n def _dict_to_group(name, group_dict):\n fields_in_current_group = group_dict.pop(None)\n subgroups = [_dict_to_group(subgroup_name, subgroup_dict) for subgroup_name, subgroup_dict in group_dict.items()]\n return Group(name, subgroups, _reorder_fields(fields_in_current_group))\n\n root_group_dict = {None: []}\n for property_ in properties:\n current_group_dict = root_group_dict\n if property_['field_group']:\n for group_name in property_['field_group'].split('->'):\n current_group_dict[group_name] = current_group_dict.get(group_name, {None: []})\n current_group_dict = current_group_dict[group_name]\n current_group_dict[None].extend(_create_fields(property_, alias_dictionary))\n\n return _dict_to_group(None, root_group_dict)", "def create_properties(self, properties):\n self._update_metadata_date(properties)\n self._backend.insert_product_properties(properties)", "def get_storable_dict(self):\n d = super().get_storable_dict()\n d.update(grp=turn_keys_into_str(self._grp), grp_order=self._grp_order)\n return d", "def _prop(self):\n return [\"%s = %s\" % (str(k), repr(v)) for k, v in self.prop.items()]", "def test_properties_stats_group_by_group_by_get(self):\n pass", "def properties(self):", "def properties(self):", "def properties(self):", "def get_properties_groups(serialized_file: str, sparql_file: str, repository: str, endpoint: str, endpoint_type: str,\n properties_dict: ResourceDictionary,\n limit: int = 1000) -> Dict:\n if os.path.isfile(serialized_file):\n properties_groups = deserialize(serialized_file)\n return properties_groups\n encoding_dir = os.path.dirname(serialized_file)\n if not os.path.exists(encoding_dir):\n os.makedirs(encoding_dir)\n\n sub_properties_dict = {}\n get_sub_properties_query = open(sparql_file).read()\n get_sub_properties_query_template = Template(get_sub_properties_query + \" limit $limit offset $offset \")\n for (property1, property2) in get_sparql_results(get_sub_properties_query_template, [\"property1\", \"property2\"],\n endpoint, repository,\n endpoint_type, limit):\n if property2 not in sub_properties_dict:\n sub_properties_dict[property2] = []\n\n sub_properties_dict[property2].append(property1)\n\n G = nx.Graph()\n for property1 in sub_properties_dict:\n for property2 in sub_properties_dict[property1]:\n G.add_edge(property1, property2)\n for property_uri in properties_dict:\n G.add_node(property_uri)\n properties_connected_components = {}\n index = 0\n for c in nx.connected_components(G):\n for p in c:\n properties_connected_components[p] = index\n index += 1\n\n serialize(properties_connected_components, serialized_file)\n return properties_connected_components", "def add_custom_properties(product):\n \n # TODO: may override property of object\n if product['properties']:\n for p in product['properties']:\n product[p['name']] = p['value']\n \n return product", "def _build_properties(self, k, v, definition):\n\n if isinstance(v, schema.Map):\n newdef = self._create_section(definition, k, term=k)\n\n if v.schema is None:\n # if it's a map for arbritary values, only include description\n field = nodes.line('', v.description)\n newdef.append(field)\n return\n\n newdeflist = 
self._create_def_list(newdef)\n\n sorted_schema = sorted(v.schema.items(),\n key=cmp_to_key(self._sort_by_type))\n for key, value in sorted_schema:\n self._build_properties(key, value, newdeflist)\n elif isinstance(v, schema.List):\n newdef = self._create_section(definition, k, term=k)\n\n # identify next section as list properties\n field = nodes.line()\n emph = nodes.emphasis('', 'List properties:')\n field.append(emph)\n newdef.append(field)\n\n newdeflist = self._create_def_list(newdef)\n\n self._build_properties('**', v.schema['*'], newdeflist)\n else:\n newdef = self._create_section(definition, k, term=k)\n if 'description' in v:\n field = nodes.line('', v['description'])\n newdef.append(field)\n else:\n field = nodes.line('', '++')\n newdef.append(field)", "def get_properties_code(self, obj):\n return []", "def addPropertie(self, propname, value):\n if isinstance(propname, types.IntType) or isinstance(propname, types.StringType):\n self.properties[propname] = value\n else:\n raise Exception(\"la propiedad debe ser de tipo int o string\")", "def make_props_files(labels, label_list, dir_path, data,\r\n background_color, label_color, prefs):\r\n cat_connected_num = 0\r\n mapping = data['map']\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n for params in groups_and_colors:\r\n l = params[0]\r\n if l == \"SampleID\" or l == \"Description\":\r\n continue\r\n m = params[2]\r\n c = params[3]\r\n output = open(os.path.join(dir_path, \"props/custom.%s.props\" % l), 'w')\r\n props_str_list = [l] * 5\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 22)\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 16)\r\n props_str_list.append(props_edge % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_edge_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 109)\r\n props_str_list.append(props_node % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_node_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 48)\r\n props_str_list[98] = ','.join(map(str, background_color.toRGB()))\r\n props_str_list[109] = ','.join(map(str, label_color.toRGB()))\r\n props_str_list[132] = ','.join(map(str, label_color.toRGB()))\r\n output.write(props_file_str % tuple(props_str_list))\r\n output.close()", "def create_props(kinds, colors=None, interstitial_color=(0.5, 0.5, 0.5, 1)):\n if colors is None:\n prop_cycle = _plt.rcParams.get('pyseas.map.trackprops', _dark_artist_cycler)\n elif isinstance(colors, (list, int)):\n prop_cycle = _cycler(edgecolor=colors, facecolor=[(0, 0, 0, 0)] * len(colors))\n elif isinstance(colors, _Cycler):\n prop_cycle = colors\n else:\n raise ValueError(f\"don't know how to handle props of type {type(props)}\")\n prop_cycle = prop_cycle()\n props = {}\n for k1 in kinds:\n props[(k1, k1)] = next(prop_cycle)\n for k2 in kinds:\n if k1 != k2:\n props[(k1, k2)] = {'edgecolor' : interstitial_color,\n 'facecolor' : (0, 0, 0, 0),\n 'legend' : None}\n return props", "def update_meta_property(self, property, item, iteration):\n try:\n property[iteration].append(item)\n except:\n property[iteration] = [item]", "def set_value(self, _, prop_value_pair):\n (prop, val) = prop_value_pair\n\n _, _, obj = self.popup_data\n if not isinstance(obj, value_model.Value):\n raise TypeError(\"Expected %s\" % 
type(value_model.Value))\n\n if not prop == obj.parent:\n raise ValueError(\"Property '%s' is not the parent of '%s'\" % (prop, obj))\n\n # To enable undo redo for this we need a bit of trickery\n new_prop = prop.clone(keep_id=True)\n create_pseudo_values([new_prop])\n if new_prop.pseudo_values[obj.index].pseudo_values != obj.pseudo_values:\n raise ValueError(\"Cannot find replacement value\")\n\n # Update the value in the new property\n new_prop.pseudo_values[obj.index].pseudo_values = val\n\n # Lets replace the old property with the new and updated one\n cmd = commands.ReplaceObject(obj=prop, repl=new_prop)\n self.execute(cmd)\n\n # Reset the view to make sure the changes are properly displayed.\n self.select_object(new_prop)\n self.reset_value_view(None)", "def reshape_properties(self):\n for key in self.properties:\n try:\n self.properties[key] = self.properties[key].reshape(\n self.grid.nlay,\n self.grid.nx,\n self.grid.ny\n )\n except AttributeError:\n pass", "def _convert_props(pcf_data):\n xlfd_name = pcf_data.xlfd_props.pop('FONT', '')\n pcf_data.xlfd_props = {_k: str(_v) for _k, _v in pcf_data.xlfd_props.items()}\n props = parse_xlfd_properties(pcf_data.xlfd_props, xlfd_name)\n props.update(dict(\n default_char=Codepoint(pcf_data.default_char),\n ))\n # ascent and descent - these are stored in accelerator table rather than XLFD props\n if hasattr(pcf_data, 'bdf_acc_props'):\n props.update(dict(\n ascent=pcf_data.bdf_acc_props.fontAscent,\n descent=pcf_data.bdf_acc_props.fontDescent,\n ))\n elif hasattr(pcf_data, 'acc_props'):\n props.update(dict(\n ascent=pcf_data.acc_props.fontAscent,\n descent=pcf_data.acc_props.fontDescent,\n ))\n return props", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def createObjectPropertyMap(self, propertyName: unicode, objectClass: java.lang.Class) -> ghidra.program.model.util.ObjectPropertyMap:\n ...", "def asdict(self):\n return self._prop_dict", "def properties(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___Expression]:", "def insert(self, index: int, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.insert(index, panel(obj))\n self.objects = new_objects", "def subject_property_create(context, values, session=None):\n prop_ref = models.SubjectProperty()\n prop = _subject_property_update(context, prop_ref, values, session=session)\n return prop.to_dict()", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def testAddingPropertySheet(self):\n self.failUnless(hasattr(self.properties.aq_base, PROPERTY_SHEET))", "def __add__(self, obj):\n if isinstance(obj, vtk.vtkProp3D):\n self.AddPart(obj)\n\n self.actors.append(obj)\n\n if hasattr(obj, \"scalarbar\") and obj.scalarbar is not None:\n if self.scalarbar is None:\n self.scalarbar = obj.scalarbar\n return self\n\n def unpack_group(scalarbar):\n if isinstance(scalarbar, Group):\n return scalarbar.unpack()\n else:\n return scalarbar\n\n if isinstance(self.scalarbar, Group):\n self.scalarbar += unpack_group(obj.scalarbar)\n else:\n self.scalarbar = Group([unpack_group(self.scalarbar), unpack_group(obj.scalarbar)])\n self.pipeline = vedo.utils.OperationNode(\"add mesh\", 
parents=[self, obj], c=\"#f08080\")\n return self", "def add_metadata_properties(self, sentence, result):\r\n for property in sentence.properties:\r\n if property.property_metadata.is_category:\r\n result[property.name] = property.value", "def init_prop(obj):\n if 'Test_object' not in obj:\n obj['Test_object'] = \"None\"\n if 'Test_ratio' not in obj:\n obj['Test_ratio'] = 1\n if 'Correct_color' not in obj:\n obj['Correct_color'] = 0, 1.0, 0\n if 'Wrong_color' not in obj:\n obj['Wrong_color'] = 1.0, 0, 0\n if 'TEST' not in obj:\n obj[\"TEST\"] = \"INACTIVE\"\n\n if 'Active_Dialogue' not in obj:\n obj['Active_Dialogue'] = {}\n\n if 'STORY_MODE' not in obj:\n obj['STORY_MODE'] = \"NORMAL\"\n\n if 'SOLVED' not in obj:\n obj['SOLVED'] = \"No\"\n\n if 'SLIDE' not in obj:\n obj['SLIDE'] = 0\n if 'ACTIVE' not in obj:\n obj['ACTIVE'] = None\n if 'TEST_MODE' not in obj:\n obj['TEST_MODE'] = \"Off\"\n #Set run speed\n if 'running' not in obj:\n obj['running'] = 20\n #Set jump force\n if 'jump_force' not in obj:\n obj['jump_force'] = 20\n #Toggles first person mode\n if 'view_mode' not in obj:\n obj['view_mode'] = 'THIRD_PERSON'\n #The fp thumbstick layout\n if 'thumbstick_layout' not in obj:\n obj['thumbstick_layout'] = 'DEFAULT' #can be DEFAULT, LEGACY, SOUTHPAW, or LEGACYSOUTHPAW\n #Look invert for fp_mode\n if 'look_invert' not in obj:\n #1 = not inverted, -1 = inverted\n obj['look_invert'] = 1\n #When Camera has reached its destined position\n if 'cam_set' not in obj:\n obj['cam_set'] = 'Off'\n if 'index' not in obj:\n obj['index'] = 0", "def test_IMutablePropertiesPlugin_set(self):\n user = self.pas.getUserById(\"uid0\")\n from pas.plugins.ldap.sheet import LDAPUserPropertySheet\n\n sheet = LDAPUserPropertySheet(user, self.ldap)\n self.assertEqual(sheet.getProperty(\"mail\"), \"uid0@groupOfNames_10_10.com\")\n sheet.setProperty(None, \"mail\", \"[email protected]\")\n self.assertEqual(sheet.getProperty(\"mail\"), \"[email protected]\")\n sheet2 = LDAPUserPropertySheet(user, self.ldap)\n self.assertEqual(sheet2.getProperty(\"mail\"), \"[email protected]\")", "def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n if hasattr(group, 'location'):\n print(\"\\tLocation: {}\".format(group.location))\n print_properties(getattr(group, 'properties', None))", "def display_properties(self):\n return self._display_properties", "def parseRowProperty(self, i, j) :\n if self.isEmpty(i,j):\n if self.insideMergeBox(i,j):\n k, l = self.getMergeBoxCoord(i,j)\n self.source_cell_value_qname = self.addValue(self.r_sheet.cell(k,l).value)\n else:\n return\n else:\n self.source_cell_value_qname = self.addValue(self.source_cell.value) \n #self.graph.add((self.namespaces['scope'][self.source_cell_qname],self.namespaces['tablink']['isDimensionProperty'],self.namespaces['scope'][self.source_cell_value_qname]))\n #self.graph.add((self.namespaces['scope'][self.source_cell_value_qname],RDF.type,self.namespaces['qb']['DimensionProperty']))\n #self.graph.add((self.namespaces['scope'][self.source_cell_value_qname],RDF.type,RDF['Property']))\n \n #self.property_dimensions.setdefault(j,[]).append(self.source_cell_value_qname)\n self.property_dimensions[j] = self.source_cell_value_qname\n \n # Add to graph\n resource = self.namespaces['scope'][self.property_dimensions[j]]\n self.graph.add((resource, RDF.type, self.namespaces['tablink']['RowProperty']))\n\n return", "def test_list_properties(self):\n pass", "def test_get_objects_with_properties(self):\n expected_result = 
self.spec.get(\"test_get_objects_with_properties\")\n expected_type = expected_result.get(\"_type\")\n expected_datastore_list = []\n\n for each_datastore in expected_result.get(\"datastore_infos\"):\n datastore_name = each_datastore[\"name\"]\n expected_datastore_list.append(datastore_name)\n datastore_list = []\n \n object_content = self.session.invoke_api(vim_util, \n 'get_objects', \n self.vim, \n 'Datastore', \n 100, \n ['name'])\n for one_object in object_content.objects:\n self.assertEqual(one_object.obj._type, expected_type)\n if hasattr(one_object, 'propSet'):\n dynamic_properties = one_object.propSet\n prop_dict = {}\n for prop in dynamic_properties:\n if prop.name == \"name\":\n datastore_list.append(prop.val)\n \n for each_ds_name in datastore_list:\n self.assertTrue(each_ds_name in datastore_list)", "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def getProperties():", "def get_popup_menu_items(self):\n model, _, obj = self.popup_data\n menu_items = self.create_popup_menu_items(\"Add Property\", \"Empty Property\",\n model.section, self.add_property,\n lambda sec: sec.properties,\n lambda prop: prop.name,\n stock=\"odml-add-Property\")\n if obj is not None:\n prop = obj\n\n # We are working exclusively with Properties\n if isinstance(obj, value_model.Value):\n prop = obj.parent\n\n for item in self.create_popup_menu_items(\"Add Value\", \"Empty Value\", prop,\n self.add_value, self._value_filter,\n lambda curr_val: curr_val,\n stock=\"odml-add-Value\"):\n menu_items.append(item)\n\n for item in self.create_popup_menu_items(\"Set Value\", \"Empty Value\", prop,\n self.set_value, self._value_filter,\n lambda curr_val: curr_val):\n if item.get_submenu() is None:\n # We don't want a single Set Value item\n continue\n menu_items.append(item)\n\n val = obj\n\n if prop is obj:\n val = prop.pseudo_values[0] if len(prop.pseudo_values) == 1 else None\n\n if val is not None and val.dtype == \"text\":\n menu_items.append(self.create_menu_item(\"Edit text in larger window\",\n self.edit_text, val))\n\n # Cannot delete properties that are linked (they'd be override on next load),\n # instead allow to reset them.\n merged = prop.get_merged_equivalent()\n if prop is obj and merged is not None:\n if merged != obj:\n menu_items.append(self.create_menu_item(\"Reset to merged default\",\n self.reset_property, obj))\n else:\n menu_items.append(self.create_popup_menu_del_item(obj))\n\n return menu_items", "def update_properties(self, prop_dict):\n ft_dict = {ft.name: ft for ft in self.get_field_types()}\n for name, val in prop_dict.items():\n ft = ft_dict[name]\n if ft.is_parameter():\n key = \"value\"\n else:\n key = \"sample\"\n if issubclass(type(val), Sequence) and ft.array:\n self.set_field_value_array(name, None, [{key: v} for v in val])\n else:\n self.set_field_value(name, None, {key: val})", "def render_specification_properties(spec, newline='\\n', ignore_props=None, prepend_items=None, append_items=None):\n\n spec_prop_list = []\n if prepend_items is not None:\n spec_prop_list += prepend_items\n ignore_keys = [] if ignore_props is None else ignore_props\n # Add link properties\n if isinstance(spec, LinkSpec):\n spec_prop_list.append('**Target Type** %s' %\n RSTDocument.get_reference(RSTSectionLabelHelper.get_section_label(\n spec['target_type']),\n spec['target_type']))\n # Add dataset properties\n if isinstance(spec, DatasetSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n spec_prop_list.append('**Neurodata 
Type:** %s' % str(spec.data_type_def))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('linkable', None) is not None and 'linnkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add group properties\n if isinstance(spec, GroupSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n ntype = str(spec.data_type_def)\n spec_prop_list.append('**Neurodata Type:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(ntype),\n ntype))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('linkable', None) is not None and 'linkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add attribute spec properites\n if isinstance(spec, AttributeSpec):\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('required', None) is not None and 'required' not in ignore_keys:\n spec_prop_list.append('**Required:** %s' % str(spec['required']))\n if spec.get('value', None) is not None and 'value' not in ignore_keys:\n spec_prop_list.append('**Value:** %s' % str(spec['value']))\n if spec.get('default_value', None) is not None and 'default_value' not in ignore_keys:\n spec_prop_list.append('**Default Value:** %s' % str(spec['default_value']))\n\n # Add common properties\n if spec.get('default_name', None) is not None:\n spec_prop_list.append('**Default Name:** %s' % str(spec['default_name']))\n if spec.get('name', None) is not None:\n spec_prop_list.append('**Name:** %s' % str(spec['name']))\n\n # Add custom 
items if necessary\n if append_items is not None:\n spec_prop_list += append_items\n\n # Render the specification properties list\n spec_doc = ''\n if len(spec_prop_list) > 0:\n spec_doc += newline\n for dp in spec_prop_list:\n spec_doc += newline + '- ' + dp\n spec_doc += newline\n # Return the rendered list\n return spec_doc", "def extract_object_properties(segmented_image_path, intensity_image_path, image_name, xy_scale, z_scale):\n\n print('Extracting object properties for {image_name}'.format(image_name=image_name))\n\n # import packages needed for object extraction\n from skimage.io import imread\n from scipy.ndimage import label as ndi_label\n from skimage import measure\n\n # read in images\n segmented_image = imread(segmented_image_path)\n intensity_image = imread(intensity_image_path)\n\n # label connected components\n labeled, num_features = ndi_label(segmented_image)\n\n # measure properties\n region_properties = measure.regionprops(labeled, intensity_image = intensity_image)\n\n object_data_list = []\n\n for prop in region_properties:\n\n # apply the z scale and xy scales to the centroid and coordinates lists\n centroid = list(prop.centroid)\n centroid_scaled = [centroid[0] * z_scale, centroid[1]*xy_scale, centroid[2] * xy_scale]\n\n coords = prop.coords.tolist()\n coords_scaled = [[coord[0]*z_scale, coord[1]* xy_scale, coord[2]*xy_scale] for coord in coords ]\n\n # create a dict containing object properties\n object_properties_dict = {\n 'area': int(prop.area),\n 'min_intensity' : int(prop.min_intensity),\n 'max_intensity' : int(prop.max_intensity),\n 'mean_intensity' : int(prop.mean_intensity),\n 'total_intensity': int(prop.intensity_image.sum()),\n 'object_id' : int(prop.label),\n 'name': image_name,\n 'centroid': centroid_scaled,\n 'coordinates': coords_scaled,\n 'intensity_image': prop.intensity_image.tolist()}\n\n object_data_list.append(object_properties_dict)\n\n return object_data_list", "def packer(x, y, variables={}, appendRule=[True, True], name=''):\n properties = {\n 'name': name,\n 'variables': variables,\n 'x': x,\n 'y': y,\n 'appendRule': appendRule\n }\n return properties", "def product_values(request, property_group_id, template_name=\"manage/properties/pg_product_values.html\"):\n property_group = lfs_get_object_or_404(PropertyGroup, pk=property_group_id) \n all_properties = property_group.properties.order_by(\"groupspropertiesrelation\")\n products = [] \n for product in property_group.products.all():\n properties = []\n for property in all_properties:\n # Try to get the value, if it already exists.\n try:\n ppv = ProductPropertyValue.objects.get(property = property, product=product)\n except ProductPropertyValue.DoesNotExist:\n value = \"\"\n else: \n value = ppv.value\n \n # mark selected options \"selected\"\n options = []\n for option in property.options.all(): \n options.append({\n \"id\" : option.id,\n \"name\" : option.name,\n \"selected\" : str(option.id) == value\n })\n \n properties.append({\n \"id\" : property.id,\n \"name\" : property.name,\n \"type\" : property.type,\n \"is_select_field\" : property.is_select_field,\n \"options\" : options,\n \"value\" : value,\n })\n\n products.append({\n \"id\" : product.id,\n \"name\" : product.get_name(),\n \"properties\" : properties,\n })\n \n return render_to_string(template_name, RequestContext(request, {\n \"property_group\" : property_group,\n \"products\" : products,\n \"all_properties\" : all_properties, \n }))", "def make_labels(painting):\n labels = {}\n for dcTitleLang, dcTitle in \\\n 
painting['object']['proxies'][0]['dcTitle'].iteritems():\n labels[dcTitleLang] = {'language': dcTitleLang, 'value': dcTitle[0]}\n return labels", "def add_data(self, d, prop_title):\r\n ac = vtk.vtkDoubleArray()\r\n ac.SetName(prop_title)\r\n for iac in d.flatten(order='C'):\r\n ac.InsertNextTuple1(iac)\r\n self.Grid.GetCellData().AddArray(ac)", "def SaveProps( self, props_dict, for_drag = False ):\n super( PlotWidget, self ).SaveProps( props_dict, for_drag = for_drag )\n\n for k in ( 'timeValue', ):\n props_dict[ k ] = getattr( self, k )\n\n for k in ( 'dataSetSelections', ):\n if hasattr( self, k ):\n cur_attr = getattr( self, k )\n\tif isinstance( cur_attr, dict ):\n\t for name in cur_attr.keys():\n\t if isinstance( name, DataSetName ):\n\t cur_value = cur_attr[ name ]\n\t del cur_attr[ name ]\n\t cur_attr[ name.name ] = cur_value\n\t #end for name\n\t#end if isinstance( cur_value, dict )\n\n\tprops_dict[ k ] = cur_attr\n #end if hasattr( self, k )\n #end for k", "def _get_object_properties(self):\n super()._get_object_properties()\n add_prefix(root=self.root, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def plotProp(pdict, title=None, sameax=True, showmean=True, \n bounds=[None,None]):\n try:\n pdict.pop('all stats')\n except:\n pass\n spk, groups = [], list(pdict.keys())\n fig = plt.figure()\n c_colors = {}\n \n if sameax:\n ax = fig.add_subplot(111)\n for g in range(len(groups)):\n sofar = []\n for cell in pdict[groups[g]].keys():\n if cell not in c_colors.keys():\n c_colors[cell] = np.random.random(3)\n this = [u for u in pdict[groups[g]][cell][0]]\n if len(pdict[groups[g]][cell]) > 1:\n for sp in pdict[groups[g]][cell][1]:\n this.append(sp)\n ax.plot([i for i in np.random.normal(loc=g, scale=0.1, size=len(this))], this, 'o',\n color=c_colors[cell], label=groups[g], alpha=0.3,\n markeredgecolor='none', markersize=1)\n for t in this:\n sofar.append(t)\n if showmean:\n ax.plot([g-.5,g+.5], [np.mean(sofar), np.mean(sofar)],\n '--', color='black', lw=2)\n # Cosmetics\n plt.xticks(range(len(groups)), groups, rotation=30)\n plt.ylim([bounds[0], bounds[1]])\n \n else:\n plots = [fig.add_subplot(1, len(groups)+1, p) for p in range(len(groups))]\n for g in range(len(groups)):\n for cell in pdict[groups[g]].keys():\n if cell not in c_colors.keys():\n c_colors[cell] = np.random.random(3)\n this = [u for u in pdict[groups[g]][cell][0]]\n if len(pdict[groups[g]][cell]) > 1:\n for sp in pdict[groups[g]][cell][1]:\n this.append(sp)\n plots[g].plot([i+g for i in np.random.random(len(this))], this, 'o',\n color=c_colors[cell], label=groups[g], alpha=0.3,\n markeredgecolor='none')\n \n if title:\n plt.title(title)\n plt.show()\n return", "def add_to_format(existing_format, dict_of_properties, workbook):\n new_dict={}\n for key, value in existing_format.__dict__.iteritems():\n if (value != 0) and (value != {}) and (value != None):\n new_dict[key]=value\n del new_dict['escapes']\n\n return(workbook.add_format(dict(new_dict.items() + dict_of_properties.items())))", "def register_props():\n props_obj = HistoryProps()\n\n bpy.types.Scene.batchapps_history = \\\n bpy.props.PointerProperty(type=HistoryDisplayProps)\n props_obj.display = bpy.context.scene.batchapps_history\n\n return props_obj", "def display_properties(self, display_properties):\n\n self._display_properties = display_properties", "def make_recursive_propdict(wcroot,\r\n output,\r\n rex = re.compile(\"Properties on '(.*)':\")):\r\n lines = filter(None, output.split('\\n'))\r\n pdict = {}\r\n while lines:\r\n line = 
lines.pop(0)\r\n m = rex.match(line)\r\n if not m:\r\n raise ValueError, \"could not parse propget-line: %r\" % line\r\n path = m.groups()[0]\r\n wcpath = wcroot.join(path, abs=1)\r\n propnames = []\r\n while lines and lines[0].startswith(' '):\r\n propname = lines.pop(0).strip()\r\n propnames.append(propname)\r\n assert propnames, \"must have found properties!\"\r\n pdict[wcpath] = svncommon.PropListDict(wcpath, propnames)\r\n return pdict", "def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template", "def set_properties(self, property_dict):\n self.properties.update(property_dict)", "def get_editable_properties(self):\n return (\n\t\t{'title':'Title', 'id':'title', 'type':'line', 'description':'The title of this instance', 'required':1,\n\t\t\t'value':getattr(self.aq_base, 'title', utilities.get_type_default('line'))},\n\t)", "def proplist(self, rec=0):\r\n if rec:\r\n res = self._svn('proplist -R')\r\n return make_recursive_propdict(self, res)\r\n else:\r\n res = self._svn('proplist')\r\n lines = res.split('\\n')\r\n lines = map(str.strip, lines[1:])\r\n return svncommon.PropListDict(self, lines)", "def define_custom_properties(self, p: dict):\n for k, v in p.items():\n self._properties.define_property(name=k, **v)", "def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number", "def set(self, prop, value):\r\n\r\n prop_parts = prop.split(\".\")\r\n if self.copy_dict:\r\n new_dict = copy.deepcopy(self.obj)\r\n else:\r\n new_dict = self.obj\r\n pointer = None\r\n parts_length = len(prop_parts) - 1\r\n for i, part in enumerate(prop_parts):\r\n if pointer is None and i == parts_length:\r\n new_dict[part] = value\r\n elif pointer is None:\r\n pointer = new_dict.get(part)\r\n elif i == parts_length:\r\n pointer[part] = value\r\n else:\r\n pointer = pointer.get(part)\r\n return new_dict", "def properties(self):\n raise NotImplementedError", "def mdAveragePropertiesList(self):\n\t\tpass", "def test_properties_stats_group_by_group_by_and_sub_group_by_get(self):\n pass", "def __init__(self, sheet_type, properties):\n super(SheetSpec,self).__init__(sheet_type)\n\n if 'level' not in properties:\n raise Exception(\"SheetSpec always requires 'level' property.\")\n\n\n properties = [(k, properties[k]) for k in self.name_ordering\n if k in properties]\n\n self.sheet_type = sheet_type\n self.properties = OrderedDict(properties)", "def get_props_from_doc(self, cls, id, doc):\r\n obj_node = doc.getElementsByTagName('object')[0]\r\n if not cls:\r\n class_name = obj_node.getAttribute('class')\r\n cls = find_class(class_name)\r\n if not id:\r\n id = obj_node.getAttribute('id')\r\n props = {}\r\n for prop_node in 
obj_node.getElementsByTagName('property'):\r\n prop_name = prop_node.getAttribute('name')\r\n prop = cls.find_property(prop_name)\r\n value = self.decode_value(prop, prop_node)\r\n value = prop.make_value_from_datastore(value)\r\n if value != None:\r\n props[prop.name] = value\r\n return (cls, props, id)", "def get_property_setters(self, doclist):\n\t\tfrom webnotes.utils import cstr\n\t\tproperty_dict = {}\n\t\t# final property dict will be\n\t\t# {\n\t\t#\tdoc_type: {\n\t\t#\t\tfieldname: [list of property setter dicts]\n\t\t#\t}\n\t\t# }\n\n\t\tdoc_type_list = list(set(\n\t\t\td.doctype=='DocType' and d.name or d.parent\n\t\t\tfor d in doclist))\n\t\tin_string = '\", \"'.join(doc_type_list)\n\t\tfor ps in webnotes.conn.sql(\"\"\"\\\n\t\t\tSELECT doc_type, field_name, property, property_type, value\n\t\t\tFROM `tabProperty Setter`\n\t\t\tWHERE doc_type IN (\"%s\")\"\"\" % in_string, as_dict=1):\n\t\t\tproperty_dict.setdefault(ps.get('doc_type'),\n\t\t\t\t\t{}).setdefault(cstr(ps.get('field_name')), []).append(ps)\n\n\t\treturn property_dict, doc_type_list", "def get_params(self):\n return {'physical_properties_actor': {'group': self.group}}", "def setDisplayProperties(self, x, y, textcolor, bgcolor):\n poolt = [\"ZombieBufPool\", \"BufPool\", \"ZombiePool\", \"Pool\"]\n if self.gobj.mobj.parent.className in poolt:\n self.setGeometry(0, 30,\n self.gobj.boundingRect().width(),\n self.gobj.boundingRect().height())\n else:\n self.setGeometry(x, y,\n self.gobj.boundingRect().width(),\n self.gobj.boundingRect().height())\n self.bg.setBrush(QtGui.QBrush(bgcolor))\n self.setFlag(QGraphicsItem.ItemIsMovable, False)" ]
[ "0.5784708", "0.5752772", "0.5563086", "0.55042255", "0.54970247", "0.54590595", "0.5275027", "0.52564776", "0.51849294", "0.51720124", "0.50716716", "0.5064659", "0.5059839", "0.5025119", "0.50181776", "0.4982828", "0.49804395", "0.4961031", "0.49296808", "0.49119613", "0.49045804", "0.49042162", "0.48849696", "0.48849696", "0.4884425", "0.48422155", "0.4840741", "0.48241746", "0.48188284", "0.4806504", "0.47989777", "0.47920737", "0.4780873", "0.4779126", "0.47741032", "0.4742863", "0.47392625", "0.47392625", "0.47392625", "0.4733013", "0.47327596", "0.4732215", "0.47055292", "0.47029975", "0.4699602", "0.46956533", "0.46842706", "0.46673748", "0.46524194", "0.46462515", "0.46388468", "0.46319056", "0.46254689", "0.4621994", "0.461955", "0.46174586", "0.4613492", "0.4612305", "0.46092296", "0.46002182", "0.4593038", "0.45872474", "0.4585906", "0.45812327", "0.45803973", "0.45785987", "0.45765585", "0.4575616", "0.4575616", "0.4568945", "0.45670664", "0.456345", "0.45601135", "0.45575318", "0.45549545", "0.45502514", "0.4546686", "0.4546147", "0.454581", "0.45374483", "0.45362806", "0.45236945", "0.45193374", "0.45171598", "0.45112064", "0.45100966", "0.45097548", "0.45096597", "0.45079768", "0.45073718", "0.45028225", "0.4497828", "0.44946453", "0.44941804", "0.44908628", "0.44883358", "0.44874242", "0.44866863", "0.44785327", "0.44757563" ]
0.50260586
13
Parses a tensorflow.SequenceExample into an image and caption.
def parse_sequence_example(serialized, image_id, image_feature, caption_feature):
    context, sequence = tf.parse_single_sequence_example(
        serialized,
        context_features={
            image_id: tf.FixedLenFeature([], dtype=tf.int64),
            image_feature: tf.FixedLenFeature([], dtype=tf.string)
        },
        sequence_features={
            caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),
        })

    encoded_image_id = context[image_id]
    encoded_image = context[image_feature]
    caption = sequence[caption_feature]
    return encoded_image_id, encoded_image, caption
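A minimal sketch of the writer side that would produce the `serialized` input consumed above, assuming hypothetical feature key names; only the standard tf.train proto-building API is used.

import tensorflow as tf

def make_sequence_example(image_id_value, encoded_image_bytes, caption_ids,
                          image_id_key="image/id",            # hypothetical key names
                          image_feature_key="image/data",
                          caption_feature_key="image/caption_ids"):
    # Context holds per-example scalars: the numeric image id and the encoded image bytes.
    context = tf.train.Features(feature={
        image_id_key: tf.train.Feature(
            int64_list=tf.train.Int64List(value=[image_id_value])),
        image_feature_key: tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[encoded_image_bytes])),
    })
    # The caption is stored as a variable-length list of int64 token ids.
    feature_lists = tf.train.FeatureLists(feature_list={
        caption_feature_key: tf.train.FeatureList(feature=[
            tf.train.Feature(int64_list=tf.train.Int64List(value=[t]))
            for t in caption_ids
        ]),
    })
    return tf.train.SequenceExample(context=context, feature_lists=feature_lists)

# make_sequence_example(7, b"...jpeg bytes...", [1, 42, 5]).SerializeToString()
# yields a string that parse_sequence_example above can decode, given the same keys.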
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_sequence_example(serialized, image_feature, caption_feature):\n\tcontext, sequence = tf.parse_single_sequence_example(\n\t\t\tserialized,\n\t\t\tcontext_features={\n\t\t\t\t\timage_feature: tf.FixedLenFeature([], dtype=tf.string)\n\t\t\t},\n\t\t\tsequence_features={\n\t\t\t\t\tcaption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n\t\t\t})\n\n\tencoded_image = context[image_feature]\n\tcaption = sequence[caption_feature]\n\treturn encoded_image, caption", "def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/fixation_pt': tf.FixedLenFeature([2], tf.float32)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # Convert from uint8 -> float32 and map onto range [0, 1].\n image = tf.cast(image, tf.float32) * (1. / 255)\n\n # Standardize image.\n image = tf.image.per_image_standardization(image)\n\n # Apply data augmentation.\n if (self.mode == tf.estimator.ModeKeys.TRAIN\n and self.params['train_with_distortion']):\n # Randomly flip the image, zero-pad with four pixels along\n # each edge, and take a random 32 x 32 crop.\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)\n image = tf.image.crop_to_bounding_box(image,\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n 32, 32)\n\n return image, label", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/synset': tf.FixedLenFeature([], tf.string),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # VGG preprocessing 
borrowed from slim; includes data augmentation so train_with_distortion should be set to True.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n assert self.params['train_with_distortion'] == True\n is_training = True\n else:\n is_training = False\n image = vgg_preprocess_image(image, 224, 224, is_training=is_training)\n\n return image, label", "def prepare_example(image_path, annotations, label_map_dict):\n print(\"encoding %s\" % image_path)\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_png)\n image = pil.open(encoded_png_io)\n\n if image.format != 'PNG':\n raise ValueError('Image format error')\n\n key = hashlib.sha256(encoded_png).hexdigest()\n # obtain attributes\n width, height = image.size\n img_filename = image_path.split('/')[-1]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n occlud = []\n\n xmin.append(int(annotations[2]) / width)\n ymin.append(int(annotations[3]) / height)\n xmax.append(int(annotations[4]) / width)\n ymax.append(int(annotations[5]) / height)\n class_name = annotations[1]\n classes_text.append(class_name)\n classes.append(label_map_dict[class_name])\n classes_text = [class_text.encode('utf-8') for class_text in classes_text]\n trun, occ = annotations[6].split(',')\n truncated.append(int(trun))\n occlud.append(int(occ))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_png),\n 'image/format': dataset_util.bytes_feature('png'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.int64_list_feature(occlud),\n }))\n return example", "def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n image = train_preprocess_fn(image)\n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label", "def parse_fn(self, example_serialized):\n feature_description = {\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64)\n }\n features = tf.io.parse_single_example(example_serialized, feature_description)\n image = 
tf.io.decode_raw(features['image_raw'], tf.uint8)\n image = tf.cast(image, dtype='float32') / 255.0\n label = tf.cast(features['label'], dtype=tf.int32)\n image = tf.reshape(image, [32, 32, 3])\n if self.is_training:\n image = tf.image.resize_with_crop_or_pad(image, 32 + 8, 32 + 8)\n image = tf.image.random_crop(image, [32, 32, 3])\n image = tf.image.random_flip_left_right(image)\n return image, label", "def dataset_parser(self, value):\n keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.io.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.io.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.io.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.io.parse_single_example(value, keys_to_features)\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\n\n tensors_dict = preprocess_image(\n image_bytes=image_bytes,\n is_training=self.is_training,\n augmentation=self.augmentation,\n use_bfloat16=self.use_bfloat16,\n saturate_uint8=self.saturate_uint8,\n scale_and_center=self.scale_and_center,\n use_default_augment=self.use_default_augment)\n\n # Subtract one so that labels are in [0, 1000).\n label = tf.cast(tf.reshape(parsed['image/class/label'], shape=()) - 1,\n dtype=tf.int32)\n tensors_dict['label'] = label\n\n return tensors_dict", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. 
/ 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label", "def parse_train(self, proto, height, width):\n _, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n context_features=self._context_features,\n sequence_features=self._sequence_features)\n\n # Deserialize images to float32 tensors.\n images = tf.map_fn(\n _deserialize_png, sequence_parsed['images'], dtype=tf.float32)\n\n # Resize images.\n if height is not None and width is not None:\n images = smurf_utils.resize(images, height, width, is_flow=False)\n\n return {'images': images}", "def _parse_example(self, example, scale_to_0_1: bool = False):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], self.serialized_image_raw_dtype)\n image = tf.reshape(image, (self.image_width, self.image_width, self.image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], self.serialized_mask_raw_dtype)\n mask = tf.reshape(mask, (self.image_width, self.image_width, self.mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask", "def create_sequence_example(inner_image_path,\n inner_sample):\n\n # serialize a pointer to the disk location of the image features\n # copying data for every training example would consume too much storage\n image_path_feature = tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[bytes(inner_image_path, \"utf-8\")]))\n\n # add all other tokens to the tf record\n words_feature = tf.train.FeatureList(\n feature=[tf.train.Feature(\n int64_list=tf.train.Int64List(value=[t])) for t in inner_sample.words])\n tags_feature = tf.train.FeatureList(\n feature=[tf.train.Feature(\n int64_list=tf.train.Int64List(value=[t])) for t in inner_sample.tags])\n\n # create the dictionary of features to save\n context_dict = dict(image_path=image_path_feature)\n sequence_dict = dict(words=words_feature, tags=tags_feature)\n\n # create a sequence example\n return tf.train.SequenceExample(\n context=tf.train.Features(feature=context_dict),\n feature_lists=tf.train.FeatureLists(\n feature_list=sequence_dict))", "def _convert_raw_example(\n self,\n mode_dict: MutableMapping[str, Any],\n example: Mapping[str, Any]) -> ProcessedExample:\n img_path = example['image_path_or_name']\n base_name = os.path.basename(img_path)\n img_fobj = example.get('image_fobj', tf.io.gfile.GFile(img_path, 'rb'))\n img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,\n filename=base_name)\n\n img_format = 'JPEG'\n key = hashlib.sha256(img_bytes.read()).hexdigest()\n img_bytes.seek(0)\n\n bboxes = example['bbox_info']\n processed_bboxes = []\n\n img_height = img_shape[0]\n img_width = img_shape[1]\n\n img_id = example.get('image_id', self._get_id('image'))\n mode_dict['images'].append({\n 'id': img_id,\n 'width': img_width,\n 'height': img_height,\n })\n\n for bbox_info in bboxes:\n annotations_bbox = bbox_info['bbox']\n bbox = bbox_utils.BBox(bbox=annotations_bbox,\n fmt=self.builder_config.bbox_format,\n img_width=img_width,\n img_height=img_height)\n label = bbox_info['label']\n if isinstance(label, int):\n text = str(label)\n elif isinstance(label, six.string_types):\n text = label\n label = bbox_info.get('label_id', 
self._get_label_id(text))\n else:\n raise TypeError(\n 'The provided label was not a string or int. Got: {}'.format(\n type(label)))\n\n if label >= self.builder_config.num_labels:\n raise ValueError('Provided label {} for {} is greater than '\n 'the number of classes specified. num_classes: '\n '{}'.format(label,\n base_name,\n self.builder_config.num_labels))\n\n annotation_id = example.get('annotation_id', self._get_id('annotation'))\n bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)\n xmin, xmax, ymin, ymax = bbox.as_tuple()\n bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)\n mode_dict['annotations'].append({\n 'id': annotation_id,\n 'image_id': img_id,\n 'category_id': label,\n 'bbox': annotations_bbox,\n })\n\n processed_bboxes.append({\n 'bbox': tfds.features.BBox(ymin=ymin,\n xmin=xmin,\n ymax=ymax,\n xmax=xmax),\n 'class': {\n 'text': text,\n 'label': label,\n }\n })\n\n return img_id, {\n 'image': {\n 'height': img_width,\n 'width': img_shape[1],\n 'filename': img_path,\n 'source_id': img_id,\n 'encoded': img_bytes,\n 'format': img_format,\n 'key': {\n 'sha256': key,\n },\n 'object': processed_bboxes,\n }\n }", "def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label", "def parser(_, serialized_example):\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 
0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq", "def create_cat_tf_example(label, label_text, img_path, img_name):\n\t\n\twith tf.gfile.FastGFile(img_path + img_name, 'rb') as fid:\n\t encoded_image = fid.read() \n\n\tencoded_image_data = sess.run(resize_image, {encoded_jpg_ph: encoded_image}) # I think this may not be the right way of doing this\n\tb_filename = str.encode(img_name)\n\n\timage_format = b'jpg'\n\txmins = [10.0 / width]\n\txmaxs = [(width - 10) / width]\n\tymins = [10.0 / height]\n\tymaxs = [(height - 10.0) / height]\n\t# classes_text = [str.encode(label_text)]\n\tclasses_text = []\n\tif label_text:\n\t\tclasses_text.append(label_text.encode('utf8'))\n\tclasses = []\n\t# if label == 1:\n\tclasses.append(int(label))\n\t# print(classes_text, classes, b_filename)\n\ttf_example = tf.train.Example(features=tf.train.Features(feature={\n\t\t'image/height': dataset_util.int64_feature(height),\n\t\t'image/width': dataset_util.int64_feature(width),\n\t\t'image/filename': dataset_util.bytes_feature(b_filename),\n\t\t'image/source_id': dataset_util.bytes_feature(b_filename),\n\t\t'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n\t\t# 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n\t\t'image/format': dataset_util.bytes_feature(image_format),\n\t\t'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n\t\t'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n\t\t'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n\t\t'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n\t\t'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n\t\t'image/object/class/label': dataset_util.int64_list_feature(classes),\n\t}))\n\treturn tf_example", "def _parser(serialized_example):\n\n features = tf.compat.v1.parse_single_example(\n serialized_example,\n features={\n 'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),\n 'label': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'category': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),\n })\n\n img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)\n img = tf.reshape(img, [96, 96, 1])\n img = tf.cast(img, tf.float32) # * (1. 
/ 255) # left unnormalized\n\n lab = tf.cast(features['label'], tf.int32)\n cat = tf.cast(features['category'], tf.int32)\n elv = tf.cast(features['elevation'], tf.int32)\n azi = tf.cast(features['azimuth'], tf.int32)\n lit = tf.cast(features['lighting'], tf.int32)\n\n return img, lab, cat, elv, azi, lit", "def convert_to_example(past_traj, future_traj):\n # img = cv2.imread('temp.png')\n # cv2.imshow('img', img)\n # cv2.waitKey(1)\n\n img_bytes = open('temp.png', 'rb').read()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/encoded': _bytes_feature(img_bytes),\n 'past_traj': _bytes_feature(past_traj.tostring()),\n 'future_traj': _bytes_feature(future_traj.tostring())\n }))\n\n return example", "def make_video_test_example(image_shape: Sequence[int] = (263, 320, 3),\n audio_shape: Sequence[int] = (10, 256),\n label: int = 42):\n raw_image_bytes = make_image_bytes(shape=image_shape)\n random_audio = np.random.normal(size=audio_shape).tolist()\n\n seq_example = tf.train.SequenceExample()\n put_int64_to_context(seq_example, label=label, key=LABEL_KEY)\n put_bytes_list_to_feature(\n seq_example, raw_image_bytes, key=IMAGE_KEY, repeat_num=4)\n\n put_float_list_to_feature(seq_example, value=random_audio, key=AUDIO_KEY)\n return seq_example", "def _parse_function(self, example_proto):\n\n # Currently only supports jpeg and png.\n # Need to use this logic because the shape is not known for\n # tf.image.decode_image and we rely on this info to\n # extend label if necessary.\n def _decode_image(content, channels):\n return tf.cond(\n tf.image.is_jpeg(content),\n lambda: tf.image.decode_jpeg(content, channels),\n lambda: tf.image.decode_png(content, channels))\n\n features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/segmentation/class/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n image = _decode_image(parsed_features['image/encoded'], channels=3)\n\n label = None\n if self.split_name != common.TEST_SET:\n label = _decode_image(\n parsed_features['image/segmentation/class/encoded'], channels=1)\n\n image_name = parsed_features['image/filename']\n if image_name is None:\n image_name = tf.constant('')\n\n sample = {\n common.IMAGE: image,\n common.IMAGE_NAME: image_name,\n common.HEIGHT: parsed_features['image/height'],\n common.WIDTH: parsed_features['image/width'],\n }\n\n if label is not None:\n if label.get_shape().ndims == 2:\n label = tf.expand_dims(label, 2)\n elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:\n pass\n else:\n raise ValueError('Input label shape must be [height, width], or '\n '[height, width, 1].')\n\n label.set_shape([None, None, 1])\n\n sample[common.LABELS_CLASS] = label\n\n return sample", "def _format_example(self, image_path=None):\r\n image = tf.io.read_file(image_path)\r\n image = tf.io.decode_jpeg(image)\r\n image = tf.cast(image, tf.float32)\r\n image = tf.image.per_image_standardization(image)\r\n image = tf.reshape(image, (self.img_size, self.img_size, 3))\r\n return image", "def format_example(image, 
label):\n image = tf.cast(image, tf.float32)\n image = (image/127.5) - 1\n image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))\n return image, label", "def parser(record):\n # keys_to_features = {\n # \"image_data\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n # \"date_time\": tf.FixedLenFeature((), tf.int64, default_value=\"\"),\n # \"label\": tf.FixedLenFeature((), tf.int64,\n # default_value=tf.zeros([], dtype=tf.int64)),\n # }\n\n keys_to_features = {\n \"image_data\": tf.FixedLenFeature((), tf.float, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int32,\n default_value=tf.zeros([], dtype=tf.int64)),\n }\n parsed = tf.parse_single_example(record, keys_to_features)\n\n # Perform additional preprocessing on the parsed data.\n image = tf.image.decode_jpeg(parsed[\"image_data\"])\n image = tf.reshape(image, [299, 299, 1])\n label = tf.cast(parsed[\"label\"], tf.int32)\n\n return {\"image_data\": image, \"date_time\": parsed[\"date_time\"]}, label", "def parser(record):\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_len],tf.float32),\n \"target_mask\": tf.FixedLenFeature([tgt_len],tf.float32)\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n _convert_example(example, use_bfloat16)\n\n for k, v in example.items():\n tf.logging.info(\"%s: %s\", k, v)\n\n return example", "def create_tf_example(example, path, class_mapping):\n path = (path + os.sep).encode('ascii')\n filename = example['filename'].encode('ascii')\n image_format = b'jpg'\n \n image = plt.imread(path +filename, \"jpg\") \n height, width = image.shape[:2]\n \n # Encode the jpg to byte form\n with tf.gfile.GFile(path+filename, 'rb') as fid:\n encoded_jpg = bytes(fid.read())\n\n # normalize the box coordinates\n xmins = [box[0]/width for box in example['box_coords']] \n ymins = [box[1]/height for box in example['box_coords']] \n xmaxs = [box[2]/width for box in example['box_coords']]\n ymaxs = [box[3]/height for box in example['box_coords']]\n\n classes_text = [cls.encode('ascii') for cls in example[\"class\"]]\n classes = [class_mapping[cls] for cls in example[\"class\"]]\n\n # create the example\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height' : dataset_util.int64_feature(height),\n 'image/width' : dataset_util.int64_feature(width),\n 'image/filename' : dataset_util.bytes_feature(filename),\n 'image/source_id' : dataset_util.bytes_feature(filename),\n 'image/encoded' : dataset_util.bytes_feature(encoded_jpg),\n 'image/format' : dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text' : dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label' : dataset_util.int64_list_feature(classes),\n }))\n return tf_example", "def parse_attention_example(tf_example):\n\n # specify features in attention example \n features_map = {\n 'sequence_raw': tf.FixedLenFeature([], tf.string),\n 'label_raw': tf.FixedLenFeature([], tf.string),\n 'annotation_raw': tf.FixedLenFeature([], tf.string)}\n\n # parse tf example for internal tensors\n parsed_example = tf.parse_single_example(tf_example, 
features_map)\n\n # decode examples\n sequence_raw = tf.decode_raw(parsed_example['sequence_raw'], tf.uint8)\n label_raw = tf.decode_raw(parsed_example['label_raw'], tf.uint8)\n annotation_raw = tf.decode_raw(parsed_example['annotation_raw'], tf.float32)\n\n # parsed tensors are flat so reshape if needed\n # cast to floats for attention task\n sequence = tf.cast(tf.reshape(sequence_raw, SEQUENCE_SHAPE), dtype=tf.float32)\n label = tf.cast(label_raw, dtype=tf.float32)\n annotation = tf.reshape(annotation_raw, ANNOTATION_SHAPE)\n\n return {'sequence': sequence, 'label': label, 'annotation': annotation}", "def preprocess(example, num_classes=10, is_training=True):\n features = {'scores': tf.VarLenFeature(tf.float32),\n 'image': tf.FixedLenFeature((), tf.string)}\n parsed = tf.parse_single_example(example, features)\n image = tf.image.decode_jpeg(parsed['image'], channels=3)\n image = nima.preprocess_image(image, is_training=is_training)\n scores = parsed['scores']\n scores = tf.sparse_tensor_to_dense(scores)\n scores = tf.reshape(scores, [num_classes])\n scores = scores / tf.reduce_sum(scores, axis=-1, keepdims=True)\n return image, scores", "def dict_to_tf_example(label_map_dict):\n filename = label_map_dict[0]\n img_path = os.path.join(FLAGS.image_data_dir, filename)\n\n try:\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n except:\n logging.warning('Image Not Found %s', img_path)\n return None\n\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n (witdh, height) = image.size\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n sentence_txt = label_map_dict[1]\n\n\n sentences = []\n f = open('dictionary.json', 'r')\n dictionary = f.read()\n dictionary = json.loads(dictionary)\n for index, _ in enumerate(sentence_txt):\n sentence = []\n for sen in sentence_txt[index].split(' '):\n try:\n sentence.append(dictionary[sen])\n except KeyError:\n sentence.append(dictionary['UNK'])\n sentences.append(sentence)\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(witdh),\n 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')),\n 'image/score_0': dataset_util.int64_list_feature(sentences[0]),\n 'image/score_1': dataset_util.int64_list_feature(sentences[1]),\n 'image/score_2': dataset_util.int64_list_feature(sentences[2]),\n 'image/score_3': dataset_util.int64_list_feature(sentences[3]),\n 'image/score_4': dataset_util.int64_list_feature(sentences[4]),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8'))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example", "def _generate_examples(self, images_path, annotations_path):\n caption_file = '/captions_val2014.json'\n with tf.io.gfile.GFile(annotations_path + caption_file) as f:\n data = json.load(f)\n path_head = images_path + '/COCO_val2014_'\n ann = data['annotations'] # Contains annotations\n \n img_names = [path_head + '%012d.jpg' % i['image_id'] for i in ann] \n captions = ['<start> ' + i['caption'] + ' <end>' for i in ann]\n ids = [i['id'] for i in ann]\n \n # The above lines create the captions (start and end tokens), the \n # image names (which consist of the path head and a 12 digit number,\n # right-aligned with the id), and the id to distinguish 
each unique image.\n\n for (i, name) in enumerate(img_names):\n yield ids[i], {\n 'image': name,\n 'caption': captions[i]\n }", "def parse_example(example, image_width:int = 224, image_channels: int = 3, mask_channels: int = 1000, scale_to_0_1: bool = False, serialized_mask_raw_dtype = tf.float64):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], tf.uint8)\n image = tf.reshape(image, (image_width, image_width, image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], serialized_mask_raw_dtype) # tf.uint8)\n mask = tf.reshape(mask, (image_width, image_width, mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask", "def _read_from_file(queue, config, class_label):\n\t\n\tclass SequenceRecord(object):\n\t\tpass\n\tresult = SequenceRecord()\n\t\n\t# Dimensions of the images and the bytes they each take\n\t# up in the binary file\n\tresult.height = config.image_size\n\tresult.width = config.image_size\n\tresult.depth = config.image_depth\n\tresult.sequence_length = config.num_steps\n\tresult.image_bytes = (result.height * result.width * result.depth)\n\n\tresult.patient_ID_bytes = 5 #uint8\n\n\tinitial_image_name_bytes = 92 #uint8\n\tresult.num_features = config.num_features\n\tresult.one_feature_bytes = 8\n\tresult.feature_bytes = config.num_features * result.one_feature_bytes # float64\n\tresult.coord_bytes = config.num_steps*2*6 # x and y coords, uint32\n\n\trecord_bytes = result.image_bytes * result.sequence_length + result.coord_bytes + result.patient_ID_bytes + initial_image_name_bytes + result.feature_bytes\n\t\n\t# The amount of padding on the image_name must be adjusted based on the number of features\n\t# because the overall number of bytes must be a multiple of 8 for float64 processing of raw output.\n\tincrement = 8 - (record_bytes % 8)\n\tresult.image_name_bytes = initial_image_name_bytes + increment\n\trecord_bytes += increment\n\t\n\t# Create reader with the fixed record length and\n\t# read off one record\n\treader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n\tresult.key, value = reader.read(queue)\n\t# Convert from a string to a vector of uint8 that is record_bytes long.\n\trecord_data = tf.decode_raw(value, tf.uint8, name='decode_raw_uint8')\n\tfeature_data = tf.decode_raw(value, tf.float64, name='decode_raw_float64')\n\tindex = 0\n\tnext_index = result.patient_ID_bytes\n\tresult.subject_id, index = process_slice(index, result.patient_ID_bytes, record_data)\n\tresult.image_name, index = process_slice(index, result.image_name_bytes, record_data)\n\tresult.patch_coords, index = process_slice(index, result.coord_bytes, record_data)\n\n\t# features are taken from float64 stream, they are taken out as a single block of data.\n\tfeature_index = index // result.one_feature_bytes\n\tresult.features, feature_index = process_removal_slice(feature_index, result.num_features, feature_data, config.remove_feature)\n\n\t_ , index = process_slice(index, result.feature_bytes, record_data)\n\tsequence_data = tf.strided_slice(record_data, [index], [record_bytes])\n\n\t# Treat sequence as an image of dimensions [(steps * patch height), width, depth] and normalize per image\n\t# Then reshape back to a single sequence\n\n\twith tf.device(\"/cpu:0\"):\n\t\tnormalized_sequence = 
tf.reshape(sequence_data,\n\t\t\t[result.sequence_length*result.height,result.width, result.depth])\n\t\tnormalized_sequence = tf.image.per_image_standardization(normalized_sequence)\n\n\t\tresult.sequence = tf.reshape(normalized_sequence,\n\t\t\t\t\t\t\t\t[result.sequence_length, result.height * result.width * result.depth]) #result.image_bytes])\n\t\t\t\t\t\t\t\t\n\tresult.sequence = tf.cast(result.sequence, tf.float32)\n\tresult.label = tf.constant(class_label, shape=[1])\n\n\treturn result", "def _convert_to_example(filename, image_buffer, label, height, width):\n\n colorspace = 'RGB'\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),\n 'image/channels': _int64_feature(channels),\n 'image/class/label': _float64_feature(label),\n 'image/class/p1': _float64_feature(label[0:2]),\n 'image/class/p2': _float64_feature(label[2:4]),\n 'image/class/p3': _float64_feature(label[4:6]),\n 'image/class/p4': _float64_feature(label[6:8]),\n 'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),\n 'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),\n 'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))\n return example", "def parser(record):\n parsed = tf.parse_single_example(record, transformed_feature_spec)\n label = parsed.pop(LABEL_KEY)\n return parsed, label", "def _convert_to_example(image_buffer, label, height, width):\n\n channels = 3\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'depth': _int64_feature(channels),\n 'label': _int64_feature(label),\n 'image': _bytes_feature(image_buffer.tostring())}))\n return example", "def parse_record(raw_record, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image/class/text':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]),\n _NUM_CHANNELS)\n\n # Note that tf.image.convert_image_dtype scales the image data to [0, 1).\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]),\n dtype=tf.int32)\n\n return {\"image\": image}, label", "def parse_func(record):\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'image/path': tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'label/path': tf.FixedLenFeature((), tf.string, default_value=''),\n 'height': tf.FixedLenFeature((), tf.int64),\n 'width': tf.FixedLenFeature((), tf.int64)\n }\n\n features = tf.parse_single_example(record, keys_to_features)\n\n image = tf.image.decode_png(features['image/encoded'], channels=3)\n label_dtype = tf.uint8\n label = tf.image.decode_png(features['label/encoded'], channels=1, dtype=label_dtype)\n 
label = tf.reshape(label, tf.convert_to_tensor([features['height'], features['width'], 1]))\n label = tf.squeeze(label)\n\n paths = (features['image/path'], features['label/path'])\n return image, label, paths", "def caption(self, images):\n # Convert single element to list\n values = [images] if not isinstance(images, list) else images\n\n # Open images if file strings\n values = [\n I.Image.open(image) if isinstance(image, str) else image for image in values\n ]\n\n # Feature extraction\n pixels = self.extractor(images=values, return_tensors=\"pt\").pixel_values\n pixels = pixels.to(self.torch_device)\n\n # Run model\n import torch\n\n with torch.no_grad():\n outputs = self.model.generate(\n pixels, max_length=16, num_beams=4, return_dict_in_generate=True\n ).sequences\n\n # Tokenize outputs into text results\n captions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)\n captions = [caption.strip() for caption in captions]\n\n # Return single element if single element passed in\n return captions[0] if not isinstance(images, list) else captions", "def dict_to_tf_example(data, label_map_dict):\n\n encoded_jpg_io = io.BytesIO()\n image = data['image']\n image.save(encoded_jpg_io, \"JPEG\", quality=80)\n encoded_jpg = encoded_jpg_io.getvalue()\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width, height = image.size\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n rotation = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n masks = []\n difficult_obj = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n difficult_obj.append(int(difficult))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n rotation.append(float(obj['rotation']))\n masks.append(obj['mask'])\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n\n mask = np.stack(masks)\n encoded_mask = pn_encode(mask.flatten()).tolist()\n print('mask encode:', mask.shape, '->', len(encoded_mask)) ###\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/rotation': dataset_util.float_list_feature(rotation),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n 'image/segmentation/object': dataset_util.int64_list_feature(encoded_mask),\n 
'image/segmentation/object/class': dataset_util.int64_list_feature(classes),\n }))\n return example", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)", "def extract_info_from_sequence_example(path_to_tfrecord, from_scratch=False):\n assert(os.path.isfile(path_to_tfrecord))\n\n # The csv file containing extraction result\n output_dir = os.path.dirname(path_to_tfrecord)\n yaml_name = '.do_not_modify.dataset_info.yaml'\n csv_name = '.do_not_modify.example_info.csv'\n yaml_filepath = os.path.join(output_dir, yaml_name)\n csv_filepath = os.path.join(output_dir, csv_name)\n\n if not from_scratch \\\n and os.path.isfile(yaml_filepath) \\\n and os.path.isfile(csv_filepath):\n with open(yaml_filepath, 'r') as f:\n dataset_info = yaml.load(f)\n examples_info = pd.read_csv(csv_filepath)\n if verbose:\n print(\"Successfully loaded existing dataset info and examples info.\")\n return dataset_info, examples_info\n else: # from scratch\n if verbose:\n print(\"Extracting dataset info and examples info from scratch\",\n \"(by iterating the sequence examples)...\")\n\n # Some basic information on the dataset\n matrix_bundle_fields = []\n classes = set()\n # For now we only have dataset having 1 single bundle (e.g. 
no video+audio)\n num_bundles = 1\n num_classes = 0\n num_examples = 0\n sequence_size_max = 0\n sequence_size_min = 0\n sequence_size_median = 0\n is_sparse = None # True or False\n # Domain in ['audio_text_or_time_series', 'image_or_vector', 'video']\n # inferred_dataset_domain = None\n\n # Some basic information on each example\n num_timestamps = []\n num_features = []\n num_labels = []\n\n # Begin extracting\n counter = 0\n for se in tf.python_io.tf_record_iterator(path_to_tfrecord):\n sequence_example = tf.train.SequenceExample.FromString(se)\n\n context_feature = sequence_example.context.feature\n feature_lists_container = sequence_example.feature_lists.feature_list\n # Update num_labels\n labels = list(context_feature['label_index'].int64_list.value)\n num_labels.append(len(labels))\n\n if not matrix_bundle_fields:\n matrix_bundle_fields += list(feature_lists_container)\n else: # Make sure that fields name are consistent (coherent)\n assert(all([x in matrix_bundle_fields for x in feature_lists_container]))\n\n # Update classes\n classes = classes.union(set(labels))\n\n dense_key = '0_dense_input'\n sparse_value = '0_sparse_value'\n if dense_key in feature_lists_container:\n if is_sparse:\n raise ValueError(\"Inconsistent sparsity at index {}!\".format(counter))\n elif is_sparse is None:\n is_sparse = False\n key = dense_key\n elif sparse_value in feature_lists_container:\n if is_sparse is not None:\n if not is_sparse:\n raise ValueError(\"Inconsistent sparsity at index {}!\"\\\n .format(counter))\n else:\n is_sparse = True\n key = sparse_value\n\n # Update num_timestamps\n feature_list = feature_lists_container[key]\n num_timestamps.append(_len_feature_list(feature_list))\n # Update num_features\n feature_vec = _get_first_feature(feature_list)\n num_features.append(_len_feature(feature_vec))\n\n counter += 1\n\n examples_info = pd.DataFrame({'num_timestamps': num_timestamps,\n 'num_features': num_features,\n 'num_labels': num_labels})\n\n sequence_sizes = examples_info['num_timestamps']\n sequence_size_max = int(sequence_sizes.max())\n sequence_size_min = int(sequence_sizes.min())\n sequence_size_median = sequence_sizes.median()\n\n dataset_info = {'matrix_bundle_fields': matrix_bundle_fields,\n 'classes': list(classes),\n 'num_bundles': num_bundles,\n 'num_classes': len(classes),\n 'num_examples': examples_info.shape[0],\n 'sequence_size_max': sequence_size_max,\n 'sequence_size_min': sequence_size_min,\n 'sequence_size_median': sequence_size_median,\n 'is_sparse': is_sparse\n }\n examples_info.to_csv(csv_filepath, index=False)\n with open(yaml_filepath, 'w') as f:\n yaml.dump(dataset_info, f)\n return dataset_info, examples_info", "def parse_record_reid(raw_record):\n keys_to_features = {\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'label': tf.FixedLenFeature([], tf.int64)\n }\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n # image = tf.image.decode_image(\n # tf.reshape(parsed['image_raw'], shape=[]), _DEPTH)\n\n image = tf.decode_raw(parsed['image_raw'], tf.uint8)\n # image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image = tf.reshape(image, [_HEIGHT, _WIDTH, 3])\n # image = tf.cast(image, tf.float32) * (1. 
/ 255.0)\n image = tf.cast(image,tf.float32)\n\n label = tf.cast(parsed['label'],tf.int32)\n\n label = tf.one_hot(label, labels_nums, 1, 0)\n # labels={\"seg\":None,\"reid\":label}\n return image, label", "def input_fn(params=None):\n del params\n filenames = gfile.Glob(os.path.join(flags.data_dir, pattern))\n if not filenames:\n raise RuntimeError('No data files found.')\n filename_queue = tf.train.string_input_producer(filenames, shuffle=True)\n reader = tf.TFRecordReader()\n\n _, val = reader.read(filename_queue)\n serialized_input = tf.reshape(val, shape=[1])\n\n image_seq = None\n\n for i in range(0, flags.sequence_length, flags.skip_num):\n image_name = 'image_' + str(i)\n\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n features = {\n pose_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n image_name:\n tf.FixedLenFeature([1], tf.string),\n action_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n joint_pos_name:\n tf.FixedLenFeature([flags.joint_pos_dim], tf.float32)\n }\n else:\n features = {\n image_name: tf.FixedLenFeature([1], tf.string),\n }\n\n parsed_input = tf.parse_example(serialized_input, features)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH), method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n [images, actions, poses, joint_pos] = tf.train.shuffle_batch(\n [image_seq, action_seq, pose_seq, joint_pos_seq],\n batch_size,\n num_threads=4,\n capacity=200 * batch_size,\n min_after_dequeue=batch_size * 10,\n )\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None", "def read_and_decode(filename, is_train=None):\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example, features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n }\n )\n # You can do more image distortion here for training data\n img = tf.decode_raw(features['img_raw'], tf.float32)\n img = tf.reshape(img, [32, 32, 3])\n # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5\n if is_train ==True:\n # 1. Randomly crop a [height, width] section of the image.\n img = tf.random_crop(img, [24, 24, 3])\n\n # 2. 
Randomly flip the image horizontally.\n img = tf.image.random_flip_left_right(img)\n\n # 3. Randomly change brightness.\n img = tf.image.random_brightness(img, max_delta=63)\n\n # 4. Randomly change contrast.\n img = tf.image.random_contrast(img, lower=0.2, upper=1.8)\n\n # 5. Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == False:\n # 1. Crop the central [height, width] of the image.\n img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)\n\n # 2. Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == None:\n img = img\n\n label = tf.cast(features['label'], tf.int32)\n return img, label", "def dict_to_tf_example(data,\n label_map_dict,\n ignore_difficult_instances=False):\n full_path = os.path.join(FLAGS.data_dir, 'IMAGENES', data['filename'])[0:-3] + 'jpg'\n image_ = cv2.imread(full_path)\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n image_id = get_image_id(data['filename'])\n width = int(image_.shape[1])\n height = int(image_.shape[0])\n image_id = get_image_id(data['filename'])\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n area = []\n classes = []\n classes_text = []\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ in label_map_dict:\n x_pos = [int(obj['bndbox']['xmax']), int(obj['bndbox']['xmin'])]\n y_pos = [int(obj['bndbox']['ymax']), int(obj['bndbox']['ymin'])]\n xmin.append((float(min(x_pos))) / width)\n ymin.append((float(min(y_pos))) / height)\n xmax.append((float(max(x_pos))) / width)\n ymax.append((float(max(y_pos))) / height)\n area.append((xmax[-1] - xmin[-1]) * (ymax[-1] - ymin[-1]))\n classes_text.append(name_in_obj_.replace(' ', '').encode('utf8'))\n classes.append(int(label_map_dict[name_in_obj_]))\n\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image/height':\n tfrecord_util.int64_feature(height),\n 'image/width':\n tfrecord_util.int64_feature(width),\n 'image/filename':\n tfrecord_util.bytes_feature(data['filename'].encode('utf8')),\n 'image/source_id':\n tfrecord_util.bytes_feature(str(image_id).encode('utf8')),\n 'image/key/sha256':\n tfrecord_util.bytes_feature(key.encode('utf8')),\n 'image/encoded':\n tfrecord_util.bytes_feature(encoded_jpg),\n 'image/format':\n tfrecord_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin':\n tfrecord_util.float_list_feature(xmin),\n 'image/object/bbox/xmax':\n tfrecord_util.float_list_feature(xmax),\n 'image/object/bbox/ymin':\n tfrecord_util.float_list_feature(ymin),\n 'image/object/bbox/ymax':\n tfrecord_util.float_list_feature(ymax),\n 'image/object/area':\n tfrecord_util.float_list_feature(area),\n 'image/object/class/text':\n tfrecord_util.bytes_list_feature(classes_text),\n 'image/object/class/label':\n tfrecord_util.int64_list_feature(classes),\n }))\n return example", "def _parse_single_example(example, options):\n # Initialize `keys_to_features`.\n keys_to_features = {\n TFExampleFields.img_id: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.annot_id: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.answer_label: tf.io.FixedLenFeature([], tf.int64),\n TFExampleFields.img_bbox_label: 
tf.io.VarLenFeature(tf.string),\n TFExampleFields.img_bbox_score: tf.io.VarLenFeature(tf.float32),\n TFExampleFields.img_bbox_feature: tf.io.VarLenFeature(tf.float32),\n TFExampleFields.question: tf.io.VarLenFeature(tf.string),\n TFExampleFields.question_tag: tf.io.VarLenFeature(tf.int64),\n }\n for bbox_key in TFExampleFields.img_bbox_field_keys:\n bbox_field = os.path.join(TFExampleFields.img_bbox_scope, bbox_key)\n keys_to_features[bbox_field] = tf.io.VarLenFeature(tf.float32)\n for i in range(1, 1 + NUM_CHOICES):\n keys_to_features.update({\n TFExampleFields.cls_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32),\n TFExampleFields.question_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32),\n TFExampleFields.answer_choice + '_%i' % i:\n tf.io.VarLenFeature(tf.string),\n TFExampleFields.answer_choice_tag + '_%i' % i:\n tf.io.VarLenFeature(tf.int64),\n TFExampleFields.answer_choice_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32)\n })\n\n # Initialize `items_to_handlers`.\n items_to_handlers = {\n InputFields.img_id:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_id,\n default_value=''),\n InputFields.annot_id:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.annot_id,\n default_value=''),\n InputFields.answer_label:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.answer_label,\n default_value=-1),\n InputFields.object_bboxes:\n tfexample_decoder.BoundingBox(\n keys=TFExampleFields.img_bbox_field_keys,\n prefix=TFExampleFields.img_bbox_scope),\n InputFields.object_labels:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_label,\n default_value=''),\n InputFields.object_scores:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_score,\n default_value=0),\n InputFields.question:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.question,\n default_value=PAD),\n InputFields.question_tag:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.question_tag,\n default_value=-1),\n TFExampleFields.img_bbox_feature:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_feature,\n default_value=0),\n }\n\n for i in range(1, 1 + NUM_CHOICES):\n tensor_key = TFExampleFields.cls_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n tensor_key = TFExampleFields.question_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n tensor_key = TFExampleFields.answer_choice + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=PAD)\n tensor_key = TFExampleFields.answer_choice_tag + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=-1)\n tensor_key = TFExampleFields.answer_choice_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n if options.decode_jpeg:\n keys_to_features.update({\n TFExampleFields.img_encoded: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.img_format: tf.io.FixedLenFeature([], tf.string),\n })\n items_to_handlers.update({\n InputFields.img_data:\n tfexample_decoder.Image(image_key=TFExampleFields.img_encoded,\n format_key=TFExampleFields.img_format,\n shape=None)\n })\n\n # Decode example.\n example_decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,\n items_to_handlers)\n\n output_keys = example_decoder.list_items()\n output_tensors = example_decoder.decode(example)\n output_tensors = [\n x if 
x.dtype != tf.int64 else tf.cast(x, tf.int32) for x in output_tensors\n ]\n decoded_example = dict(zip(output_keys, output_tensors))\n return _update_decoded_example(decoded_example, options)", "def parse_sequence_example(serialized,\n context_features=None,\n sequence_features=None,\n example_names=None,\n name=None):\n # pylint: disable=line-too-long\n if not (context_features or sequence_features):\n raise ValueError(\"Both `context_features` and `sequence_features` argument \"\n \"are None, but at least one should have values.\")\n context_params = _ParseOpParams.from_features(\n context_features, [VarLenFeature, FixedLenFeature, RaggedFeature])\n feature_list_params = _ParseOpParams.from_features(\n sequence_features,\n [VarLenFeature, FixedLenSequenceFeature, RaggedFeature])\n\n with ops.name_scope(name, \"ParseSequenceExample\",\n [serialized, example_names]):\n outputs = _parse_sequence_example_raw(serialized, example_names,\n context_params, feature_list_params,\n name)\n context_output, feature_list_output, feature_list_lengths = outputs\n\n if context_params.ragged_keys:\n context_output = _construct_tensors_for_composite_features(\n context_features, context_output)\n if feature_list_params.ragged_keys:\n feature_list_output = _construct_tensors_for_composite_features(\n sequence_features, feature_list_output)\n\n return context_output, feature_list_output, feature_list_lengths", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'env': tf.FixedLenFeature([1, 4], tf.int64),\n # 'env_segment_number': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_cpu': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_mem': tf.FixedLenFeature([], tf.int64),\n # 'query_plan_ops': tf.VarLenFeature(tf.string),\n # 'query_table_size': tf.VarLenFeature(tf.float32),\n # 'segment_cpu_usage': tf.VarLenFeature(tf.float32),\n 'label': tf.FixedLenFeature([], tf.float32)\n })\n env = tf.cast(features['env'], tf.float32)\n # image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # # Reshape from [depth * height * width] to [depth, height, width].\n # image = tf.cast(\n # tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n # tf.float32)\n label = tf.cast(features['label'], tf.float32)\n reshape_label = tf.reshape(features['label'], (1,1))\n return env, reshape_label", "def build_example((key, label, img_bytes)):\n try:\n features = {\n 'id': key,\n 'label': label,\n 'feat': img_bytes,\n }\n yield features\n\n except Exception as e:\n examples_failed.inc()\n logging.error(e, exc_info=True)\n pass", "def _process_frames(dataset_info, example):\n frames = tf.concat(example['frames'], axis=0)\n frames = tf.map_fn(_convert_frame_data, tf.reshape(frames, [-1]), dtype=tf.float32, back_prop=False)\n img_dims = (dataset_info.frame_size, dataset_info.frame_size, 3)\n frames = tf.reshape(frames, (-1, dataset_info.sequence_size) + img_dims)\n\n if (dataset_info.frame_size != 64):\n frames = tf.reshape(frames, (-1, ) + img_dims) # (B * S, W, H, C)\n default_img_dims = (64, 64, 3)\n frames = tf.image.resize_bilinear(frames, default_img_dims[:2], align_corners=True)\n frames = tf.reshape(frames, (-1, dataset_info.sequence_size) + default_img_dims)\n \n return frames", "def _creatExamplesTensorData(self, examples):\n\n images = []\n \n images2 = []\n images3 = []\n images4 = []\n images5 = [] \n labels = []\n for (img_idx, label) in examples:\n img = self.dataset[img_idx][0]\n #print(img)\n ##exit(0)\n if self.load:\n img = Image.fromarray(img)\n else:\n 
img = read_image(img)\n #print(img.size)\n #print(np.array(img).shape)\n #exit(0)\n if self.transform is not None:\n img1 = self.transform(img)\n\n img2 = self.transform_test(img)\n img3 = self.transform_test(img)\n img4 = self.transform_test(img)\n img5 = self.transform_test(img) \n #print((img2-img1).abs().sum(),(img3-img1).abs().sum(),(img2-img3).abs().sum())\n #print(img.shape,'located in test_loader.py at 146')\n #exit(0)\n images.append(img1)\n \n images2.append(img2)\n images3.append(img3)\n images4.append(img4)\n images5.append(img5) \n labels.append(label)\n images = torch.stack(images, dim=0)\n\n images2 = torch.stack(images2, dim=0)\n images3 = torch.stack(images3, dim=0)\n images4 = torch.stack(images4, dim=0)\n images5 = torch.stack(images5, dim=0) \n labels = torch.LongTensor(labels)\n return images, images2,images3,images4,images5,labels", "def imagenet_parser(value, image_size, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.parse_single_example(value, keys_to_features)\n\n image_buffer = tf.reshape(parsed['image/encoded'], shape=[])\n\n xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0)\n # Note that ordering is (y, x)\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n image = image_preprocessing(\n image_buffer=image_buffer,\n bbox=bbox,\n image_size=image_size,\n is_training=is_training\n )\n\n # Labels are in [1, 1000] range\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32)\n\n return image, label", "def decode(value):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value='')\n }\n data = tf.parse_single_example(value, keys_to_features)\n return data", "def read_and_convert(self, result_file):\n if self._example_pointer == self._num_examples:\n return None\n path_to_image_file = self._path_to_image_files[self._example_pointer]\n\n # Get image index\n index = int(path_to_image_file.split('/')[-1].split('.')[0])\n self._example_pointer += 1\n\n label_of_digits = result_file[index].strip().split(' ')\n\n # for digits: 10 represents no digit, for letters: 0 represents no letter\n digits = [10, 10, 10, 10]\n letters = [0, 0, 0, 0, 0]\n idd = 0\n idl = 0\n for i in range(len(label_of_digits)):\n if i in [0, 4, 5, 6]:\n digits[idd] = int(label_of_digits[i]) # label 10 is essentially digit zero\n idd += 1\n if i in [1, 2, 
3, 7, 8]:\n letters[idl] = int(label_of_digits[i])\n idl += 1\n\n image = Image.open(path_to_image_file)\n image = image.resize([96, 24])\n image = np.array(image).tobytes()\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image': ExampleReader._bytes_feature(image),\n 'digits': tf.train.Feature(int64_list=tf.train.Int64List(value=digits)),\n 'letters': tf.train.Feature(int64_list=tf.train.Int64List(value=letters))\n }))\n return example", "def _generate_elements(example, label):\n\n class_label = None\n parsed = tf.train.Example.FromString(example.numpy())\n if parsed.features.feature[label].int64_list.value:\n val = parsed.features.feature[label].int64_list.value\n if len(val) > 0:\n class_label = val[0]\n else:\n val = parsed.features.feature[label].bytes_list.value\n if len(val) > 0:\n class_label = val[0].decode()\n return (class_label, parsed)", "def make_img_example(img):\n height, width = img.shape[:2]\n example = tf.train.Example(\n features=tf.train.Features(feature={\n \"height\": _int64_feature([height]),\n \"width\": _int64_feature([width]),\n \"image\": _bytes_feature([tf.compat.as_bytes(img.tostring())])\n })\n )\n return example", "def make_example(self, tensor_dict):\n image = tensor_dict['image']\n image = image.tobytes()\n label = tensor_dict['label']\n return tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image':\n tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),\n 'label':\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))\n }))", "def create_tf_example(group, path, label_map):\n\n #load image and extract attributes (width, height, filename)\n with tf.gfile.GFile(os.path.join(path, \"{}\".format(group.filename)), \"rb\") as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n\n filename = group.filename.encode(\"utf8\")\n image_format = b\"jpg\"\n \n #tf.train.Example() expects several objects in lists\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n for index, row in group.object.iterrows():\n #Extract bounding box\n xmins.append(row[\"xmin\"] / width)\n xmaxs.append(row[\"xmax\"] / width)\n ymins.append(row[\"ymin\"] / height)\n ymaxs.append(row[\"ymax\"] / height)\n\n #Extract class name and retrieve class id\n #classes_text.append(row[\"class\"].encode(\"utf8\"))\n class_index = label_map.get(str(row[\"class\"]))\n \n #Check if class id could be retrieved\n assert (\n class_index is not None\n ), \"class label: `{}` not found in label_map: {}\".format(\n row[\"class\"], label_map\n )\n\n #For troubleshooting only\n print(f\"{filename} has class_index {class_index} and class {row['class']}\")\n\n classes.append(class_index)\n\n #Build tf_example object\n tf_example = tf.train.Example(\n features=tf.train.Features(\n feature={\n \"image/height\": dataset_util.int64_feature(height),\n \"image/width\": dataset_util.int64_feature(width),\n \"image/filename\": dataset_util.bytes_feature(filename),\n \"image/source_id\": dataset_util.bytes_feature(filename),\n \"image/encoded\": dataset_util.bytes_feature(encoded_jpg),\n \"image/format\": dataset_util.bytes_feature(image_format),\n \"image/object/bbox/xmin\": dataset_util.float_list_feature(xmins),\n \"image/object/bbox/xmax\": dataset_util.float_list_feature(xmaxs),\n \"image/object/bbox/ymin\": dataset_util.float_list_feature(ymins),\n \"image/object/bbox/ymax\": dataset_util.float_list_feature(ymaxs),\n 
\"image/object/class/text\": dataset_util.bytes_list_feature(\n classes_text\n ),\n \"image/object/class/label\": dataset_util.int64_list_feature(classes),\n }\n )\n )\n return tf_example", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def _parse_sequence_example_raw(serialized,\n debug_name,\n context,\n feature_list,\n name=None):\n if context.num_features + feature_list.num_features == 0:\n raise ValueError(\"Must provide at least one feature key.\")\n with ops.name_scope(name, \"ParseSequenceExample\", [serialized]):\n debug_name = [] if debug_name is None else debug_name\n\n # Internal\n feature_list_dense_missing_assumed_empty = []\n for k, v in feature_list.dense_defaults.items():\n if v is not None:\n raise ValueError(\"Value feature_list.dense_defaults[%s] must be None\" %\n k)\n feature_list_dense_missing_assumed_empty.append(k)\n\n has_ragged = context.ragged_keys or feature_list.ragged_keys\n serialized = ops.convert_to_tensor(serialized, name=\"serialized\")\n if has_ragged and serialized.shape.ndims is None:\n raise ValueError(\"serialized must have statically-known rank to \"\n \"parse ragged features.\")\n feature_list_dense_missing_assumed_empty_vector = [\n key in feature_list_dense_missing_assumed_empty\n for key in feature_list.dense_keys\n ]\n outputs = gen_parsing_ops.parse_sequence_example_v2(\n # Inputs\n serialized=serialized,\n debug_name=debug_name,\n context_sparse_keys=context.sparse_keys,\n context_dense_keys=context.dense_keys,\n context_ragged_keys=context.ragged_keys,\n feature_list_sparse_keys=feature_list.sparse_keys,\n feature_list_dense_keys=feature_list.dense_keys,\n feature_list_ragged_keys=feature_list.ragged_keys,\n feature_list_dense_missing_assumed_empty=(\n feature_list_dense_missing_assumed_empty_vector),\n context_dense_defaults=context.dense_defaults_vec,\n # Attrs\n Ncontext_sparse=len(context.sparse_keys),\n Nfeature_list_sparse=len(feature_list.sparse_keys),\n Nfeature_list_dense=len(feature_list.dense_keys),\n context_sparse_types=context.sparse_types,\n context_ragged_value_types=context.ragged_value_types,\n context_ragged_split_types=context.ragged_split_types,\n feature_list_dense_types=feature_list.dense_types,\n feature_list_sparse_types=feature_list.sparse_types,\n feature_list_ragged_value_types=feature_list.ragged_value_types,\n feature_list_ragged_split_types=feature_list.ragged_split_types,\n context_dense_shapes=context.dense_shapes_as_proto,\n feature_list_dense_shapes=feature_list.dense_shapes,\n name=name)\n (context_sparse_indices, context_sparse_values, context_sparse_shapes,\n context_dense_values, context_ragged_values, context_ragged_row_splits,\n feature_list_sparse_indices, feature_list_sparse_values,\n feature_list_sparse_shapes, feature_list_dense_values,\n feature_list_dense_lengths, feature_list_ragged_values,\n feature_list_ragged_outer_splits,\n feature_list_ragged_inner_splits) = outputs\n # pylint: disable=protected-access\n context_ragged_tensors = parsing_config._build_ragged_tensors(\n serialized.shape, context_ragged_values, context_ragged_row_splits)\n 
feature_list_ragged_tensors = parsing_config._build_ragged_tensors(\n serialized.shape, feature_list_ragged_values,\n feature_list_ragged_outer_splits, feature_list_ragged_inner_splits)\n\n # pylint: disable=g-complex-comprehension\n context_sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape)\n for (ix, val,\n shape) in zip(context_sparse_indices, context_sparse_values,\n context_sparse_shapes)\n ]\n\n feature_list_sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape)\n for (ix, val, shape\n ) in zip(feature_list_sparse_indices, feature_list_sparse_values,\n feature_list_sparse_shapes)\n ]\n # pylint: enable=g-complex-comprehension\n\n context_output = dict(\n zip(\n context.sparse_keys + context.dense_keys + context.ragged_keys,\n context_sparse_tensors + context_dense_values +\n context_ragged_tensors))\n feature_list_output = dict(\n zip(\n feature_list.sparse_keys + feature_list.dense_keys +\n feature_list.ragged_keys, feature_list_sparse_tensors +\n feature_list_dense_values + feature_list_ragged_tensors))\n feature_list_lengths = dict(\n zip(feature_list.dense_keys, feature_list_dense_lengths))\n\n return (context_output, feature_list_output, feature_list_lengths)", "def process(self, example):\n self.get_counter(\"examples-total\").inc()\n label = example_util.get_bytes_feature(example, _LABEL_COLUMN)[0]\n self.get_counter(\"examples-{}\".format(label)).inc()\n yield example", "def read_and_decode(filename_queue, shape=None):\n label_bytes = 1\n width = shape[0]\n height = shape[1]\n depth = shape[2]\n record_byte_length = label_bytes + width * height\n\n with tf.name_scope(\"read_and_decode\"):\n # Length of record bytes in the dataset\n # Defined in utils module\n reader = tf.TFRecordReader()\n key, record_string = reader.read(filename_queue)\n\n feature_map = {\n \"image/encoded\": tf.FixedLenFeature(\n shape=[], dtype=tf.string)\n }\n parsed = tf.parse_single_example(record_string, feature_map)\n record_bytes = tf.decode_raw(parsed[\"image/encoded\"], tf.int8)\n\n # first byte is the label\n label = tf.cast(tf.strided_slice(record_bytes,\n begin=[0],\n end=[label_bytes]), tf.int32)\n # label = tf.reshape(label, [1])\n # print(label)\n\n # remaining bytes is the example\n example = tf.reshape(tf.strided_slice(record_bytes,\n begin=[label_bytes],\n end=[record_byte_length]), [width, height, depth])\n example = tf.cast(example, tf.float32)\n example.set_shape([width, height, depth])\n label.set_shape(1)\n label = tf.squeeze(label)\n # print(label)\n # label = tf.reshape(label, [0])\n\n return example, label", "def parse_fn(serialized_example):\n\n features = {\n 'video': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'seq_len': tf.io.FixedLenFeature([], tf.int64),\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'channels': tf.io.FixedLenFeature([], tf.int64)\n }\n\n # Parse the input tf.Example proto using the dictionary above.\n parsed = tf.io.parse_single_example(serialized_example, features)\n\n # Decodes and reshapes video\n seq_len = tf.cast(parsed['seq_len'], tf.uint32)\n height = tf.cast(parsed['height'], tf.uint32)\n width = tf.cast(parsed['width'], tf.uint32)\n channels = tf.cast(parsed['channels'], tf.uint32)\n video = tf.io.decode_raw(parsed['video'], tf.uint8)\n video = tf.reshape(video, shape=[seq_len, height, width, channels])\n\n # Normalizes video frames, label\n video = tf.cast(video, tf.float32) / 255\n label = tf.cast(parsed['label'], tf.float32)\n 
return video, label", "def parse_example(self, serialized_example):\n # Because of RaggedTensor specs, feature_specs can be a 2-level nested dict,\n # so have to wrap `tf.io.parse_single_example` between\n # `flatten_nest_dict`/`pack_as_nest_dict`.\n # {\n # 'video/image': tf.io.FixedLenSequenceFeature(...),\n # 'video/object/bbox': {\n # 'ragged_flat_values': tf.io.FixedLenSequenceFeature(...),\n # 'ragged_row_lengths_0', tf.io.FixedLenSequenceFeature(...),\n # },\n # }\n example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=self.flat_feature_specs,\n )\n example = utils.pack_as_nest_dict(example, self._nested_feature_specs)\n\n example = { # pylint:disable=g-complex-comprehension\n k: _deserialize_single_field(example_data, tensor_info)\n for k, (example_data, tensor_info) in utils.zip_dict(\n example, self._flat_example_specs\n )\n }\n # Reconstruct all nesting\n example = utils.pack_as_nest_dict(example, self._example_specs)\n return example", "def _parse_single(filename, label, image_size=IMAGE_SIZE):\n # Decode and convert image to appropriate type\n image = tf.image.decode_png(tf.read_file(filename), channels=image_size[2])\n image = tf.image.convert_image_dtype(image, tf.float32) # Also scales from [0, 255] to [0, 1)\n # Resize according to module requirements\n image = tf.image.resize_images(image, image_size[:2])\n return image, label", "def read_tfrecord_and_decode_into_image_label_pair_tensors(tfrecord_filenames_queue, size):\n\n reader = tf.TFRecordReader()\n\n _, serialized_example = reader.read(tfrecord_filenames_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/depth': tf.FixedLenFeature([], tf.int64),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n # 'image': tf.FixedLenFeature([], tf.string)\n })\n\n image = tf.decode_raw(features['image/encoded'], tf.uint8)\n label = tf.cast(features['image/class/label'], tf.int64)\n height = tf.cast(features['image/height'], tf.int64)\n width = tf.cast(features['image/width'], tf.int64)\n depth = tf.cast(features['image/depth'], tf.int64)\n\n image = tf.reshape(image, [size,size,3]) #height,width,depth\n image = tf.to_float(image)\n image = image/127.5 - 1.0\n\n return image, label", "def _convert_to_example(filename, image_buffer, labels, height, width, channels=3):\n feature = {'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/channels': _int64_feature(channels),\n 'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),\n 'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}\n for level_name, one_hot in labels:\n feature.update({'image/label/%s' % level_name: _int64_feature(one_hot)})\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n return example", "def process_example(example_string: tf.train.Example,\n schema: tfgnn.GraphSchema):\n spec = tfgnn.create_graph_spec_from_schema_pb(schema)\n graph = tfgnn.parse_single_example(spec, example_string)\n\n # Note: the output tags cannot be structured; they must be single string\n # objects.\n for key, tensor in iter_stats_graph(graph):\n if isinstance(tensor, tf.RaggedTensor):\n tensor = tensor.flat_values\n for value in tensor.numpy().flat:\n yield beam.pvalue.TaggedOutput(key, value)", "def image_to_tfexample(image_data, img_name, height, width, 
class_label, class_desc):\n\n return tf.train.Example(features=tf.train.Features(feature={\n 'image/encoded': _bytes_list_feature(image_data),\n 'image/filename': _bytes_list_feature(img_name),\n 'image/height': _int64_list_feature(height),\n 'image/width': _int64_list_feature(width),\n 'image/label': _int64_list_feature(class_label),\n 'image/labeldesc': _bytes_list_feature(class_desc)\n }))", "def read_images_from_disk(input_queue):\n label = input_queue[1]\n file_contents = tf.read_file(input_queue[0])\n example = tf.image.decode_png(file_contents, channels=3)\n return example, label", "def process(self, example: str) -> List[torch.Tensor]:\n return self._tokenizer.batch_encode_plus([example], return_tensors=\"pt\", output_past=True, max_length=self.max_seq_len)['input_ids'][0]", "def parse_function(example_proto):\r\n\r\n\t# Parse through features and extract byte string\r\n\tparsed_features = tf.parse_single_example(example_proto,features ={\r\n\t\t'image': tf.FixedLenFeature([],tf.string),\r\n\t\t'joint': tf.FixedLenFeature([],tf.string),\r\n\t\t'offset': tf.FixedLenFeature([],tf.string),\r\n\t\t'handScale': tf.FixedLenFeature([],tf.string)\r\n\t\t},name='features')\r\n\r\n\t# Decode content into correct types\r\n\timage_dec = tf.decode_raw(parsed_features['image'],tf.float32)\r\n\tjoint_dec = tf.decode_raw(parsed_features['joint'],tf.float32)\r\n\toffset_dec = tf.decode_raw(parsed_features['offset'],tf.float32)\r\n\thandScale_dec = tf.decode_raw(parsed_features['handScale'],tf.float32)\r\n\r\n\t# Reshape image to 176x176\r\n\timage_reshaped = tf.reshape(image_dec,[176,176,1])\r\n\r\n\t# Crop 128x128 image around COM\r\n\timage_com_cropped = tf.image.crop_to_bounding_box(image_reshaped,24,24,128,128)\r\n\r\n\treturn image_com_cropped, joint_dec, offset_dec, handScale_dec", "def read_images_from_disk(input_queue):\n\tlabel = input_queue[1]\n\tfile_contents = tf.read_file(input_queue[0])\n\texample = tf.image.decode_jpeg(file_contents, channels=3)\n\treturn example, label", "def _parse_function(example_proto):\n # Parse the tf.example according to the features_spec definition\n parsed_features = tf.parse_single_example(example_proto, features_spec)\n sequence = parsed_features[\"sequence\"]\n # Convert the sparse sequence tensor to dense.\n sequence_d = tf.sparse_to_dense(sequence.indices, sequence.dense_shape, sequence.values)\n # Return all the elements\n return parsed_features[\"sequence_length\"], parsed_features[\"label\"], sequence_d", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def generate_caption(image, t = 1, sample = False, max_len = 20):\n\n # condition lstm on the image\n s.run(generator_model.init_lstm, {generator_model.input_image: image})\n\n caption = [vocab[START]]\n\n for _ in range(max_len):\n\n next_word_probs = s.run(generator_model.one_step, {generator_model.current_word: [caption[-1]]})[0]\n\n next_word_probs = next_word_probs.ravel()\n\n next_word_probs = next_word_probs ** (1 / t) / np.sum(next_word_probs ** (1 / t))\n\n if sample:\n next_word = np.random.choice(range(len(vocab)), p = next_word_probs)\n 
else:\n next_word = np.argmax(next_word_probs)\n caption.append(next_word)\n if next_word == vocab[END]:\n break\n\n return list(map(vocab_inverse.get, caption))", "def dict_to_tf_example(data,\n label_map_dict,\n image_subdirectory,\n ignore_difficult_instances=False):\n \n data = data.strip().split()\n \n img_path = os.path.join(image_subdirectory, data[0])\n \n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n\n key = hashlib.sha256(encoded_jpg).hexdigest()\n \n width, height = image.size\n \n num_boxes = len(data[1:])/5\n \n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n \n for i in xrange(num_boxes):\n xmin.append(int(data[1 + 5 * i]))\n ymin.append(int(data[2 + 5 * i]))\n \n xmax.append(int(data[3 + 5 * i]))\n ymax.append(int(data[4 + 5 * i]))\n \n xmin[-1] = float(xmin[-1]) / width\n ymin[-1] = float(ymin[-1]) / height\n xmax[-1] = float(xmax[-1]) / width\n ymax[-1] = float(ymax[-1]) / height\n \n classes.append(int(data[5 + 5 * i]))\n \n classes_text.append(label_map_dict[classes[-1]].encode('utf8'))\n truncated.append(0)\n poses.append('Frontal'.encode('utf8'))\n difficult_obj.append(0)\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data[0].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data[0].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n return example", "def get_example(self, i):\n id_ = self.ids[i]\n bbox = list()\n label = list()\n difficult = list()\n depth = list()\n y_rot = list()\n \n label_f = os.path.join(self.data_dir, 'label_2', id_ + '.txt')\n lines = open(label_f).readlines()\n items = [x.strip(' ').split(' ') for x in lines]\n for i in range(len(lines)):\n name = items[i][0]\n '''\n ingore the DontCare part\n '''\n if name == 'DontCare':\n continue\n xmin, ymin, xmax, ymax = items[i][4:8]\n bbox.append([int(float(ymin)), int(float(xmin)), int(float(ymax)), int(float(xmax))])\n label.append(KITTI_LABEL_NAMES.index(name))\n difficult.append(False)\n \n depth_ = float(items[i][13])/70.0\n if abs(depth_) > 1:\n depth_ = 1\n depth.append(depth_)\n \n y_rot.append(float(items[i][3]))\n\n\n\n bbox = np.stack(bbox).astype(np.float32)\n label = np.stack(label).astype(np.int32)\n depth = np.stack(depth).astype(np.float32)\n y_rot = np.stack(y_rot).astype(np.float32)\n # When `use_difficult==False`, all elements in `difficult` 
are False.\n difficult = np.array(difficult, dtype=np.bool).astype(np.uint8) # PyTorch don't support np.bool\n\n # Load a image\n img_file = os.path.join(self.data_dir, 'image_2', id_ + '.png')\n img = read_image(img_file, color=True)\n\n # if self.return_difficult:\n # return img, bbox, label, difficult\n return img, bbox, label, difficult, depth, y_rot", "def _read_tf_example(self,\n record: tf.Tensor,\n feature_preprocessor: Callable[[str], List[str]]\n ) -> types.FeatureAndLabelTensors:\n\n keys_to_features = {}\n keys_to_features[self._text_feature] = tf.FixedLenFeature([], tf.string)\n for label, dtype in self._labels.items():\n keys_to_features[label] = tf.FixedLenFeature([], dtype)\n parsed = tf.parse_single_example(\n record, keys_to_features) # type: Dict[str, types.Tensor]\n\n text = parsed[self._text_feature]\n # I think this could be a feature column, but feature columns seem so beta.\n preprocessed_text = feature_preprocessor(text)\n features = {self._text_feature: preprocessed_text}\n if self._round_labels:\n labels = {label: tf.round(parsed[label]) for label in self._labels}\n else:\n labels = {label: parsed[label] for label in self._labels}\n\n return features, labels", "def parse_fn(example):\n\n example_fmt = {\n \"image\": tf.FixedLenFeature((), tf.string),\n \"target\": tf.FixedLenFeature((), tf.float32, -1)\n }\n parsed = tf.parse_single_example(example, example_fmt)\n\n if return_full_size_image:\n preprocessed_image, full_size_image = _image_preprocess_fn(\n image_buffer=parsed[\"image\"], input_height=299, input_width=299, input_mean=128,\n input_std=128, return_full_size_image=True)\n return preprocessed_image, parsed[\"target\"], full_size_image\n\n preprocessed_image = _image_preprocess_fn(image_buffer=parsed[\"image\"], input_height=299, input_width=299,\n input_mean=128, input_std=128)\n\n return preprocessed_image, parsed[\"target\"]", "def process(self, example: str) -> List[torch.Tensor]:\n return torch.tensor(self._tokenizer.encode(example, max_length=self.max_seq_len))", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return", "def preprocess(self, sequence, word2id, trg=True):\r\n if trg:\r\n story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')] + [EOS_token]\r\n else:\r\n story = []\r\n for i, word_triple in enumerate(sequence):\r\n story.append([])\r\n for ii, word in enumerate(word_triple):\r\n temp = word2id[word] if word in word2id else UNK_token\r\n story[i].append(temp)\r\n try:\r\n story = torch.Tensor(story)\r\n except:\r\n print(sequence)\r\n print(story)\r\n # print('111111111111111111111111')\r\n return story", "def get_example(self, i):\n img = read_image(self.img_paths[i])\n label_orig = read_image(\n self.label_paths[i], dtype=np.int32, color=False)[0]\n if self.ignore_labels:\n label_out = np.ones(label_orig.shape, dtype=np.int32) * -1\n for label in cityscapes_labels:\n if not label.ignoreInEval:\n label_out[label_orig == label.id] = label.trainId\n else:\n label_out = label_orig\n return img, label_out", "def _create_tf_example(data):\n # File path url\n full_path = os.path.join(os.getcwd(), FLAGS.img_folder,\n '{}'.format(data['name']))\n\n # Read encoded image file, and get properties we need.\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n filename = data['name'].encode('utf8')\n image_format = b'jpg'\n label_x = 
data['x']\n label_y = data['y']\n\n # After geting all the features, time to generate tensorflow record file.\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/filename': _bytes_feature(filename),\n 'image/source_id': _bytes_feature(filename),\n 'image/encoded': _bytes_feature(encoded_jpg),\n 'image/format': _bytes_feature(image_format),\n 'label/x': _int64_feature(label_x),\n 'label/y': _int64_feature(label_y),\n }))\n return tf_example", "def create_image_caption_pairs(self):", "def serialize_example(image_inp_string,image_out_string):\n image_inp_shape = tf.image.decode_jpeg(image_inp_string).shape\n image_out_shape = tf.image.decode_jpeg(image_out_string).shape\n feature = {\n\n 'image_input': _bytes_feature(image_inp_string),\n 'image_output':_bytes_feature(image_out_string),\n }\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\n #--------------------------------------------------------------------------------------\n\n ###process image", "def _example_parser(range_val: int) -> Dict[str, tf.Tensor]:\n image = tf.random.stateless_categorical(\n tf.math.log([[0.5, 0.5]]), np.prod(self._image_shape),\n [self._split_seed[split], self._split_seed[split] + range_val],\n dtype=tf.int32)\n image = tf.reshape(tf.cast(image, tf.float32), self._image_shape)\n image = 2.0 * (image - 0.5)\n label = tf.zeros([], tf.int32)\n return {\"features\": image, \"labels\": label}", "def _process_image(filename, label):\n # Read the image file.\n height = 224\n width = 224\n img_raw = tf.io.read_file(filename)\n jpeg_img = tf.image.decode_jpeg(img_raw, channels=3)\n jpeg_img_resized = tf.image.resize(jpeg_img, (height, width))\n\n return jpeg_img_resized, label", "def show_record(filenames):\n # Generate dataset from TFRecord file.\n dataset = tf.data.TFRecordDataset(filenames)\n\n # Make dataset iteratable.\n iterator = dataset.make_one_shot_iterator()\n next_example = iterator.get_next()\n\n # Extract features from single example\n features = _extract_feature(next_example)\n image_decoded = tf.image.decode_image(features['image/encoded'])\n label_x = tf.cast(features['label/x'], tf.int32)\n label_y = tf.cast(features['label/y'], tf.int32)\n\n # Use openCV for preview\n cv2.namedWindow(\"image\", cv2.WINDOW_NORMAL)\n\n # Actrual session to run the graph.\n with tf.Session() as sess:\n while True:\n try:\n image_tensor, label_text = sess.run(\n [image_decoded, (label_x, label_y)])\n\n # Use OpenCV to preview the image.\n image = np.array(image_tensor, np.uint8)\n cv2.imshow(\"image\", image)\n cv2.waitKey(100)\n\n # Show the labels\n print(label_text)\n except tf.errors.OutOfRangeError:\n break", "def _convert_single_example(example, max_seq_length, tokenizer):\n tokens = [\"[CLS]\"]\n tokens.extend(example.words)\n tokens.append(\"[SEP]\")\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n label_ids = [_PADDING_LABEL_ID]\n label_ids.extend(example.label_ids)\n label_ids.append(_PADDING_LABEL_ID)\n\n segment_ids = [0] * len(input_ids)\n input_mask = [1] * len(input_ids)\n\n # Pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(_PADDING_LABEL_ID)\n\n def create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n 
features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"label_ids\"] = create_int_feature(label_ids)\n features[\"sentence_id\"] = create_int_feature([example.sentence_id])\n features[\"sub_sentence_id\"] = create_int_feature([example.sub_sentence_id])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n return tf_example", "def convert_single_example(ex_index, example, label_list, max_seq_length,tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n text_a=example.text_a\n labels_a=[]\n text_a=re.split(\"(<[a-zA-Z]+>[^<>]+</[a-zA-Z]+>)\",text_a)\n tokens_a=[]\n for sub_text in text_a:\n if len(sub_text.strip())<1:\n continue\n elif re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text):\n re_res=re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text)\n slot_name=re_res.group(1)\n slot_value=re_res.group(2)\n slot_value=tokenizer.tokenize(slot_value)\n slot_labels=[]\n for i,s in enumerate(slot_value):\n if i==0:\n slot_labels.append(\"B_\"+slot_name)\n elif re.search(\"^##\",s):\n slot_labels.append(\"x\")\n else:\n slot_labels.append(\"M_\"+slot_name)\n tokens_a.extend(slot_value)\n labels_a.extend(slot_labels)\n else:\n sub_text=tokenizer.tokenize(sub_text)\n sub_labels=['x' if re.search(\"^##\",i) else 'o' for i in sub_text]\n tokens_a.extend(sub_text)\n labels_a.extend(sub_labels)\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n labels=[example.label]\n for label in labels_a:\n labels.append(label)\n labels.append('o')\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n output_mask=[1 if i!='x' else 0 for i in labels]\n label_ids=[label_map[i] for i in labels]\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n segment_ids.append(0)\n input_mask.append(0)\n output_mask.append(0)\n label_ids.append(label_map['<PAD>'])\n assert len(input_ids)==max_seq_length\n assert len(segment_ids)==max_seq_length\n assert len(label_ids)==max_seq_length\n assert len(input_mask)==max_seq_length\n assert len(output_mask)==max_seq_length\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(tokens))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"labels: %s\" % \" \".join([str(x) for x in labels]))\n tf.logging.info(\"label_ids: %s\" % \" \".join([str(x) for x in label_ids]))\n tf.logging.info(\"output_mask: %s\" % \" \".join([str(x) for x in output_mask]))\n feature = InputFeatures(\n input_ids=input_ids,\n segment_ids=segment_ids,\n label_ids=label_ids,\n input_mask=input_mask,\n output_mask=output_mask)\n return feature", "def _parse_single_sequence_example_raw(serialized,\n context,\n feature_list,\n debug_name,\n name=None):\n with ops.name_scope(name, \"ParseSingleExample\", [serialized, debug_name]):\n serialized = ops.convert_to_tensor(serialized, name=\"serialized\")\n serialized = _assert_scalar(serialized, \"serialized\")\n return _parse_sequence_example_raw(serialized, 
debug_name, context,\n feature_list, name)[:2]", "def generate_title(model, tokenizer, photo, max_length):\n in_text = \"startseq\"\n vocab = len(tokenizer.word_index) + 1\n prev_word = \"\"\n\n for i in range(max_length):\n sequence = tokenizer.texts_to_sequences([in_text])[0]\n sequence = pad_sequences([sequence], maxlen=max_length)\n yhat = model.predict([photo, sequence], verbose=0)\n yhat = random.choice(list(range(vocab)), 1, p=yhat[0])\n # yhat = argmax(yhat)\n word = word_for_id(yhat, tokenizer)\n\n if word is None:\n break\n\n if word == prev_word:\n pass\n\n in_text += \" \" + word\n\n prev_word = word\n\n if word == \"endseq\":\n break\n\n return in_text", "def build_inputs(self):\n # in prediction mode, we use a batch size of one\n batch_size = self.config.batch_size\n \n if self.mode == \"prediction\":\n batch_size = 1\n \n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\") # shape: scalar value\n\n #image_fn_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_fn_feed\")\n \n #image_filename_queue = tf.train.string_input_producer([image_fn_feed]) # list of files to read\n \n #reader = tf.WholeFileReader()\n #_, image_feed = reader.read(image_filename_queue)\n \n \n text_feed = tf.placeholder(dtype=tf.int64,\n shape=[None, self.config.sentence_length], # shape 2D tensor - variable size (first dimension sentence sequence, second dimension token sequence (actually fixed size))\n name=\"text_feed\")\n \n # arbitrary labels (not used)\n mi_label = tf.constant(-1, dtype=tf.int64) \n sc_label = tf.constant(-1.0, dtype=tf.float32) \n\n image = self.process_image(image_feed)\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.process_image(image_feed), 0)\n input_seqs = tf.expand_dims(text_feed, 0) \n mi_labels = tf.expand_dims(mi_label, 0)\n sc_labels = tf.expand_dims(sc_label, 0)\n input_mask = tf.expand_dims(tf.constant([1], dtype=tf.int32) , 0)\n \n else:\n # Prefetch serialized SequenceExample protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n is_training=self.is_training(),\n batch_size=batch_size,\n values_per_shard=self.config.values_per_input_shard,\n input_queue_capacity_factor=self.config.input_queue_capacity_factor,\n num_reader_threads=self.config.num_input_reader_threads,\n mode=self.mode)\n\n # Image processing and random distortion. 
Split across multiple threads\n # with each thread applying a slightly different distortion.\n assert self.config.num_preprocess_threads % 2 == 0\n images_and_texts = []\n for thread_id in range(self.config.num_preprocess_threads):\n serialized_sequence_example = input_queue.dequeue()\n encoded_image, text, mi, sc = input_ops.parse_sequence_example(\n serialized_sequence_example,\n image_feature=self.config.image_feature_name,\n sentences_feature=self.config.sentences_feature_name,\n sentence_length=self.config.sentence_length,\n mi_feature=self.config.mi_feature_name,\n sc_feature=self.config.sc_feature_name)\n image = self.process_image(encoded_image, thread_id=thread_id)\n images_and_texts.append([image, text, mi, sc])\n\n # Batch inputs.\n queue_capacity = (2 * self.config.num_preprocess_threads *\n batch_size)\n images, input_seqs, mi_labels, sc_labels, input_mask = (\n input_ops.batch_with_dynamic_pad(images_and_texts,\n batch_size=batch_size,\n queue_capacity=queue_capacity))\n \n #print('Shapes') \n #print('Shape images: ' + str(images.get_shape()))\n #print('Shape input_seqs: ' + str(input_seqs.get_shape())) \n #print('Shape input_mask: ' + str(input_mask.get_shape())) \n\n self.images = images\n self.input_seqs = input_seqs\n if self.mode == \"prediction\":\n self.mi_labels = None\n self.sc_labels = None\n else:\n self.mi_labels = mi_labels\n self.sc_labels = sc_labels\n self.input_mask = input_mask", "def parser(record):\n\n record_spec = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"type_id\": tf.FixedLenFeature([1], tf.int64),\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n inputs = example[\"inputs\"]\n inp_len = tf.shape(inputs)[0]\n\n # expand type id to full length\n example[\"type_id\"] = tf.broadcast_to(example[\"type_id\"], [inp_len])\n\n # convert all sparse example to dense\n example = sparse_to_dense(example)\n\n return example", "def decode(self, serialized_example):\n parsed_tensors = tf.io.parse_single_example(\n serialized_example, self.KEYS_TO_FEATURES\n )\n for k in parsed_tensors:\n if isinstance(parsed_tensors[k], tf.SparseTensor):\n if parsed_tensors[k].dtype == tf.string:\n parsed_tensors[k] = tf.sparse.to_dense(\n parsed_tensors[k], default_value=\"\"\n )\n else:\n parsed_tensors[k] = tf.sparse.to_dense(\n parsed_tensors[k], default_value=0\n )\n\n image = self._decode_image(parsed_tensors)\n boxes = self._decode_boxes(parsed_tensors)\n decode_image_shape = tf.logical_or(\n tf.equal(parsed_tensors[\"image/height\"], -1),\n tf.equal(parsed_tensors[\"image/width\"], -1),\n )\n image_shape = tf.cast(tf.shape(image), dtype=tf.int64)\n\n parsed_tensors[\"image/height\"] = tf.where(\n decode_image_shape, image_shape[0], parsed_tensors[\"image/height\"]\n )\n parsed_tensors[\"image/width\"] = tf.where(\n decode_image_shape, image_shape[1], parsed_tensors[\"image/width\"]\n )\n\n decoded_tensors = {\n \"image\": image,\n \"height\": parsed_tensors[\"image/height\"],\n \"width\": parsed_tensors[\"image/width\"],\n \"groundtruth_classes\": parsed_tensors[\"image/object/class/label\"],\n \"groundtruth_boxes\": boxes,\n }\n return decoded_tensors", "def parse_serialized_simulation_example(example_proto, metadata):\n if 'context_mean' in metadata:\n feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT\n else:\n feature_description = _FEATURE_DESCRIPTION\n context, parsed_features = tf.io.parse_single_sequence_example(\n example_proto,\n context_features=_CONTEXT_FEATURES,\n 
sequence_features=feature_description)\n for feature_key, item in parsed_features.items():\n convert_fn = functools.partial(\n convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])\n parsed_features[feature_key] = tf.py_function(\n convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])\n\n # There is an extra frame at the beginning so we can calculate pos change\n # for all frames used in the paper.\n position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]\n\n # Reshape positions to correct dim:\n parsed_features['position'] = tf.reshape(parsed_features['position'],\n position_shape)\n # Set correct shapes of the remaining tensors.\n sequence_length = metadata['sequence_length'] + 1\n if 'context_mean' in metadata:\n context_feat_len = len(metadata['context_mean'])\n parsed_features['step_context'] = tf.reshape(\n parsed_features['step_context'],\n [sequence_length, context_feat_len])\n # Decode particle type explicitly\n context['particle_type'] = tf.py_function(\n functools.partial(convert_fn, encoded_dtype=np.int64),\n inp=[context['particle_type'].values],\n Tout=[tf.int64])\n context['particle_type'] = tf.reshape(context['particle_type'], [-1])\n return context, parsed_features", "def parse_record(record, training): \n # Reshape from [depth * height * width] to [depth, height, width].\n # depth_major = tf.reshape(record, [3, 32, 32])\n depth_major = record.reshape((3, 32, 32))\n\n # Convert from [depth, height, width] to [height, width, depth]\n # image = tf.transpose(depth_major, [1, 2, 0])\n image = np.transpose(depth_major, [1, 2, 0])\n\n image = preprocess_image(image, training) # If any.\n\n return image", "def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)", "def __parser__(self, example_proto):\n # configure feature and label length\n # It is crucial that for tf.string, the length is not specified, as the data is stored as a single string!\n x_config = tf.FixedLenFeature([], tf.string) \\\n if self.x_dtype == tf.string else tf.FixedLenFeature([self.num_features], self.x_dtype)\n if self.num_labels == 0:\n proto_config = {'x': x_config}\n else:\n y_config = tf.FixedLenFeature([], tf.string) \\\n if self.y_dtype == tf.string else tf.FixedLenFeature([self.num_labels], self.y_dtype)\n proto_config = {'x': x_config, 'y': y_config}\n\n # decode examples\n datum = tf.parse_single_example(example_proto, features=proto_config)\n if self.x_dtype == tf.string: # if input is string / bytes, decode it to float32\n if self.decode_jpeg:\n # first decode compressed image string to uint8, as data is stored in this way\n # datum['x'] = tf.image.decode_image(datum['x'], channels=3)\n datum['x'] = tf.image.decode_jpeg(datum['x'], channels=3)\n else:\n # first decode data to uint8, as data is stored in this way\n datum['x'] = tf.decode_raw(datum['x'], tf.uint8)\n # then cast data to tf.float32 or tf.float16\n datum['x'] = tf.cast(datum['x'], tf.float32)\n # cannot use 
string_to_number as there is only one string for a whole sample\n # datum['x'] = tf.strings.to_number(datum['x'], tf.float32) # this results in possibly a large number\n\n # return data\n if 'y' in datum:\n # y can be present in many ways:\n # 1. a single integer, which requires y to be int32 or int64 (e.g, used in tf.gather in cbn)\n # 2. num-class bool/integer/float variables. This form is more flexible as it allows multiple classes and\n # prior probabilities as targets\n # 3. float variables in regression problem.\n # but...\n # y is stored as int (for case 1), string (for other int cases), or float (for float cases)\n # in the case of tf.string and tf.int64, convert to to int32\n if self.y_dtype == tf.string:\n # avoid using string labels like 'cat', 'dog', use integers instead\n datum['y'] = tf.decode_raw(datum['y'], tf.uint8)\n datum['y'] = tf.cast(datum['y'], tf.int32)\n else:\n datum['y'] = tf.cast(datum['y'], self.y_dtype)\n if self.use_one_hot_label:\n datum['y'] = tf.reshape(tf.one_hot(datum['y'], self.num_classes), (-1, ))\n if self.use_smooth_label: # label smoothing\n datum['y'] = 0.9 * datum['y'] + 0.1 / self.num_classes\n return datum['x'], datum['y']\n else:\n return datum['x']" ]
[ "0.7773432", "0.70653176", "0.6706283", "0.66781205", "0.66210777", "0.6502138", "0.63294804", "0.63211375", "0.62616134", "0.62233996", "0.6201436", "0.6195922", "0.6186161", "0.6184599", "0.6164636", "0.6148699", "0.6147658", "0.614749", "0.6147216", "0.61331445", "0.6099785", "0.6097788", "0.6080328", "0.6024054", "0.5964376", "0.59438324", "0.5934399", "0.5927543", "0.5909124", "0.5894257", "0.58817375", "0.5881077", "0.58764315", "0.586154", "0.586036", "0.5856217", "0.58377206", "0.57875127", "0.5760991", "0.5744697", "0.5743724", "0.57377404", "0.57054144", "0.57044804", "0.5697791", "0.5695427", "0.5694768", "0.5690761", "0.56784296", "0.56777877", "0.5672623", "0.5670684", "0.56598735", "0.56526875", "0.56226444", "0.56186104", "0.56103057", "0.5602336", "0.5582796", "0.5576747", "0.55690044", "0.55341053", "0.5531139", "0.5525773", "0.5520924", "0.5520262", "0.55071396", "0.5500383", "0.5496819", "0.5488749", "0.5487785", "0.5481773", "0.54797256", "0.54635173", "0.5461794", "0.54606295", "0.54606086", "0.5453374", "0.54438764", "0.54415643", "0.54364806", "0.5423123", "0.54199773", "0.5416674", "0.54145205", "0.54141337", "0.54061216", "0.5398293", "0.5391003", "0.53888303", "0.5377074", "0.53734267", "0.53703034", "0.53570664", "0.5351678", "0.53448737", "0.53349453", "0.53320026", "0.53296375", "0.532167" ]
0.7657589
1
Decodes and processes an image string.
def process_image(encoded_image, config, thread_id=0): return image_processing.process_image(encoded_image, is_training=False, height=config.image_height, width=config.image_width, thread_id=thread_id, image_format=config.image_format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_image_string(self, image_string):\n image = image_util.load_image_from_string(image_string)\n return self.process_image(image)", "def convert_str_to_image(image_string):\n image = image_string.partition('base64,')[2]\n img_data = base64.b64decode(image)\n return img_data", "def decode(self, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def base64_decode_image(inStr):\n imgDat, imgType, imgShape = json.loads(inStr)\n imgDat = bytes(imgDat, encoding=\"utf-8\")\n\n imgDat = base64_decode_array(imgDat, imgType)\n imgDat = imgDat.reshape(imgShape)\n return imgDat", "def process_image(self, base64_string: str) -> str:\n self.convert_base64_to_image(base64_string)\n self.corp_image()\n self.change_image_pixels()\n return self.image_to_string()", "def decode(self, path: str = None) -> str:\n if path:\n image = Image.open(path)\n else:\n image = self._encoded_image\n\n width, height = image.size\n pixels = image.load()\n\n binary_string = \"\"\n for x_pixel in range(width):\n for y_pixel in range(height):\n rgb = pixels[x_pixel, y_pixel]\n red, blue, green = self._rgb_to_binary(rgb)\n binary_string += red[-1] + blue[-1] + green[-1]\n\n return self._binary_string_to_str(binary_string, end=self._end_message)", "def encode_decode(self, img, img_metas):\n pass", "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "def deserialise_image(data):\n if \"data:image\" in data:\n data = data[data.find(\",\") + 1:]\n\n return Image.open(io.BytesIO(base64.urlsafe_b64decode(data)))", "def decode(self, s):", "def decode(self, s):", "def decode(self, imgObj):\r\n if not _checkIsStringIO(imgObj):\r\n raise TypeError('Given object is not a StringIO instance.')\r\n\r\n # Checking of image according to django.forms.fields.ImageField\r\n try:\r\n imgObj.seek(0)\r\n img = Image.open(imgObj)\r\n img.verify()\r\n except:\r\n raise ValueError('Content of given image could not be verified.')\r\n\r\n imgObj.seek(0)\r\n img = Image.open(imgObj)\r\n img.load()\r\n\r\n # Everything ok, convert PIL.Image to ROS and return it\r\n if img.mode == 'P':\r\n img = img.convert('RGB')\r\n\r\n rosimage = sensor_msgs.msg.Image()\r\n rosimage.encoding = ImageConverter._ENCODINGMAP_PY_TO_ROS[img.mode]\r\n (rosimage.width, rosimage.height) = img.size\r\n rosimage.step = (ImageConverter._PIL_MODE_CHANNELS[img.mode]\r\n * rosimage.width)\r\n rosimage.data = img.tostring()\r\n return rosimage", "def decode(self, image):\r\n\r\n # The data pulled out\r\n data = []\r\n\r\n # A list to build the individual bits in\r\n curr_byte_list = []\r\n decode_complete = False\r\n for pixel in image.getdata():\r\n for curr_color_pos, color in enumerate(pixel):\r\n tmp_color = list(dec_2_bin(color))\r\n\r\n bits = self.get_color_bits_used()[curr_color_pos]\r\n\r\n # Pull out the specified number of bits based on the color\r\n\r\n curr_byte_list.extend(self.get_decode_data_bits(tmp_color, bits))\r\n\r\n # If we have a full byte or more add the bit to the data\r\n if len(curr_byte_list) >= 8:\r\n 
data.append(chr(bin_2_dec(''.join(curr_byte_list[:8]))))\r\n curr_byte_list = curr_byte_list[8:]\r\n\r\n # Stop if we've reached our termination characters\r\n if len(data) >= len(self.termination_sequence):\r\n decode_complete = ''.join(data[-len(self.termination_sequence):]) == self.termination_sequence\r\n if decode_complete:\r\n break\r\n if decode_complete:\r\n break\r\n\r\n # Strip off the termination bytes\r\n return ''.join(data[:-len(self.termination_sequence)])", "def decode(img):\r\n ints=[];#A list of ints that will contain all of our alpha values.\r\n width,height=img.size #Get the width and the height of my image.\r\n pixelData=ImageUtilities.getPixelList(img); #Get all of the pixels in the image and put them into a list.\r\n for y in range(height): #Iterate across the pixels from top to bottom.\r\n for x in range(width):#Iterate across out image from left to right.\r\n alpha=ImageUtilities.getAlphaFromList(img,pixelData,x,y); #Referenced the dumped contents\r\n if(alpha==255): #If the alpha of our pixel is 255....\r\n continue; #I don't want 255 values because that means that is not part of my message.\r\n ints.append(alpha); #Get the alpha value and append it to my list of ints.\r\n\r\n msg=\"\"; #Make an empty string to store our decoded message.\r\n for value in ints: #Iterate across my list of ints. (For each int in my list...)\r\n msg+=chr(value); #Convert my int to it's character value and add it back to my message.\r\n return msg; #Return my message string.\r", "def decode_image(path):\n\n img = Image.open(path)\n image_width = img.width\n image_height = img.height\n pixels = np.array(img)\n\n print(pixels[0])\n bits = []\n\n for i in range(image_height):\n for j in range(image_width):\n bits.append(pixels[i][j][0] & ((1 << 1) - 1))\n\n bytes_l = [int(\"\".join(map(str, bits[i:i + 8])), 2) for i in range(0, len(bits), 8)]\n decoded_message = ''.join(map(chr, bytes_l))\n img.close()\n\n return decoded_message", "def decode_with_esponce(img):\n h = httplib2.Http()\n resp, content = h.request(ESPONCE_URL, \"POST\", img.read())\n content = json.loads(content)\n return content.get(\"content\")", "def decodeFrame(self, image):\n return image", "def decode_base64(data):\n\n image = None\n try:\n image = base64.decodestring(data)\n except:\n print \"Could not decode base64 image from json\"\n\n return image", "def base64_to_PIL(string):\n try:\n base64_data = base64.b64decode(string)\n img = Image.open(BytesIO(base64_data)).convert('RGB')\n return img\n except:\n return None", "def parse_image(self, image):\n # parse the image data into a pygame surface for display or screenshot\n # raw image is BGRA\n # if image_type is segmentation, here will convert to the pre-defined color\n image.convert(self.image_type)\n\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1] # BGR -> RGB\n self.rgb_image = array\n self.pygame_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n\n self.last_image_seconds = image.timestamp\n self.last_image_frame_num = image.frame", "def string_to_image(string, reference_images):\n # create string as array\n image = reference_images[string[0]]\n for i in string[1:]:\n image = np.hstack([image,reference_images[i]])\n return image", "def process(self, image):", "def eval_image_parse_function(filename, text):\n image = read_image(filename)\n resized_image = resize_image(image)\n resized_image = 
scale_image_value(resized_image)\n return resized_image, text", "def base_64_to_img(base_64_string):\r\n # convert image into np array\r\n return cv2.imdecode(\r\n np.frombuffer(base64.b64decode(base_64_string.split(\";base64,\").pop()), np.uint8),\r\n cv2.IMREAD_COLOR)", "def to_image_data(data):\n \n # removing image\n if not data:\n return u''\n\n # image path (not changed)\n if data[0:5] != u'data:':\n return None\n \n # TODO: better MIME handling\n mime = data[5:data.index(u';')].lower()\n img = data[data.index(u',') + 1:].decode('base64')\n \n return mime, img", "def decode(conf_dict, image):\n # FIXME\n msg_prosthesis = 'a'*(image.size[0]*image.size[1]//conf_dict['frequency'])\n msg_str = ''\n colors = ['red', 'green', 'blue']\n img_pixels = image.load()\n for pixel_info in PixelIter(conf_dict, msg_prosthesis):\n if pixel_info[0] == 'whatever':\n continue\n xy = (pixel_info[1], pixel_info[2])\n which_color = colors.index(pixel_info[0])\n letter_ord = img_pixels[xy][which_color]\n msg_str += chr(letter_ord)\n return msg_str", "def img(self):\n return self.img_decode(self.img_msg_)", "def image_decoder(rawbytes):\n img = Image.open(BytesIO(rawbytes))\n array = np.asarray(img, dtype=np.uint8)\n return array", "def decode_image(base64bytes, image_name):\n\n try:\n import base64\n import logging\n except ImportError as e:\n print(\"Necessary imports failed {}\".format(e))\n return\n\n logging.basicConfig(filename='decode_image.log',\n level=logging.DEBUG, filemode='w')\n\n if type(image_name) is not str:\n logging.error('Warning: the input should be type str')\n raise TypeError('Input is type {}, should be'\n ' type str'.format(type(image_name)))\n\n with open(image_name, 'wb') as image_out:\n image_out.write(base64.b64decode(base64bytes))\n logging.info(\"Writing to file {}\".format(image_name))", "def data_convert_image(data):\n if isinstance(data, basestring):\n if data.startswith(('http:', 'https:')):\n resp = requests.get(data).content\n image = np.asarray(bytearray(resp), dtype=np.uint8)\n image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)\n elif data.endswith(('.jpg', '.png')):\n data = data.replace('\\\\', '/')\n image = cv2.imread(data, cv2.IMREAD_GRAYSCALE)\n else:\n image = np.asarray(bytearray(data), dtype=np.uint8)\n image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)\n else:\n image = data\n return image", "def imfrombytes(content, flag='color', float32=False):\n img_np = np.frombuffer(content, np.uint8)\n imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}\n img = cv2.imdecode(img_np, imread_flags[flag])\n if float32:\n img = img.astype(np.float32) / 255.\n return img", "def process(image):\n pass", "def blob2image(s):\n d0 = ord(s[0])\n d1 = ord(s[1])\n assert len(s)==d0*d1+2,(len(s),d0,d1)\n return numpy.frombuffer(s[2:],dtype='B').reshape(d0,d1)", "def process_image(self):\n pass", "def decode_loaded(x):\n return cv2.imdecode(x, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)", "def decode_image(self, image_data):\n image = self._sess.run(self._decode,\n feed_dict={self._decode_data : image_data})\n if len(image.shape) != 3 or image.shape[2] not in (1,3):\n raise ValueError('The image channels not supported.')\n \n return image", "def _get_image(x):\n return b64encode(x).decode('ascii')", "def decode_jpeg(image_buffer, scope=None):\n with tf.op_scope([image_buffer], scope, 'decode_jpeg'):\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set 
dynamically by decode_jpeg. In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n\n # After this point, all image pixels reside in [0,1)\n # until the very end, when they're rescaled to (-1, 1). The various\n # adjust_* ops all require this range for dtype float.\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n return image", "def image_loader(fileobj):\n if isinstance(fileobj, six.string_types):\n return cv2.imread(fileobj, cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb\n elif isinstance(fileobj, bytes):\n byte_arr = bytearray(fileobj)\n else:\n byte_arr = bytearray(fileobj.read())\n \n return cv2.imdecode(np.asarray(byte_arr, dtype=np.uint8), cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb", "def process_images(text):\n # if text != None:\n if text is not None:\n soup = BeautifulSoup(str(text), 'html.parser')\n img = soup.img\n try:\n image = img['title']\n return image\n except (TypeError, KeyError):\n # print(img)\n pass", "def decode(image):\n bitstream = ''\n for row in image:\n for pixel in row:\n for intensity in pixel:\n # Use get_bit function from bits.py library\n # to select the LSB of each intensity value\n bitstream += bits.get_bit(intensity,0)\n # Decode message using bits_to_message function\n message = bits.bits_to_message(bitstream)\n return message", "def formatImage(imgData):\n imgstr = re.search(b'base64,(.*)', imgData).group(1)\n with open('output.png','wb') as output:\n output.write(base64.decodebytes(imgstr))", "def proc_image(self, tokens):\n\n print \"IMAGE:\", tokens, tokens.asList(), tokens.keys()\n\n raise NotImplementedError", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n red_channel = encoded_image.split()[0]\n\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n for x in range(x_size):\n for y in range(y_size):\n red_pixel = red_channel.getpixel((x,y))\n binary = bin(red_pixel)\n\n lsb = int(binary[-1])\n if(lsb == 0):\n pixels[x,y] = (0,0,0)\n elif(lsb == 1):\n pixels[x,y] = (255,255,255)\n\n pass\n decoded_image.save(\"images/decoded_image.png\")", "def _restore_image_name(self, data: Dict[str, str]) -> ImageName:\n return ImageName.parse(data[\"str\"])", "def recognize_text_from_image_bytes(image_bytes: str):\n analyze_endpoint_url = service_constants.VISION_SERVICE_URL + \"recognizeText\"\n\n headers = {\n # subscription key must accompany every call\n 'Ocp-Apim-Subscription-Key': service_constants.OCP_APIM_SUBSCRIPTION_KEY,\n # when sending image bytes, set this content type\n 'Content-Type': 'application/octet-stream'\n }\n\n # if the text is handwritten, toggle this flag\n params = {'handwriting': 'false'}\n\n # make the POST request\n response = requests.post(analyze_endpoint_url, headers=headers, params=params, data=image_bytes)\n\n # if an error occurred\n response.raise_for_status()\n\n # json object from the body\n analysis = response.json()\n\n # This is the structure of the result dict\n # result[\"language\"]\n # result[\"orientation\"]\n # result[\"textAngle\"]\n # result[\"regions\"][0][\"boundingBox\"]\n # result[\"regions\"][0][\"lines\"][0][\"boundingBox\"]\n # 
result[\"regions\"][0][\"lines\"][0][\"words\"][0][\"boundingBox\"]\n # result[\"regions\"][0][\"lines\"][0][\"words\"][0][\"text\"]\n\n return analysis", "def decompress(cls, imgz):\n # translate back uint8 into string\n if not isinstance(imgz, str):\n imgz = ''.join([chr(d) for d in imgz])\n # zlib decompression\n imgz = VariableLength.decompress(imgz)\n ####\n w = binary_cast(imgz[:2], 'BB', 'H')[0]\n h = binary_cast(imgz[2:4], 'BB', 'H')[0]\n img = imgz[4:]\n img = np.reshape(img, (h, w))\n return img", "def parse_img(image_path):\n image = tf.read_file(image_path)\n image = tf.image.decode_image(image)\n image = tf.reshape(image, [INITIAL_RES, INITIAL_RES, 3])\n image = tf.image.resize_images(image, [OUTPUT_RES, OUTPUT_RES])\n #image = image[:, :, ::-1] # BGE -> RGB conversion if needed?\n #image = tf.image.rgb_to_grayscale(image)\n #image = tf.image.convert_image_dtype(image, tf.float32) # In neuralNet.py\n image = image.eval() # Convert from tensor to Numpy array for Keras\n return image", "def decode_img(img):\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.image.resize(img, [200, 300])\n\n return img", "def __parse_image(self, image_path: str, image_label: int) -> tuple:\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n img_file = tf.read_file(image_path)\n img_decoded = tf.image.decode_jpeg(img_file, channels=self.image_shape[2])\n img_decoded = tf.image.resize_images(img_decoded, self.image_shape[0:2])\n img_decoded = tf.cast(img_decoded, tf.float32)\n if self.normalize_images:\n img_decoded = tf.image.per_image_standardization(img_decoded)\n\n return img_decoded, one_hot", "def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size", "def decipher_raw2(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n stringa = str(b'\\xff\\xd8\\xff').replace('\\'', '')\n for i in range(len(u))[::2]:\n e = [decrypt2(u[i], u[i + 1], key)]\n i = b''.join([struct.pack('2I', ee, ef) for ee, ef in e])\n\n prova = str(i).replace('\\'', '')\n\n #lel = prova.find(stringa)\n\n if prova.find(stringa) != -1:\n print(\"detect format file: JPG\")\n return 0\n else:\n return 1", "def deserialize(self, str):\n try:\n if self.image is None:\n self.image = autonavigation.msg.Image()\n end = 0\n _x = self\n start = end\n end += 29\n (_x.unique_key, _x.gps_week, _x.gps_millisecond, _x.video_id, _x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _struct_2IQB3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 212\n (_x.image.localPose.time, _x.image.localPose.dr_x, _x.image.localPose.dr_y, _x.image.localPose.dr_z, _x.image.localPose.dr_heading, _x.image.localPose.dr_roll, _x.image.localPose.dr_pitch, _x.image.localPose.lf_speed, _x.image.localPose.rf_speed, _x.image.localPose.lr_speed, _x.image.localPose.rr_speed, _x.image.localPose.rot_x, _x.image.localPose.rot_y, _x.image.localPose.rot_z, _x.image.localPose.acc_x, _x.image.localPose.acc_y, _x.image.localPose.acc_z, _x.image.localPose.batteryState, _x.image.localPose.batteryEnergy, _x.image.localPose.steer, _x.image.localPose.brake, _x.image.localPose.fuel, 
_x.image.localPose.trans, _x.image.localPose.VehicleState, _x.image.localPose.mode, _x.image.localPose.drStatus, _x.image.localPose.errorStatus, _x.image.localPose.emergency_flag, _x.image.localPose.hardswitch_on, _x.image.gpsPos.gps_flag, _x.image.gpsPos.gps_week, _x.image.gpsPos.gps_millisecond, _x.image.gpsPos.longitude, _x.image.gpsPos.laltitude, _x.image.gpsPos.gaussX, _x.image.gpsPos.gaussY, _x.image.gpsPos.height, _x.image.gpsPos.pitch, _x.image.gpsPos.roll, _x.image.gpsPos.azimuth, _x.image.gpsPos.northVelocity, _x.image.gpsPos.eastVelocity, _x.image.gpsPos.upVelocity, _x.image.gpsPos.positionStatus, _x.image.gpsPos.rot_x, _x.image.gpsPos.rot_y, _x.image.gpsPos.rot_z, _x.image.gpsPos.acc_x, _x.image.gpsPos.acc_y, _x.image.gpsPos.acc_z, _x.image.height, _x.image.width,) = _struct_d21i7bBI6d13i2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _struct_BI.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode(self):\n s = self.encoded_content\n if self.encoded_content:\n if self.encoding:\n if self.encoding == u'base64':\n s = decode_base64(s)\n else:\n raise Exception(u'unknown data encoding %s' % (self.encoding))\n if self.compression:\n if self.compression == u'gzip':\n s = decompress_gzip(s)\n else:\n raise Exception(u'unknown data compression %s' %(self.compression))\n else:\n raise Exception(u'no encoded content to decode')\n self.decoded_content = []\n for idx in xrange(0, len(s), 4):\n val = ord(str(s[idx])) | (ord(str(s[idx + 1])) << 8) | \\\n (ord(str(s[idx + 2])) << 16) | (ord(str(s[idx + 3])) << 24)\n self.decoded_content.append(val)\n # generate the 2D version\n self._gen_2D()", "def decode(decryption=None):\n\n key_to_encrypt = {'a': 'q', 'b': 'v', 'c': 'x', 'd': 'z', 'e': 'y', 'f': 'w', 'g': 'u', 'h': 't', 'i': 's',\n 'j': 'r',\n 'k': 'p', 'l': 'o', 'm': 'n', 'n': 'm', 'o': 'l', 'p': 'k', 'r': 'j', 's': 'i', 't': 'h',\n 'u': 'g', 'w': 'f',\n 'y': 'e', 'z': 'd', 'x': 'c', 'v': 'b', 'q': 'a',\n 'A': 'Q', 'B': 'V', 'C': 'X', 'D': 'Z', 'E': 'Y', 'F': 'W', 'G': 'U', 'H': 'T', 'I': 'S',\n 'J': 'R', 'K': 'P',\n 'L': 'O', 'M': 'N', 'N': 'M', 'O': 'L', 'P': 'K', 'R': 'J', 'S': 'I', 'T': 'H', 'U': 'G',\n 'W': 'F', 'Y': 'E',\n 'Z': 'D', 'X': 'C', 'V': 'B', 'Q': 'S',\n '1': '5', '2': '9', '3': '8', '4': '7', '5': '6', '6': '4', '7': '3', '8': '2', '9': '1',\n '.': ',', ',': '.', ':': ';', ';': ':', '?': '!', '!': '?', '-': '_', '_': '-', '(': ')',\n ')': '(',\n '%': '$', '$': '%', ' ': '&', '&': ' ', '+': '*', '*': '+'}\n\n k1 = key.Key(key_to_encrypt)\n reversed_key = k1.createReverseKey()\n\n entered_image = input(\"Image name with extension: \")\n img = Image.open(entered_image, 'r')\n\n decoded_message = ''\n data_from_image = iter(img.getdata())\n\n while (True):\n pixels = [value for value in data_from_image.__next__()[:3] +\n data_from_image.__next__()[:3] +\n data_from_image.__next__()[:3]]\n\n binary = ''\n\n for i in pixels[:8]:\n if (i % 2 == 0):\n binary += '0'\n else:\n binary += '1'\n\n decoded_message += chr(int(binary, 2))\n d1 = 
monoalphabetic_decryption.Decryption(reversed_key, decoded_message)\n message = d1.decrypt()\n if (pixels[-1] % 2 != 0):\n return message", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def convertdataTOimage(data):\n data = data.partition(\",\")[2]\n padding = len(data)%4\n data += \"=\"*padding\n image = Image.open(BytesIO(b64decode(data)))\n return image", "def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None", "def convertImage(img):\n return '\\\\includegraphicsdata{%s}' % \":\".join([\n 'data',\n img.contentType,\n \"base64,%s\" % img.data.encode(\"base64\").replace(\"\\n\", \"\"),\n ])", "def deserialize_image(self, data, give_file_name):\r\n # Generate a random 8-character name\r\n # name = \"img_\" + self.generate_random_name() + \".png\"\r\n name = give_file_name + \".png\"\r\n file_path = os.path.join(self.temp_dir, name)\r\n img = Image.frombytes(data['mode'], data['size'], data['pixels'])\r\n img.save(file_path)\r\n return file_path", "def decode(base64img, size):\n # the bytes of the image ---> np array\n image_64_decode = base64.decodebytes(base64img.encode('utf-8'))\n image = Image.open(BytesIO(image_64_decode))\n\n # resize, and convert to RGB\n image = image.resize(size, Image.ANTIALIAS)\n image = image.convert('RGB')\n\n nparr = np.array(image)\n nparr = nparr[:,:,:3]\n return nparr", "def image_preprocess(image: str):\n if type(image) == str:\n image = cv2.imread(image)\n \n image_cropped = image[25:375, :]\n \n image = cv2.resize(image_cropped, IMG_SIZE, interpolation = cv2.INTER_AREA)\n \n return image", "def decode_and_resize(image_str_tensor, size):\n \n # Output a grayscale (channels=1) image\n image = tf.image.decode_jpeg(image_str_tensor, channels=3)\n \n # Note resize expects a batch_size, but tf_map supresses that index,\n # thus we have to expand then squeeze. Resize returns float32 in the\n # range [0, uint8_max]\n image = tf.expand_dims(image, 0)\n # image = tf.image.resize_bilinear(\n # image, [size, size], align_corners=False)\n image = tf.squeeze(image, squeeze_dims=[0])\n image = tf.cast(image, dtype=tf.uint8)\n return image", "def fromascii(self, *args, **kwargs):\n return _image.image_fromascii(self, *args, **kwargs)", "def decode(cls, flattened):\n if len(flattened) < 8:\n return None\n t = binary_cast(flattened[:8], 'BBBBBBBB', 'd')[0]\n img = cls.decompress(flattened[8:])\n return t, img", "def decode(data): #@NoSelf", "def decode(self, encoded):", "def _decode_string(box_string):\r\n if box_string == \"no_box\":\r\n return np.zeros((0,4))\r\n else:\r\n try:\r\n boxes = np.array([np.array([int(eval(i)) for i in box.split(\" \")])\r\n for box in box_string.split(\";\")])\r\n return boxes\r\n except:\r\n print(box_string)\r\n print(\"Submission is not well formatted. 
empty boxes will be returned\")\r\n return np.zeros((0,4))", "def to_internal_value(self, data):\n if isinstance(data, str) and data.startswith('data:image'):\n # Found image is encoded, and must be decoded\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1] # Extract file extension\n id = uuid.uuid4()\n data = ContentFile(base64.b64decode(imgstr), name = id.urn[9:] + '.' + ext)\n return super(Base64ImageField, self).to_internal_value(data)", "def parse_function(path,label):\n # Read an image from a file\n image_string = tf.io.read_file(path)\n # Decode it into a dense vector\n image_decoded = tf.image.decode_jpeg(image_string, channels=CHANNELS)\n # Resize it to fixed shape\n image_resized = tf.image.resize(image_decoded, [IMG_SIZE, IMG_SIZE])\n # Normalize it from [0, 255] to [0.0, 1.0]\n image_normalized = image_resized / 255.0\n return image_normalized, label", "def decode(qf, st, en_filename, jpg_filename, img_shape):\n row, col, ch = img_shape\n en_bin = read_binstr_frome_file(en_filename)\n zbs = decode_AC_DC(en_bin, img_shape, st)\n sample_size = (row, col)\n if st == (4, 1, 1):\n sample_size = ((row // 2), (col // 2))\n elif st == (4, 2, 2):\n sample_size = (row, (col // 2))\n img = np.zeros((row, col, ch), np.uint8)\n for c, zb in enumerate(zbs):\n blocks = de_zigzag(zb)\n q_t = get_quantization_table_by_factor(qf, channel_select[c % len(zbs)])\n img_blocks = get_dequantization_img_blocks(blocks, q_t)\n\n b_r, b_c = (row, col) if c == 0 else sample_size\n\n tmp = np.ones((b_r, b_c), np.int8) * 128\n for i, (row_offset, col_offset) in enumerate(get_block_iterator(b_r, b_c)):\n tmp[row_offset:row_offset + 8 if row_offset + 8 <= b_r else b_r,\n col_offset:col_offset + 8 if col_offset + 8 <= b_c else b_c] += img_blocks[i]\n\n # inverse subsample\n img_blocks = cv2.resize(tmp, (row, col))\n\n img[:, :, c] = np.round(img_blocks)\n\n if ch == 3:\n img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)\n\n cv2.imwrite(jpg_filename, img)\n\n return img", "def process_image((uri, label)):\n image_bytes = read_image(uri)\n\n if image_bytes is not None:\n yield uri, label, image_bytes", "def image_transform(im_bytes):\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def decode(p):\n #assert p.endswith('.' 
+ EXTENSION)\n p2 = os.path.basename(p).replace('baseline.png', '.png')\n p2p = os.path.join('/mnt/Volume0/test/clic2020-devkit/result/', p2) #add by me\n pp = os.path.join('/mnt/Volume0/test/clic2020-devkit/targets',p2)\n p2 = os.path.join('/mnt/Volume0/test/clic2020-devkit/inputs/', p2) #add by me\n p1 = pframe_dataset_shared.get_previous_frame_path(p2)\n #p1 = os.path.join('/mnt/Volume0/test/clic2020-devkit/test_data/inputs/', p1)\n #assert os.path.isfile(p1), (p2, p1, p, len(glob.glob('*.png')))\n b = Image.open(p).convert('L')\n f2_reconstructed = decoder(np.array(Image.open(p1)), b)\n Image.fromarray(f2_reconstructed).save(p2p)\n return f2_reconstructed, np.array(Image.open(pp))", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! 
Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def add_png_decoding(input_width, input_height, input_depth):\n base64_str = tf.placeholder(tf.string, name='input_string')\n input_str = tf.decode_base64(base64_str)\n decoded_image = tf.image.decode_png(input_str, channels=input_depth)\n # Convert from full range of uint8 to range [0,1] of float32.\n decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,\n tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n tf.identity(resized_image, name=\"DecodePNGOutput\")\n return input_str, resized_image", "def parser_image_data(jpeg_file_path):\n image = tf.io.read_file(jpeg_file_path)\n image = tf.image.decode_jpeg(image)\n image = tf.image.resize(image, [image_height, image_width])\n image = tf.cast(image, dtype=tf.float32)\n image = (image / 127.5) - 1.0\n return image", "def __parse_image_load(self, image_path: str, image_label: int):\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n if self.rgb:\n flag = cv2.IMREAD_COLOR\n else:\n flag = cv2.IMREAD_GRAYSCALE\n\n img = cv2.imread(image_path, flags=flag)\n img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(\n np.float32)\n\n if self.normalize_images:\n img_mean = np.mean(img, axis=(0, 1))\n img_std = np.std(img, axis=(0, 1))\n\n img = (img - img_mean) / img_std\n\n return img, one_hot", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def _str_eval_img(eval, act, ctxt, *obs) :\n from PIL import Image\n import os.path\n filename = obs[0][0]\n css_class = \"desc_img\"\n if len(obs)>1 and obs[1][0] == \"left\" :\n css_class = \"desc_img_left\"\n width, height = Image.open(os.path.join(os.path.abspath(\"games/teptour_files\"), filename)).size\n print \"Image\",filename,\"is\",width,\"x\",height\n return [\"<img class=\\\"\"+css_class+\"\\\" width=\\\"\"+str(width)+\"\\\" height=\\\"\"+str(height)+\"\\\" src=\\\"teptour/\"+filename+\"\\\">\"]", "def byte2img(filename):\n try:\n with open(filename, 'r') as f:\n arr = []\n for line in f:\n vals = 
line.split()\n del vals[0]\n arr.append(vals)\n \n max_len = max([len(vals) for vals in arr])\n \n new_arr = []\n for vals in arr:\n new_arr.append([val.replace('?', '0') for val in vals])\n \n for vals in new_arr:\n if '?' in vals:\n print(vals)\n \n hexstring = ''.join(list(itertools.chain.from_iterable(new_arr)))\n \n byte_arr = bytearray.fromhex(hexstring)\n width = 1024\n rem = len(byte_arr) % width\n byte_arr_len = len(byte_arr) - rem\n byte_arr = byte_arr[:byte_arr_len]\n byte_arr = np.asarray(byte_arr)\n np_arr = np.reshape(byte_arr, (len(byte_arr)//width, width))\n np_arr = np.uint8(np_arr)\n img = Image.fromarray(np_arr)\n return img\n except Exception as error:\n logging.error(traceback.format_exc())", "def onImageReceived(self, msg):\n\n self.BGR = self.bridge.imgmsg_to_cv2(msg)\n self.processImage(self.BGR)", "def decode_lcmt_image(msg, image_in=None):\n enums = lcmt_image\n w = msg.width\n h = msg.height\n pixel_desc = (msg.pixel_format, msg.channel_type)\n if pixel_desc == (enums.PIXEL_FORMAT_RGBA, enums.CHANNEL_TYPE_UINT8):\n num_channels = 4\n dtype = np.uint8\n elif pixel_desc == (enums.PIXEL_FORMAT_DEPTH, enums.CHANNEL_TYPE_FLOAT32):\n num_channels = 1\n dtype = np.float32\n elif pixel_desc == (enums.PIXEL_FORMAT_DEPTH, enums.CHANNEL_TYPE_UINT16):\n num_channels = 1\n dtype = np.uint16\n elif pixel_desc == (enums.PIXEL_FORMAT_LABEL, enums.CHANNEL_TYPE_INT16):\n num_channels = 1\n dtype = np.int16\n else:\n raise RuntimeError(\"Unsupported pixel type: {}\".format(pixel_desc))\n bytes_per_pixel = np.dtype(dtype).itemsize * num_channels\n assert msg.row_stride == msg.width * bytes_per_pixel, msg.row_stride\n if msg.compression_method == enums.COMPRESSION_METHOD_NOT_COMPRESSED:\n data_bytes = msg.data\n elif msg.compression_method == enums.COMPRESSION_METHOD_ZLIB:\n # TODO(eric.cousineau): Consider using `data`s buffer, if possible.\n # Can decompress() somehow use an existing buffer in Python?\n data_bytes = zlib.decompress(msg.data)\n else:\n raise RuntimeError(\n \"Unsupported compression type: {}\".format(msg.compression_method))\n # Cast to desired type and shape.\n data = np.frombuffer(data_bytes, dtype=dtype)\n data.shape = (h, w, num_channels)\n # Copy data to VTK image.\n image = create_image_if_needed(w, h, num_channels, dtype, image_in)\n image_data = vtk_image_to_numpy(image)\n image_data[:] = data[:]\n return image", "def _read_image(self, image_path:str, label:str):\n # Get the full path to the image\n image = \"\"\n if label == \"real\":\n image = os.path.join(self.root, \"real\", image_path)\n else:\n image = os.path.join(self.root, \"fake\", image_path)\n \n # Read the image\n image = cv2.imread(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Normalize the image\n image = image / 255.0\n\n # Convert the image to floating point to use it as\n # an input to the PyTorch model\n image = image.astype(np.float32)\n\n return image", "def bytes_to_cv2img(b):\n im = np.array(Image.open(io.BytesIO(b)))\n return im", "def OCR_Return_Verifycode(b64string):\n beforeRepaireStr = b64string if isinstance(b64string,bytes) else b64string.encode()\n \n #repair URL parse problem\n #last_img_str=beforeRepaireStr.replace(b' ',b'+');\n #use base64.urlsafe_decode replace\n \n last_img,status_code = base64strToStringIO(beforeRepaireStr)\n if len(status_code)>1:\n code=\"\"\n else:\n code = pytesseract.image_to_string(last_img, lang='eng', config='-psm 7')\n print(code,status_code)\n return code,status_code", "def load(filename, imageprops):\n with gzip.open(filename, 
'rb') as f:\n file_content = f.read()\n return parse_svg.parse_svg_string(file_content, imageprops, \"en\")", "def muestraPokemon(bytes):\n image = Image.open(io.BytesIO(bytes))\n data = np.array(image)\n plt.imshow(data)\n plt.axis('off')\n plt.show()", "def from_image(cls, image):\n raise NotImplementedError", "def image(self, text):\n pattern = re.compile(r\"\"\"\n (?:[\\[{])? # pre\n \\! # opening !\n (\\<|\\=|\\>)? # optional alignment atts\n (%s) # optional style,class atts\n (?:\\. )? # optional dot-space\n ([^\\s(!]+) # presume this is the src\n \\s? # optional space\n (?:\\(([^\\)]+)\\))? # optional title\n \\! # closing\n (?::(\\S+))? # optional href\n (?:[\\]}]|(?=\\s|$)) # lookahead: space or end of string\n \"\"\" % self.c, re.U | re.X)\n return pattern.sub(self.fImage, text)", "def read(self):\r\n self.set_generator() # rearm\r\n total = self.width * self.height * 3\r\n if total < 32:\r\n raise Exception(\"Text not found.\")\r\n size = chunk = string = str()\r\n i = 0 # for(i=0; true; ++i)\r\n while True:\r\n (wp, hp, ch) = self.generator.next() # i byte\r\n values = self.im.getpixel((wp, hp))\r\n tmp = self.binary(values[ch], 1)\r\n if i < 32: # it's lame but I prefer string/bitset\r\n size += tmp[7]\r\n if i == 31:\r\n size = int(size, 2)\r\n if size < 1 or (size + 32) > total:\r\n raise Exception(\"Text not found.\")\r\n elif i < size + 32:\r\n chunk += tmp[7]\r\n if len(chunk) == 8:\r\n string += chr(int(chunk, 2))\r\n chunk = str()\r\n else:\r\n break\r\n i += 1\r\n if self.useAES and self.aes:\r\n if len(string) % 16 != 0:\r\n raise Exception(\"Text not encrypted.\")\r\n string = self.aes.decrypt(string).rstrip(chr(0))\r\n string.decode() # rise an exception if invalid\r\n return string", "def parse_function(images, labels, n_classes, resized_shape, palette):\r\n images = load_image(images, resized_shape)\r\n labels = load_label(labels, n_classes, resized_shape, palette)\r\n return images, labels", "def decoder(self, value) -> Tuple:\n data = self.decode(value)\n # TODO: remove hardcoded value.\n image_id = 1.0\n image = data[\"image\"]\n boxes = data[\"groundtruth_boxes\"]\n classes = data[\"groundtruth_classes\"]\n return (image_id, image, boxes, classes)", "def _parse_single(filename, label, image_size=IMAGE_SIZE):\n # Decode and convert image to appropriate type\n image = tf.image.decode_png(tf.read_file(filename), channels=image_size[2])\n image = tf.image.convert_image_dtype(image, tf.float32) # Also scales from [0, 255] to [0, 1)\n # Resize according to module requirements\n image = tf.image.resize_images(image, image_size[:2])\n return image, label", "def str_to_img_ndarrary(s):\n img = str_to_pil_img(s)\n img_array = np.array(img)\n return skimage.util.img_as_float(img_array)", "def img_to_cv2(self, image_msg):\n # rospy.loginfo(\"image is of type: \" + str(type(image_msg)))\n type_as_str = str(type(image_msg))\n if type_as_str.find('sensor_msgs.msg._CompressedImage.CompressedImage') >= 0:\n # Image to numpy array\n np_arr = np.fromstring(image_msg.data, np.uint8)\n # Decode to cv2 image and store\n return cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n elif type_as_str.find('sensor_msgs.msg._Image.Image') >= 0:\n # Use CvBridge to transform\n try:\n return self.bridge.imgmsg_to_cv2(image_msg,\n image_msg.encoding) # \"bgr8\"\n except CvBridgeError as e:\n rospy.logerr(\"Error when converting image: \" + str(e))\n return None\n else:\n rospy.logerr(\"We don't know how to transform image of type \" +\n str(type(image_msg)) + \" to cv2 format.\")\n return None", "def 
process_image(im):\r\n h, _, _ = im.shape\r\n im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\r\n \r\n # Divide the picture into 3 regions\r\n l1 = int(0.65*h)\r\n l2 = int(0.77*h)\r\n im1 = im_gray[:l1,:]\r\n im2 = im_gray[l1+1:l2,:]\r\n im3 = im_gray[l2+1:,:]\r\n \r\n # Extract 4 pictures\r\n pics = extract_4_pics(im, im1)\r\n \r\n # Extract the word size\r\n word_size = extract_word_size(im2)\r\n \r\n # Extract the letters\r\n letters = extract_letters(im3)\r\n \r\n print 'word size =', word_size\r\n print 'letters =', letters\r\n for i, pic in enumerate(pics):\r\n imsave(str(i) + '.png', pic)\r\n\r\n return word_size, letters, pics" ]
[ "0.76571083", "0.70518196", "0.67220795", "0.6707585", "0.66721165", "0.6323393", "0.6302396", "0.6182582", "0.61463886", "0.6065278", "0.6065278", "0.603599", "0.6005533", "0.5996547", "0.59715015", "0.5929471", "0.5828417", "0.58076036", "0.5807062", "0.57541275", "0.5738936", "0.56811136", "0.56799453", "0.56702703", "0.56594604", "0.56518316", "0.5633855", "0.56259084", "0.56231534", "0.5620396", "0.5610526", "0.5595102", "0.5584771", "0.5568671", "0.55350786", "0.55164963", "0.550122", "0.54690826", "0.54406035", "0.5437382", "0.54369813", "0.5429317", "0.5415856", "0.5411013", "0.5411013", "0.5408659", "0.54042274", "0.5399507", "0.5397621", "0.53867036", "0.5374088", "0.53707093", "0.5370412", "0.53641087", "0.5360488", "0.5345281", "0.53441745", "0.53347856", "0.5332727", "0.53322", "0.5327172", "0.5300955", "0.5299871", "0.52945143", "0.52938294", "0.5285905", "0.52805245", "0.52700204", "0.52559197", "0.5247435", "0.5243083", "0.5242419", "0.52410614", "0.5232031", "0.51950544", "0.5185621", "0.5165278", "0.5157938", "0.5144903", "0.5132747", "0.51321626", "0.5124991", "0.51209074", "0.51152754", "0.51054263", "0.51039004", "0.50977725", "0.50975066", "0.509574", "0.5088625", "0.50848866", "0.50809073", "0.50748736", "0.5074331", "0.50740904", "0.5074013", "0.5072735", "0.5070584", "0.50684345", "0.5063578" ]
0.52232766
74
Estimate ND similarity transformation with or without scaling.
def umeyama(src, dst, estimate_scale):
    num = src.size(0)
    dim = src.size(1)

    # Compute mean of src and dst.
    src_mean = src.mean(dim=0)  # [N]
    dst_mean = dst.mean(dim=0)  # [N]

    # Subtract mean from src and dst.
    src_demean = src - src_mean  # [M, N]
    dst_demean = dst - dst_mean  # [M, N]

    # Eq. (38).
    A = (dst_demean.transpose(0, 1) @ src_demean) / num  # [N, N]

    # Eq. (39).
    d = torch.ones(dim).type_as(src)  # [N]
    if torch.det(A) < 0:
        d[dim - 1] = -1

    T = torch.eye(dim + 1).type_as(src)

    # Difference between NumPy and PyTorch: torch.svd returns V, while
    # np.linalg.svd returns V^T, so transpose to match the formulas below.
    U, S, V = torch.svd(A)
    V = V.transpose(0, 1)

    # Eq. (40) and (43).
    rank = torch.matrix_rank(A)
    if rank == 0:
        return float('nan') * T
    elif rank == dim - 1:
        if torch.det(U) * torch.det(V) > 0:
            T[:dim, :dim] = U @ V
        else:
            s = d[dim - 1]
            d[dim - 1] = -1
            T[:dim, :dim] = U @ torch.diag(d) @ V
            d[dim - 1] = s
    else:
        T[:dim, :dim] = U @ torch.diag(d) @ V

    if estimate_scale:
        # Eq. (41) and (42): the paper uses the biased (1/N) variance of the
        # demeaned source points.
        scale = 1.0 / src_demean.var(dim=0, unbiased=False).sum() * (S @ d)
    else:
        scale = 1.0

    src_mean = src_mean.unsqueeze(1)  # [N, 1]
    T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean).squeeze(1)
    T[:dim, :dim] *= scale

    return T
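A minimal usage sketch for the umeyama function above, assuming a PyTorch build where torch.svd and torch.matrix_rank are still available; the rotation angle, scale, translation, point count and tolerance are illustrative values, not part of the original record. It applies a known 2-D similarity transform to random points and checks that the returned homogeneous matrix T maps src onto dst.

import math
import torch

torch.manual_seed(0)
num, dim = 50, 2
src = torch.rand(num, dim, dtype=torch.float64)

# Illustrative ground-truth similarity transform: 30-degree rotation,
# scale 1.7, translation (0.5, -2.0).
theta = math.pi / 6
R = torch.tensor([[math.cos(theta), -math.sin(theta)],
                  [math.sin(theta), math.cos(theta)]], dtype=torch.float64)
scale, t = 1.7, torch.tensor([0.5, -2.0], dtype=torch.float64)
dst = scale * src @ R.t() + t

T = umeyama(src, dst, estimate_scale=True)  # (dim+1) x (dim+1) homogeneous matrix

# Apply the estimated transform in homogeneous coordinates and compare.
src_h = torch.cat([src, torch.ones(num, 1, dtype=torch.float64)], dim=1)
dst_est = (T @ src_h.t()).t()[:, :dim]
print(torch.allclose(dst_est, dst, atol=1e-6))  # expected: True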
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_trilinear_similarity(self, context, query, context_max_len, query_max_len,\n w4mlu, bias):\n\n subres0 = nd.tile(self.w4c(context), [1, 1, query_max_len])\n subres1 = nd.tile(nd.transpose(\n self.w4q(query), axes=(0, 2, 1)), [1, context_max_len, 1])\n subres2 = nd.batch_dot(w4mlu * context,\n nd.transpose(query, axes=(0, 2, 1)))\n similarity_mat = subres0 + subres1 + subres2 + bias\n return similarity_mat", "def compute_similarity_transform(X, Y, compute_optimal_scale=False):\n\n muX = X.mean(0)\n muY = Y.mean(0)\n\n X0 = X - muX\n Y0 = Y - muY\n\n ssX = (X0**2.).sum()\n ssY = (Y0**2.).sum()\n\n # centred Frobenius norm\n normX = np.sqrt(ssX)\n normY = np.sqrt(ssY)\n\n # scale to equal (unit) norm\n X0 = X0 / normX\n Y0 = Y0 / normY\n\n # optimum rotation matrix of Y\n A = np.dot(X0.T, Y0)\n U,s,Vt = np.linalg.svd(A,full_matrices=False)\n V = Vt.T\n T = np.dot(V, U.T)\n\n # Make sure we have a rotation\n detT = np.linalg.det(T)\n V[:,-1] *= np.sign( detT )\n s[-1] *= np.sign( detT )\n T = np.dot(V, U.T)\n\n traceTA = s.sum()\n\n if compute_optimal_scale: # Compute optimum scaling of Y.\n b = traceTA * normX / normY\n d = 1 - traceTA**2\n Z = normX*traceTA*np.dot(Y0, T) + muX\n else: # If no scaling allowed\n b = 1\n d = 1 + ssY/ssX - 2 * traceTA * normY / normX\n Z = normY*np.dot(Y0, T) + muX\n\n c = muX - b*np.dot(muY, T)\n\n return d, Z, T, b, c", "def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)", "def similarity_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n mu = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n mu += w[i]*(p_adj.dot(p_adj))\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj/mu\r\n v_out += q_wgt\r\n return v_out", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def compute_similarity(self, seq_node, **kwargs):\n pass", "def vi_similarity(self, normalize=True, model='m1'):\n R, C = self.shape\n N = self.grand_total\n\n max_dist = log(N)\n dist = self.vi_distance(normalize=False)\n score = max_dist - dist\n\n if model is None:\n null_score = 0\n elif model == 'm1': # only N is fixed\n null_dist = log(R) + log(C)\n null_score = max_dist - null_dist\n elif model == 'm2r': # fixed row margin\n null_dist = log(C) + fentropy(self.row_totals) / N\n null_score = max_dist - null_dist\n elif model == 'm2c': # fixed column margin\n null_dist = log(R) + fentropy(self.col_totals) / N\n null_score = max_dist - null_dist\n elif model == 'm3': # both row and column margins fixed\n null_dist = (fentropy(self.row_totals) + fentropy(self.col_totals)) / N\n null_score = max_dist - null_dist\n else:\n expected = self.expected(model)\n null_score = 
expected.vi_similarity(normalize=False, model=None)\n\n score -= null_score\n if normalize:\n max_score = max_dist - null_score\n score = 1.0 if score == max_score else _div(score, max_score)\n\n return score", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def align_reconstruction_naive_similarity(X, Xp):\n\t# Compute similarity Xp = s A X + b\n\tT = superimposition_matrix(X.T, Xp.T, scale=True)\n\tA, b = T[:3, :3], T[:3, 3]\n\ts = np.linalg.det(A)**(1. / 3)\n\tA /= s\n\treturn s, A, b", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def _dense_tanimotokernel(data_1, data_2):\n\n norm_1 = (data_1 ** 2).sum(axis=1).reshape(data_1.shape[0], 1)\n norm_2 = (data_2 ** 2).sum(axis=1).reshape(data_2.shape[0], 1)\n prod = data_1.dot(data_2.T)\n\n divisor = (norm_1 + norm_2.T - prod) + np.finfo(data_1.dtype).eps\n return prod / divisor", "def _calculate_similarity(self):\n self._logger.info(\"Calculating the similarity between images.\")\n\n # Create a helper function to simplify the loops below:\n def get_wrapper(fdx, mdx):\n wrapper = pos_wrappers.image_similarity_wrapper(\n reference_image=self.f['src_gray'](idx=fdx),\n moving_image=self.f['src_gray'](idx=mdx),\n affine_transformation=self.f['part_transf'](mIdx=mdx, fIdx=fdx))\n return copy.copy(wrapper)\n\n commands = [] # Will hold commands for calculating the similarity\n\n # Will hold (moving, fixed) images partial_transforms basically: all\n # partial transformations array\n partial_transforms = []\n\n self._logger.debug(\"Generating similarity measure warppers.\")\n for moving_slice in self.options.slice_range:\n # Get all fixed images to which given moving slice will be aligned:\n tpair = list(flatten(self._get_slice_pair(moving_slice)))\n\n # Append partial transformations for given moving slice to the\n # global partial transformations array\n partial_transforms.append(tpair)\n\n # Generate wrapper for measuring similarity for a given partial\n # transformation.\n for mdx, fdx in tpair:\n commands.append(get_wrapper(fdx, mdx))\n\n # Execute and commands and workflow the similarity measurements.\n stdout, stderr = self.execute(commands)\n simmilarity = map(lambda x: float(x.strip()),\n stdout.strip().split(\"\\n\"))\n simmilarity = dict(zip(flatten(partial_transforms), simmilarity))\n\n self._logger.debug(\"Generating graph edges.\")\n graph_connections = []\n\n # Lambda defines slice skipping is preffered (lower l), or reluctant\n # to slice skipping (higher)\n l = self.options.graphEdgeLambda\n\n for (mdx, fdx), s in simmilarity.iteritems():\n w = (1.0 + s) * abs(mdx - fdx) * (1.0 + l) ** (abs(mdx - fdx))\n graph_connections.append((fdx, mdx, w))\n\n self._logger.info(\"Creating a graph based on image similarities.\")\n # Generate the graph basen on the weight of the edges\n self.G = nx.DiGraph()\n self.G.add_weighted_edges_from(graph_connections)\n\n self._logger.debug(\"Saving the graph to a file.\")\n # Save the edges for some further analysis.\n nx.write_weighted_edgelist(self.G,\n self.f['graph_edges'](sign=self.signature))\n\n # Also, save the individual similarity metrics:\n simm_fh = open(self.f['similarity'](sign=self.signature), 'w')\n for (mdx, fdx), s in sorted(simmilarity.iteritems()):\n simm_fh.write(\"%d %d 
%f\\n\" % (mdx, fdx, s))\n simm_fh.close()", "def compute_similarity_transform(source_points, target_points):\n assert target_points.shape[0] == source_points.shape[0]\n assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n source_points = source_points.T\n target_points = target_points.T\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n var1 = np.sum(X1 ** 2)\n K = X1.dot(X2.T)\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n R = V.dot(Z.dot(U.T))\n scale = np.trace(R.dot(K)) / var1\n t = mu2 - scale * R.dot(mu1)\n source_points_hat = scale * R.dot(source_points) + t\n source_points_hat = source_points_hat.T\n return source_points_hat", "def calculate_ssim(img0, img1, data_range=None):\n ssim = skm.structural_similarity(img0, img1, data_range=data_range)\n return ssim", "def adaSynAdd(self, data, labels):\n r = {}\n g = {}\n rnorm = {}\n rsum = 0\n self.fit(data, labels)\n self.densityclf = neighbors.KNeighborsClassifier(n_neighbors=self.k) \n self.densityclf.fit(data, labels)\n \n #Note that this is an alternative approach for extracting the minority examples\n #in the *same* order as described in smoteTransform.fit()\n for index in xrange(0, len(data)):\n if labels[index] == abs(1 - self.minorityLabel):\n continue\n \n nrpoints = self.densityclf.kneighbors(data[index,:], return_distance=False)\n nrpoints = numpy.setdiff1d(nrpoints, [index])\n if self.minorityLabel == 1:\n num_majority = self.k - numpy.count_nonzero(labels[nrpoints])\n else:\n num_majority = numpy.count_nonzero(data[nrpoints])\n \n r[index] = float(num_majority) / float(self.k)\n assert(r[index] >= 0)\n \n \n for k, v in r.viewitems(): \n #print(k,v)\n rsum += v\n for k, v in r.viewitems():\n rnorm[k] = r[k] / rsum\n \n rnormsum = 0\n for k, v in rnorm.viewitems(): rnormsum += v\n #print(rnormsum)\n \n #m = mj + ml, -> if mj = m - ml, mj - ml = m - 2(ml)\n #where len(data) = m and len(r) = mj\n \n #Number of synthetic samples to generate\n G = float(len(data) - len(r) - len(r)) * float(self.beta)\n index = 0\n numNewPoints = 0\n #Convert normalised density distribution values to the number of values\n #to generate for each minority sample.\n for k, v in rnorm.viewitems():\n g[index] = int(round(rnorm[k] * G))\n numNewPoints += g[index]\n index += 1\n \n #print(numNewPoints)\n #print(self.minorityData)\n #Use this information to the smoteTransform transfer function.\n #for k, v in g.viewitems(): print(k,v)\n #len(g)\n #len(data[labels == 1])\n assert len(g) == len(data[labels == 1]), \"length of g ({0}) is different from num_minority ({1})\".format(len(g), len(data[labels == 1]))\n return self.transform(numRepeatArray = g)", "def sparse_norm(A: SparseTensor, out: Optional[torch.Tensor]) -> torch.Tensor:\n if not A.is_csr:\n raise RuntimeError(\"Norm can only be applied on CSR tensors\")\n if not check_same_dtype(A, out):\n raise ValueError(\"All data-types must match\")\n if A.shape[0] != out.shape[0]:\n raise ValueError(\"Dimension 0 of A must match the length of tensor 'out'\")\n\n return norm_(A.indexptr, A.data, out)", "def similarity(self, e1, e2):\n\t\tpass", "def norm(self):", "def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)", "def 
inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs", "def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n k = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n k += w[i]*p_adj.dot(p_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n k = np.sum(S)/k\r\n v_out = k*M*(v - p_wgt) + q_wgt\r\n return v_out", "def test_similarity_measure_size_compatibility():\n\n patch1 = torch.randn(size=(4, 6, 2))\n patch2 = torch.randn(size=(4, 6, 2))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(4, 3))\n patch2 = torch.randn(size=(4, 3))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(5,))\n patch2 = torch.randn(size=(5,))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(3, 7, 2, 4))\n patch2 = torch.randn(size=(3, 7, 2, 4))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successful", "def normalequ(self):\n tx = self.train_x\n y = self.train_y\n if self.regularizer is None:\n return np.linalg.solve(np.dot(tx.T, tx), np.dot(tx.T, y))\n elif self.regularizer.name is 'Ridge':\n G = np.eye(tx.shape[1])\n G[0, 0] = 0\n hes = np.dot(tx.T, tx) + self.regularizer_p * G\n return np.linalg.solve(hes, np.dot(tx.T, y))\n else:\n raise NotImplementedError", "def test_mntd(self):\r\n distmat = array([[0.0, 0.4, 2.0, 1.3],\r\n [0.4, 0.0, 1.6, 0.2],\r\n [2.0, 1.6, 0.0, 1.1],\r\n [1.3, 0.2, 1.1, 0.0]])\r\n assert_almost_equal(1.9 / 4., mntd(distmat))", "def __call__(self, x):\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias", "def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim", "def test_similarity_numeric():\n similarity = pm.compute_similarity_for_numeric(900, 800)\n nose.tools.ok_(abs(similarity - 8/9) < tests.FLOAT_DELTA, \"Wrong numeric similarity\")", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n 
\n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def similarity(self, x, y, keyboard_weight=None):\r\n dist = self.distance(x, y, keyboard_weight)\r\n max_len = max(len(x), len(y))\r\n max_dissimilarity = max_len * self.scale_coef\r\n similarity = 1 - dist / max_dissimilarity\r\n return similarity", "def get_true_scale_mixture(\n self, normalized_dist: LogisticMixture\n ) -> LogisticMixture:\n\n return normalized_dist.denormalize(self.scale)", "def nn(x, w):\n return np.dot(x, w)", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n 
weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def distance_map(self, scaling='sum'):\n\n if scaling not in ['sum', 'mean']:\n raise ValueError(f'scaling should be either \"sum\" or \"mean\" ('\n f'\"{scaling}\" not valid)')\n\n um = nan * zeros((self._weights.shape[0],\n self._weights.shape[1],\n 8)) # 2 spots more for hexagonal topology\n\n ii = [[0, -1, -1, -1, 0, 1, 1, 1]]*2\n jj = [[-1, -1, 0, 1, 1, 1, 0, -1]]*2\n\n if self.topology == 'hexagonal':\n ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n for x in range(self._weights.shape[0]):\n for y in range(self._weights.shape[1]):\n w_2 = self._weights[x, y]\n e = y % 2 == 0 # only used on hexagonal topology\n for k, (i, j) in enumerate(zip(ii[e], jj[e])):\n if (x+i >= 0 and x+i < self._weights.shape[0] and\n y+j >= 0 and y+j < self._weights.shape[1]):\n w_1 = self._weights[x+i, y+j]\n um[x, y, k] = fast_norm(w_2-w_1)\n\n if scaling == 'mean':\n um = nanmean(um, axis=2)\n if scaling == 'sum':\n um = nansum(um, axis=2)\n\n return um/um.max()", "def similarity_matrix(P, similarity_measure, normalize=True, inverse=True):\n N = len(P) \n S = np.zeros((N, N))\n for i in range(N): \n for j in range(i): \n S[i][j] = similarity_measure(P[i], P[j])\n\n S = square(S)\n if normalize: \n S = S / np.max(S)\n if inverse:\n S = 1 - S # 
Higher value = more similar\n\n return S", "def norm_weight(nin,nout=None, scale=0.01, ortho=True):\n if nout == None:\n nout = nin\n if nout == nin and ortho:\n W = ortho_weight(nin)\n else:\n W = scale * rng_np.randn(nin, nout)\n return W.astype('float32')", "def _sparse_tanimotokernel(data_1, data_2):\n\n norm_1 = np.array(data_1.power(2).sum(axis=1).reshape(data_1.shape[0], 1))\n norm_2 = np.array(data_2.power(2).sum(axis=1).reshape(data_2.shape[0], 1))\n prod = data_1.dot(data_2.T).A\n\n divisor = (norm_1 + norm_2.T - prod) + np.finfo(data_1.dtype).eps\n result = prod / divisor\n return result", "def _compute_nmig(mus_train, ys_train, active):\n print(\"start nmig\")\n score_dict = {}\n discretized_mus = utils.make_discretizer(mus_train)\n m = utils.discrete_mutual_info(discretized_mus, ys_train)\n # m shape: (10, nr_ground_truth)\n print(\"finished discretizing\")\n assert m.shape[0] == mus_train.shape[0]\n assert m.shape[1] == ys_train.shape[0]\n entropy = utils.discrete_entropy(ys_train)\n if active is not None:\n assert len(active) <= ys_train.shape[0]\n m = m[:, active]\n entropy = entropy[active]\n nr_lt = m.shape[0]\n nr_gt = m.shape[1]\n # m is [num_latents, num_factors]\n\n sorted_m = np.sort(m, axis=0)[::-1]\n individual_mig = np.divide(sorted_m[0, :] - sorted_m[1, :], entropy[:])\n print(\"ind mig\", individual_mig)\n mig = np.mean(individual_mig)\n\n if nr_gt == 1:\n nmig = np.max(np.divide(m, entropy[:]))\n else:\n m = np.divide(m, entropy[:])\n partials = np.zeros((nr_gt))\n best_ids = np.argmax(m, axis=0)\n for i in range(nr_gt):\n mask = np.ones((nr_gt), dtype=np.bool)\n mask[i] = 0\n best_id = best_ids[i]\n partials[i] = m[best_id, i] - np.max(m[best_id, mask])\n nmig = np.mean(partials)\n print(\"ind nmig\", partials)\n score_dict[\"discrete_mig\"] = mig\n score_dict[\"discrete_nmig\"] = nmig\n\n return score_dict", "def sparse_matmul(A: SparseTensor, B: SparseTensor, out: torch.Tensor) -> torch.Tensor:\n if A.nnz() == 0 or B.nnz() == 0:\n return out\n\n if A.is_cuda:\n return _sparse_matmul_cuda(A, B, out)\n else:\n return _sparse_matmul_cpu(A, B, out)", "def test_canonicalization_of_vectors_w_symm(free_alg):\n\n dr = free_alg\n p = dr.names\n x = IndexedBase('x')\n r = p.R\n i, j = p.i, p.j\n\n vs = Vec('vs')\n dr.set_symm(vs, Perm([1, 0]), valence=2)\n tensor = dr.sum((i, r), (j, r), x[i, j] * vs[j, i])\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == x[i, j]\n assert term.vecs == (vs[i, j],)\n\n va = Vec('va')\n dr.set_symm(va, Perm([1, 0], NEG), valence=2)\n tensor = dr.sum((i, r), (j, r), x[i, j] * va[j, i])\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == -x[i, j]\n assert term.vecs == (va[i, j],)", "def similarityMetric(Est, GT, options):\n\n if options == None:\n options = {}\n if not 'metric' in options:\n options['metric'] = 'basic'\n\n#########################################################\n## YOU MUST REMOVE THE REST OF THE CODE OF THIS FUNCTION\n## AND CHANGE FOR YOUR OWN CODE\n#########################################################\n comptador = 0\n if options['metric'].lower() == 'basic':\n for i in Est:\n if i in GT[1]:\n comptador = comptador + 1\n return comptador / len(Est)\n\n else:\n return 0", "def test__inverse_transform_continuous(self):", "def calculate_dist_from_eqm(distance_from_eqm, variable_mask):", "def weighted_example_mining(dist_mat, is_pos, is_neg, 
eps=1e-12):\n assert len(dist_mat.size()) == 2\n\n is_pos = is_pos\n is_neg = is_neg\n dist_ap = dist_mat * is_pos\n dist_an = dist_mat * is_neg\n\n weights_ap = softmax_weights(dist_ap, is_pos, eps)\n weights_an = softmax_weights(-dist_an, is_neg, eps)\n\n dist_ap = torch.sum(dist_ap * weights_ap, dim=1)\n dist_an = torch.sum(dist_an * weights_an, dim=1)\n\n return dist_ap, dist_an", "def pairwise_dot_product_similarity(x, y):\n return torch.mm(x, torch.transpose(y, 1, 0))", "def norm_weight(nin, nout=None, scale=0.01, ortho=True):\n if nout is None:\n nout = nin\n if nout == nin and ortho:\n W = ortho_weight(nin)\n else:\n W = scale * numpy.random.randn(nin, nout)\n return W.astype('float32')", "def compute_similarity(self, output, batch_output):\n raise NotImplementedError", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def __init__(self, input_dim, nDense1, nDense2):\n super(SLE, self).__init__()\n\n self.dim_C, self.dim_H, self.dim_W = input_dim\n self.estimator_1half = nn.Sequential(\n nn.Flatten(),\n nn.Linear(self.dim_C * self.dim_H * self.dim_W, nDense1),\n nn.Tanh()\n )\n\n # Note: the network structure described in original paper does not work in our implementation\n self.estimator_2half = nn.Sequential(\n nn.Linear(nDense1, nDense2),\n mul_matrixS(nDense2), # here is the problem\n nn.Sigmoid()\n )", "def get_true_scale_logistic(self, normalized_dist: Logistic) -> Logistic:\n\n return normalized_dist.denormalize(self.scale)", "def compute_ssm(self, X, metric=\"seuclidean\"):\n D = distance.pdist(X, metric=metric)\n D = distance.squareform(D)\n D /= D.max() # TODO: Why normalizing here ?\n return 1 - D", "def compute(self,x,y):\n\n if(self.npaMatrix == None):\n raise Exception(\"\".join([\"MLPYDistanceAdaptor. 
Attempted to compute distance with out a distance matrix passed in during construction.\"]))\n return self.npaMatrix[x[0],y[0]]", "def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)", "def affine(s1, s2):\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Affine()\n\n # if not isinstance(s1, six.string_types):\n # s1 = six.u(str(s1))\n #\n # if isinstance(s1, bytes):\n # s1 = s1.decode('utf-8', 'ignore')\n #\n # if not isinstance(s2, six.string_types):\n # s2 = six.u(str(s2))\n #\n # if isinstance(s2, bytes):\n # s2 = s2.decode('utf-8', 'ignore')\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity\n return measure.get_raw_score(s1, s2)", "def snn(query, h_train, h_labs, temp=0.1):\n # Normalize embeddings\n query = tf.math.l2_normalize(query, axis=1)\n h_train = tf.math.l2_normalize(h_train, axis=1)\n\n # Compute similarity\n return tf.nn.softmax(query @ tf.transpose(h_train) / temp, axis=1) @ h_labs", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def multi_scale_ssim(x: torch.Tensor, y: torch.Tensor, kernel_size: int = 11, kernel_sigma: float = 1.5,\n data_range: Union[int, float] = 1., reduction: str = 'mean',\n scale_weights: Optional[torch.Tensor] = None,\n k1: float = 0.01, k2: float = 0.03) -> torch.Tensor:\n assert kernel_size % 2 == 1, f'Kernel size must be odd, got [{kernel_size}]'\n _validate_input([x, y], dim_range=(4, 5), data_range=(0, data_range))\n\n x = x / float(data_range)\n y = y / float(data_range)\n\n if scale_weights is None:\n # Values from MS-SSIM the paper\n scale_weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=x.dtype, device=x.device)\n else:\n # Normalize scale weights\n scale_weights = scale_weights / scale_weights.sum()\n if scale_weights.size(0) != scale_weights.numel():\n raise ValueError(f'Expected a vector of weights, got {scale_weights.dim()}D tensor')\n\n kernel = gaussian_filter(kernel_size, kernel_sigma, device=x.device, dtype=x.dtype).repeat(x.size(1), 1, 1, 1)\n\n _compute_msssim = _multi_scale_ssim_complex if x.dim() == 5 else _multi_scale_ssim\n msssim_val = _compute_msssim(\n x=x,\n y=y,\n data_range=data_range,\n kernel=kernel,\n scale_weights=scale_weights,\n k1=k1,\n k2=k2\n )\n return _reduce(msssim_val, reduction)", "def Mat_dis_s2(x, sig=0.0001, alp = 0.5):\n spe = x[:, :-2]\n spa = x[:, -2:]\n dist_spetral = Mat_dis(spe)/spe.shape[1]\n dist_spatial = Mat_dis(spa)/spa.shape[1]\n # dist_spetral = 
np.exp(-sig*dist_spetral)\n # dist_spatial = np.exp(-sig*dist_spatial)\n #dist_mat = alp*dist_spetral + (1-alp)*dist_spatial\n\n\n return dist_spetral, dist_spatial", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def __modelSimilarity(self, mOrig: nn.Module, mDest: nn.Module) -> torch.Tensor:\n cos = nn.CosineSimilarity(0)\n d1 = nn.utils.parameters_to_vector(mOrig.parameters())\n d2 = nn.utils.parameters_to_vector(mDest.parameters())\n sim: torch.Tensor = cos(d1, d2)\n return sim", "def wordSimilarityRatio(sent_1,sent_2):", "def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled", "def _cust_square_dist(A: TensorType, B: TensorType, sc: TensorType) -> tf.Tensor:\n return tf.reduce_sum(tf.square((tf.expand_dims(A, 1) - tf.expand_dims(B, 0)) / sc), 2)", "def mpd(distmat):\r\n return distmat.sum() / (distmat.size - distmat.shape[0])", "def compute_MXt(M_t, X_t, xattr):\n return torch.mm(M_t, (X_t / xattr['scaled_scale']).T) - torch.mm(M_t, (xattr['scaled_center']/xattr['scaled_scale']).reshape(-1,1))", "def similarity(self, wf, positions = None, features = None):\n # The similarity is the inverse square of the distance between the two\n # WordForms. 
Impose a minimum on distances (to deal with zero).\n dist = self.distance(wf, positions = positions, features = features)\n if dist < .1:\n dist = .1\n sim = 1 / (dist ** 2)\n return sim", "def normScale( x, y ):\n if x == 0 and y == 0:\n return 0\n else:\n return 1.0 / pow( x*x + y*y, 0.5 )", "def TransformDistance(*args, **kwargs):\n return _gdi_.GraphicsMatrix_TransformDistance(*args, **kwargs)", "def similarity_matrix(points, sigma):\n distances_squared = spherical_distances(points, points)**2\n\n \n return np.exp( -distances_squared / (2.0 * sigma) )", "def __matmul__(self, other: 'SampledField'): # values @ representation\n return self.at(other, keep_extrapolation=False)", "def transform(self, data):\n #scaled_transform = data + self.sc_factor * (data* (1-self.sigma) - self.mu) / self.sigma\n # scaling = 1+self.sc_factor*(self.sigma-1)\n # scaling = tf.clip_by_value(scaling, 1.0e-8, 1.0e8)\n\n scaled_transform = (data-self.mu)/(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2))\n return scaled_transform", "def rescale(A, d1, d2):\n \n A[0, 1] = A[0, 1] * (d2 / d1)\n A[1, 0] = A[1, 0] * (d1 / d2)\n \n return A", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def similarity_function(x, y):\n\n def safe_get(field, row, default_value):\n # Safely get a value from the Row. If the value is None, get the\n # default value.\n return row[field] if row[field] is not None else default_value\n\n # Extract the values for the categorical and continuous features for both\n # the x and y samples. Use an empty string as the default value for missing\n # categorical fields and 0 for the continuous ones.\n x_categorical_features = [safe_get(k, x, \"\") for k in CATEGORICAL_FEATURES]\n x_continuous_features = [safe_get(k, x, 0) for k in CONTINUOUS_FEATURES]\n y_categorical_features = [safe_get(k, y, \"\") for k in CATEGORICAL_FEATURES]\n y_continuous_features = [safe_get(k, y, 0) for k in CONTINUOUS_FEATURES]\n\n # Here a larger distance indicates a poorer match between categorical variables.\n j_d = distance.hamming(x_categorical_features, y_categorical_features)\n j_c = distance.canberra(x_continuous_features, y_continuous_features)\n\n # Take the product of similarities to attain a univariate similarity score.\n # Add a minimal constant to prevent zero values from categorical features.\n # Note: since both the distance function return a Numpy type, we need to\n # call the |item| function to get the underlying Python type. 
If we don't\n # do that this job will fail when performing KDE due to SPARK-20803 on\n # Spark 2.2.0.\n return abs((j_c + 0.001) * j_d).item()", "def test_sad_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n sad = sad_similarity_measure(patch1, patch2)\n\n assert np.isclose(sad, 3.6, atol=1e-2)", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def sim_mat(fc7_feats):\n print(\"Something\")\n t = time.time()\n pdist_ = spatial.distance.pdist(fc7_feats)\n print('Created distance matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n dist_mat = spatial.distance.squareform(pdist_)\n print('Created square distance matrix' + ' ' + str(time.time() - t) + ' sec')\n del pdist_\n\n t = time.time()\n sigmas = np.sort(dist_mat, axis=1)[:, 7] + 1e-16\n matrice_prodotti_sigma = np.dot(sigmas[:, np.newaxis], sigmas[np.newaxis, :])\n print('Generated Sigmas' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n dist_mat /= -matrice_prodotti_sigma\n print('Computed dists/-sigmas' + ' ' + str(time.time() - t) + ' sec')\n\n del matrice_prodotti_sigma\n\n t = time.time()\n W = np.exp(dist_mat, dist_mat)\n # W = np.exp(-(dist_mat / matrice_prodotti_sigma))\n np.fill_diagonal(W, 0.)\n\n # sparsify the matrix\n k = int(np.floor(np.log2(fc7_feats.shape[0])) + 1)\n n = W.shape[0]\n print('Created inplace similarity matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n for x in W:\n x[np.argpartition(x, n - k)[:(n - k)]] = 0.0\n\n print('Sparsify the matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n # matrix_S = np.zeros((n, n))\n m1 = W[np.triu_indices(n, k=1)]\n m2 = W.T[np.triu_indices(n, k=1)]\n\n W = spatial.distance.squareform(np.maximum(m1, m2))\n print('Symmetrized the similarity matrix' + ' ' + str(time.time() - t) + ' sec')\n\n return W", "def __call__(self, *args, **kwargs):\n return tfd.Normal(loc=self.mu, scale=tf.eye(self.size)), self.mu", "def doc_doc_similarity(matrix_a, matrix_b):\n assert matrix_a.shape[1] == matrix_b.shape[0], \"Mismatched shape between matrix A and matrix B\"\n numerator = np.dot(matrix_a, matrix_b)\n assert numerator.shape == (matrix_a.shape[0], matrix_b.shape[1]), numerator.shape\n denominator = np.sqrt(np.sum(matrix_a ** 2, axis=1))[:, np.newaxis] * np.sqrt(\n np.sum(matrix_b.T ** 2, axis=1))[:, np.newaxis].T\n assert (denominator > 0).all(), \"Denominator is zero {}\".format(denominator)\n similarity_matrix = np.multiply(numerator, 1 / denominator)\n return similarity_matrix", "def process_noise_dist(self, dt=0.0):\n Q = self.process_noise_cov(dt)\n return dist.MultivariateNormal(\n torch.zeros(Q.shape[-1], dtype=Q.dtype, device=Q.device), Q\n )", "def test_tensor_can_be_canonicalized(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n r = p.R\n m = p.m\n h = p.h\n v = p.v\n\n # Anti-symmetric real matrix.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0\n\n # With wrapping under an even function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] 
** 2 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 2 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == 2 * m[i, j] ** 2\n assert term.vecs == (v[i], v[j])\n\n # With wrapping under an odd function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 3 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 3 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 0\n\n # Hermitian matrix.\n tensor = dr.einst(\n h[i, j] * v[i] * v[j] + conjugate(h[j, i]) * v[i] * v[j]\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0", "def test_transform(self):\n t = Linearize()\n assert t.transform(numpy.e) == numpy.log(numpy.e)\n t.transform(0)", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...", "def my_scale_sim_mat(w):\n rowsum = np.array(np.sum(w, axis=1), dtype=np.float32)\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n w = r_mat_inv.dot(w)\n return w", "def projection_metric_sq(Y1, Y2):\n assert Y1.shape == Y2.shape\n\n # abs used to ensure distance is non-negative in case of numerical imprecision\n # round m down to 0 in case of numerical imprecision\n m = Y1.shape[1] - (np.linalg.norm(Y1.transpose() @ Y2) ** 2)\n if np.isclose(m, 0):\n m = 0\n return m", "def SNLinear(*args, **kwargs):\n return spectral_norm(nn.Linear(*args, **kwargs))", "def get_transformation():\n return transforms.Compose([transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])", "def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n score = multi_scale_ssim(x=x, y=y, kernel_size=self.kernel_size, kernel_sigma=self.kernel_sigma,\n data_range=self.data_range, reduction=self.reduction, scale_weights=self.scale_weights,\n k1=self.k1, k2=self.k2)\n return torch.ones_like(score) - score", "def tanimoto_dissimilarity(X, Y, X_batch_size=50, Y_batch_size=50):\n n_features = X.shape[-1]\n if X.ndim == 1:\n X = X.reshape(-1, n_features)\n if Y.ndim == 1:\n Y = Y.reshape(-1, n_features)\n tan_sim = []\n X_total_batches = X.shape[0] // X_batch_size + 1\n Y_total_batches = Y.shape[0] // Y_batch_size + 1\n for X_batch_i in range(X_total_batches):\n X_start_idx = X_batch_i * X_batch_size\n X_end_idx = min((X_batch_i + 1) * X_batch_size, X.shape[0])\n X_batch = X[X_start_idx:X_end_idx, :]\n for Y_batch_i in range(Y_total_batches):\n Y_start_idx = Y_batch_i * Y_batch_size\n Y_end_idx = min((Y_batch_i + 1) * Y_batch_size, Y.shape[0])\n Y_batch = Y[Y_start_idx:Y_end_idx, :]\n\n # adapted from: https://github.com/deepchem/deepchem/blob/\n # 2531eca8564c1dc68910d791b0bcd91fd586afb9/deepchem/trans/\n # transformers.py#L752\n numerator = np.dot(X_batch, Y_batch.T).flatten()\n # equivalent to np.bitwise_and(X_batch, Y_batch), axis=1)\n denominator = n_features - np.dot(1 - X_batch,\n (1 - Y_batch).T).flatten()\n # np.sum(np.bitwise_or(X_rep, Y_rep), axis=1)\n\n tan_sim.append(numerator / denominator)\n tan_sim = np.hstack(tan_sim)\n return 1.0 - tan_sim", "def _likelihood_der2_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize Hessian\n d2ell_dscale2 = numpy.zeros((scale.size, 
scale.size), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n MKnpMz = self.M_dot(self.C, self.Y, eta, KnpMz)\n\n for q in range(scale.size):\n\n # 1. Compute zMKnqMKnpMz\n if p == q:\n KnqMz = KnpMz\n else:\n KnqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[q])\n zMKnqMKnpMz = numpy.dot(KnqMz, MKnpMz)\n\n # 2. Compute zMKnpqMz\n KnpqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[p, q])\n zMKnpqMz = numpy.dot(self.Mz, KnpqMz)\n\n # 3. Computing trace of Knpq * M in three steps\n\n # Compute the first component of trace of Knpq * Kninv\n Knpq = self.mixed_cor.get_matrix(eta, derivative=[p, q])\n if self.stochastic_traceinv:\n trace_KnpqKninv = self.mixed_cor.traceinv(\n eta, B=Knpq,\n imate_options={'method': 'hutchinson'})\n else:\n KnpqKninv = Knpq @ self.Kninv\n trace_KnpqKninv = imate.trace(KnpqKninv, method='exact')\n\n # Compute the second component of trace of Knpq * M\n KnpqY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p, q])\n YtKnpqY = numpy.matmul(self.Y.T, KnpqY)\n CYtKnpqY = numpy.matmul(self.C, YtKnpqY)\n trace_CYtKnpqY = numpy.trace(CYtKnpqY)\n\n # Compute trace of Knpq * M\n trace_KnpqM = trace_KnpqKninv - trace_CYtKnpqY\n\n # 4. Compute trace of Knp * M * Knq * M\n\n # Compute first part of trace of Knp * M * Knq * M\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n Knq = self.mixed_cor.get_matrix(eta, derivative=[q])\n if self.stochastic_traceinv:\n trace_KnpMKnqM_1 = self.mixed_cor.traceinv(\n eta, B=Knq, C=Knp,\n imate_options={'method': 'hutchinson'})\n else:\n KnpKninvKnqKninv = numpy.matmul(self.KnpKninv[p],\n self.KnpKninv[q])\n trace_KnpMKnqM_1 = imate.trace(KnpKninvKnqKninv,\n method='exact')\n\n # Compute the second part of trace of Knp * M * Knq * M\n KnpY = Knp @ self.Y\n if p == q:\n KnqY = KnpY\n else:\n KnqY = Knq @ self.Y\n KninvKnqY = self.mixed_cor.solve(KnqY, eta=eta)\n YtKnpKninvKnqY = numpy.matmul(KnpY.T, KninvKnqY)\n F21 = numpy.matmul(self.C, YtKnpKninvKnqY)\n F22 = numpy.matmul(self.C, YtKnpKninvKnqY.T)\n trace_KnpMKnqM_21 = numpy.trace(F21)\n trace_KnpMKnqM_22 = numpy.trace(F22)\n\n # Compute the third part of trace of Knp * M * Knq * M\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n if p == q:\n YtKnqY = YtKnpY\n else:\n YtKnqY = numpy.matmul(self.Y.T, KnqY)\n Dp = numpy.matmul(self.C, YtKnpY)\n if p == q:\n Dq = Dp\n else:\n Dq = numpy.matmul(self.C, YtKnqY)\n D = numpy.matmul(Dp, Dq)\n trace_KnpMKnqM_3 = numpy.trace(D)\n\n # Compute trace of Knp * M * Knq * M\n trace_KnpMKnqM = trace_KnpMKnqM_1 - trace_KnpMKnqM_21 - \\\n trace_KnpMKnqM_22 + trace_KnpMKnqM_3\n\n # 5. 
Second \"local\" derivatives w.r.t scale\n local_d2ell_dscale2 = -0.5*trace_KnpqM + 0.5*trace_KnpMKnqM + \\\n (0.5*zMKnpqMz - zMKnqMKnpMz) / sigma2\n\n # Computing total second derivative\n dp_log_sigma2 = -numpy.dot(self.Mz, KnpMz) / \\\n (self.rdof*sigma2)\n if p == q:\n dq_log_sigma2 = dp_log_sigma2\n else:\n dq_log_sigma2 = -numpy.dot(self.Mz, KnqMz) / \\\n (self.rdof*sigma2)\n d2ell_dscale2[p, q] = local_d2ell_dscale2 + \\\n 0.5 * self.rdof * dp_log_sigma2 * dq_log_sigma2\n\n if p != q:\n d2ell_dscale2[q, p] = d2ell_dscale2[p, q]\n\n return d2ell_dscale2", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def mu_na(n: float, a: float) -> float:\n return n * n * a * a * a", "def mntd(distmat):\r\n return masked_array(distmat, eye(distmat.shape[0])).min(0).mean()", "def _nb_subst_metric(seq1, seq2, subst_dict, as_similarity=False):\n assert len(seq1) == len(seq2)\n\n def _sim_func(s1, s2, subst):\n sim12 = 0.\n for i in range(len(s1)):\n k1 = s1[i] + '|' + s2[i]\n k2 = s2[i] + '|' + s1[i]\n sim12 += subst.get(k1, subst.get(k2, subst['n|a']))\n return sim12\n\n \"\"\"Site-wise similarity between seq1 and seq2 using the substitution matrix subst\"\"\"\n sim12 = _sim_func(seq1, seq2, subst_dict)\n\n if as_similarity:\n return sim12\n else:\n L = len(seq1)\n sim11 = _sim_func(seq1, seq1, subst_dict)\n sim22 = _sim_func(seq2, seq2, subst_dict)\n D = sim11 + sim22 - 2 * sim12\n return D", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)" ]
[ "0.5920537", "0.5897834", "0.5621034", "0.55399406", "0.5531732", "0.54380125", "0.54253316", "0.539818", "0.5361793", "0.53471076", "0.53471076", "0.5319093", "0.5299263", "0.529202", "0.52626157", "0.52620703", "0.52577376", "0.52334017", "0.5212186", "0.52016556", "0.51950896", "0.5189338", "0.5186234", "0.5186234", "0.517651", "0.51598346", "0.5159044", "0.5138966", "0.5137008", "0.5127818", "0.51234436", "0.5104632", "0.5103799", "0.5100736", "0.5093523", "0.50778747", "0.50703835", "0.5065467", "0.5056105", "0.50517815", "0.50484675", "0.50447845", "0.5027819", "0.5021645", "0.5009641", "0.50049365", "0.49908146", "0.49897873", "0.49892133", "0.49859443", "0.4981848", "0.49781883", "0.4969455", "0.49571192", "0.49411595", "0.49392638", "0.49320957", "0.49309137", "0.4927642", "0.49275687", "0.4926172", "0.49209267", "0.49198475", "0.49151054", "0.4913511", "0.49127907", "0.49088645", "0.49037144", "0.49036798", "0.49031857", "0.4903046", "0.49029014", "0.4901731", "0.49004084", "0.48989972", "0.48988435", "0.48982105", "0.4897679", "0.48938644", "0.48905435", "0.48884147", "0.4879611", "0.48786703", "0.48780444", "0.48738113", "0.48643944", "0.4862382", "0.48535302", "0.48474225", "0.48467806", "0.48357803", "0.48298958", "0.48278463", "0.48278219", "0.48276472", "0.48263955", "0.4826077", "0.48247784", "0.4823256", "0.4823256" ]
0.5116158
31
Run the demo, testing whether input words are beer-related.
def run_demo():
    while True:
        embeddings = beer_emb.embed_doc(input("Test if words are beer-related: "),
                                        word_filter=False)
        for word_vec in embeddings:
            print(is_beer_related(word_vec))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def demo(cls):\n print(\"\\tDemo for class Preprocess\\n\"\n \"For each method, you can see its arguments and output. \"\n \"For more information use the help function.\\n\\n\"\n \"Arguments used for instanciating the class:\\n\"\n \"\\tcorpus - {}\".format(cls.DEMO[\"corpus\"]))\n pre = cls(**cls.DEMO)\n print(\"{:=^90}\".format(\"corpus_stats()\"))\n pre.corpus_stats()\n print(\"{:=^90}\".format(\"bigrams()\"))\n print(pre.bigrams())\n print(\"{:=^90}\".format(\"bigrams('domain1.txt')\"))\n print(pre.bigrams(\"domain1.txt\"))\n print(\"{:=^90}\".format(\"get_frequency\"\n \"([('computational', 'linguistics'), \"\n \"('not', 'present')])\"))\n print(pre.get_frequency([('computational', 'linguistics'),\n ('not', 'present')]))\n print(\"{:=^90}\".format(\"is_lexical('hello', 'world')\"))\n print(pre.is_lexical('hello', 'world'))\n print(\"{:=^90}\".format(\"is_lexical('hello', '?')\"))\n print(pre.is_lexical('hello', '?'))\n print(\"{:=^90}\".format(\"has_relevant_tag(('computational', \"\n \"'linguistics'), \"\n \"relevant={'NN', 'NNP', 'NNS'})\"))\n print(pre.has_relevant_tag(('computational', 'linguistics'),\n relevant={'NN', 'NNP', 'NNS'}))\n print(\"{:=^90}\".format(\"has_relevant_tag(('is', 'difficult'),\"\n \"relevant={'NN', 'NNP', 'NNS'})\"))\n print(pre.has_relevant_tag(('is', 'difficult'),\n relevant={'NN', 'NNP', 'NNS'}))\n print(\"{:=^90}\".format(\"candidates(min_count=1, \"\n \"stops=['is', 'the', 'for', 'of'], \"\n \"tags={'NN', 'NNP', 'NNS'})\"))\n print(pre.candidates(min_count=1,\n stops=['is', 'the', 'for', 'of'],\n tags={'NN', 'NNP', 'NNS'}))", "def test_win(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"ant\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')", "def main(word_count=2, use_caps=False, use_leet=False, caps_percent=25, leet_percent=20):\n\n phrase = get_phrase(word_count)\n\n if use_caps:\n phrase = random_caps(phrase, caps_percent)\n\n if use_leet:\n phrase = random_characters(phrase, leet_percent)\n\n print(phrase)", "def main():\n\n args = get_args()\n words = args.phrase\n\n words = codify_phrase(words)\n display = ' '.join(words)\n\n print(display)", "def runTests():\n\n\tsentenceList = [\n\t\t\"Sore was I ere I saw Eros.\",\n\t\t\"This is not a Palindrome!\",\n\t\t\"A man, a plan, a canal -- Panama\",\n\t\t\"Never a foot too far, even.\",\n\t\t\"Euston saw I was not Sue.\",\n\t\t\"Live on evasions? No, I save no evil.\",\n\t\t\"Red Roses run no risk, sir, on nurses order.\",\n\t\t\"Salisbury moor, sir, is roomy. Rub Silas.\",\n\t\t'''Marge, let's \"went.\" I await news telegram.''',\n\t\t\"A new order began, a more Roman age bred Rowena.\",\n\t\t\"I, man, am regal; a German am I.\",\n\t\t\"Tracy, no panic in a pony-cart.\",\n\t\t\"Egad! Loretta has Adams as mad as a hatter. 
Old age!\",\n\t\t\"Eve, mad Adam, Eve!\",\n\t\t\"Resume so pacific a pose, muser.\",\n\t\t\"Marge let a moody baby doom a telegram.\",\n\t\t\"Tenet C is a basis, a basic tenet.\",\n\t\t'''Nella's simple hymn: \"I attain my help, Miss Allen.\"''',\n\t\t\"Straw? No, too stupid a fad. I put soot on warts.\",\n\t\t\"Sir, I demand, I am a maid named Iris.\",\n\t\t\"Lay a wallaby baby ball away, Al.\",\n\t\t\"Tessa's in Italy, Latin is asset.\",\n\t\t\"Noel sees Leon.\",\n\t\t]\n\n\tprint()\n\tprint(\"Start of Proposal by Conrad Storz...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_Storz(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")\n\n\tprint()\n\tprint(\"Start of Proposal by Jaysen...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_Jaysen(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")\n\n\tprint()\n\tprint(\"Start of Proposal by Phillip Adkins...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_PhillipAdkins(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")\n\n\tprint()\n\tprint(\"Start of Proposal by Dmitry Kreslavskiy...\")\n\tfor candidate in sentenceList:\n\t\tprint(\" \" + str(isPalindrome_Dmitry(candidate)) + \"[\" + candidate + \"] {is a palindrome} \")", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def main():\n answers_style = drink_style_input()\n drink = drink_make(answers_style)\n print \"\"\n print \"Your drink includes:\"\n for ingredient in drink:\n print \"A {}\".format(ingredient)", "def main(words, s):\n if words:\n words = int(words)\n click.echo(lorem.words(words))\n\n # Returns a lorem ipsum sentence\n elif s:\n click.echo(lorem.sentence())\n\n # Returns a lorem ipsum paragraph by default\n else:\n click.echo(lorem.paragraph())", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def run_tests():\n source1 = TextModel(\"Barack Obama\")\n source1.add_file('project/source_texts/barackobama_source_text.txt')\n\n source2 = TextModel('Donald Trump')\n source2.add_file('project/source_texts/donaldtrump_source_text.txt')\n\n new1 = TextModel('More Obama')\n new1.add_file('project/source_texts/moreobama_source_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('More Trump')\n new2.add_file('project/source_texts/moretrump_source_text.txt')\n new2.classify(source1, source2)\n\n new1 = TextModel('Gucci Gang by Lil Pump')\n new1.add_file('project/source_texts/guccigang_source_text.txt')\n new1.classify(source1, source2)\n\n new1 = TextModel(\"Spongebob Transcripts\")\n new1.add_file('project/source_texts/spongebobeps_source_text.txt')\n new1.classify(source1, source2)", "def main():\n myfactory = Faker()\n # database should be sorted lexographically and should not have any duplicate values\n # database = [\"abracadara\", \"al\", \"alice\", \"alicia\", \"allen\", 
\"alter\", \"altercation\", \"bob\", \"element\", \"ello\", \"eve\",\n # \"evening\", \"event\", \"eventually\", \"mallory\",\n database = sorted(list(set(myfactory.words(1000000000) +\n [\"za\", \"zazb\", \"zazc\", \"zazd\", \"zaze\", \"zazy\", \"zazz\", \"zb\", \"zba\", \"zbc\", \"zbd\", \"zbe\", \"zbz\"])))\n query = lambda prefix: [d for d in database if d.startswith(prefix)][:5]\n assert extract(query) == database", "def test_two_game(self):\n self.choice.side_effect = [\"ant\", \"baboon\"]\n self.input.side_effect = list(\"ant\" \"y\" \"babon\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')\n self.xprint.assert_any_call('Yes! The secret word is \"baboon\"! '\n 'You have won!')", "def yes_straw_warts():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"straw warts\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def test_text_classifier_vaporise(self):\n pass", "def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)", "def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)", "def example_single(args, model, word2idx):\n #在命令行中加载和分段<目标、(推特内容)>配对\n while True:\n target = raw_input(\"问题: \")\n tweet = raw_input(\"回答: \")\n targets = [str(target)]\n tweets = [str(tweet)]\n seged_tweets = yutils.seg_sentence(tweets, choice=\"list\", place=\"hpc\") # may use lexicon here\n seged_targets = yutils.seg_sentence(targets, 
choice=\"list\", place=\"hpc\")\n predictions = evaluate(args, model, word2idx, seged_tweets, seged_targets)\n print(\"预测结果: \", predictions)", "def main():\n # Load and prep training files\n raw_speech_text = hg.load_training_file('trump_train.txt')\n speech_text = hg.prep_training(raw_speech_text)\n tweet_data = load_tweets('trump_tweets.json')\n raw_tweets = \"\"\n for dct in tweet_data:\n raw_tweets += \"{} \".format(dct['text'])\n tweets = hg.prep_training(raw_tweets)\n corpus = speech_text + tweets\n corpus = strip_punctuation(corpus)\n dict_1 = hg.map_one_to_one(corpus)\n dict_2 = hg.map_two_to_one(corpus)\n text = []\n \n # Introduction\n print(\"\\nTrump Speech Generator\\n\")\n print(\"Select words to add to speech\")\n print(\"\\'x\\' to exit\")\n print(\"\\'p\\' to add punctuation\")\n print(\"Select \\'p\\' before selecting the word you want to punctuate\")\n\n # Select first word\n options = corpus\n print ()\n selection = select_word(corpus)\n text.append(selection)\n \n # Select second word\n last = text[0]\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n # Select subsequent word\n while True:\n last = \"{} {}\".format(text[-2].strip(punctuation),\n text[-1].strip(punctuation))\n options = word_after_two(last, dict_2)\n if options == []:\n last = last.split()[1]\n options = word_after_one(last, dict_1)\n while options == []:\n last = random.choice(corpus)\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n print_text(text)", "def test_example():\n example_text = ['''Mark and Jack welcome back to couch on crackerjacks today I'm gonna show you how to make a basic and delicious potato salad some people might call this a country style potato salad some people might refer to it as a deli style of potato salad either way it's got the perfect balance of sweet and tangy from the sugar and the vinegar and pickles and everything else that's in this it's just your basic homemade potato salad you can add any number of things to this to make it your own but I'm just going to show you how I like to make mine so without further ado let's get started so naturally I'm going to start out with my potatoes every potato salad starts with potatoes for this recipe and for my potato salad I prefer using just regular old russet potatoes they're the cheapest they're the best I've tried using Yukon Gold potatoes and red potatoes for this recipe I prefer hands down at the russet potatoes it just it makes the best potato salad for me you can use whatever kind of potatoes you like though and using a potato peeler I'm just going to peel these potatoes a little trick for you that little end on most potato peelers it's kind of rounded use that to dig out the eyes of your potato it's what I've always used it for so it's just the perfect little tool to dig out the eyes of a potato but what you want to do is just go ahead and peel your potatoes and you don't have to peel your potatoes if you don't want to if you like skin on potato salad by all means go ahead and leave the skin on it doesn't make any difference personal preference and as you're peeling your potatoes and you get one done go ahead and put them into a large pot this is going to be the same profit I cut these in that's filled up with water you want to make sure and keep your potatoes covered that will prevent your potatoes from oxidizing and turning that pinky brown color but you just want to go 
through and peel all of your potatoes and I am using three pounds of potatoes for this recipe now once you get all your potatoes peeled you want to go ahead and cut them up basically you want to cut these into about 3/4 inch square pieces so for these medium potatoes I cut them half I turn them 90 degrees cut them into three pea is if you will that way if it's a larger potato do four and then cut those into chunks basically like I said you want about three quarters of an inch by three quarters of an inch by three quarters of an inch pieces and then again throw your potatoes back into the water that you pulled the potatoes out of that way they do not oxidize on you now when you get all your potatoes cut up your water is going to be cloudy and it's gonna be murky and it's gonna be just full of all the starch coming off of those potatoes what you want to do is rinse your potatoes well you want to make sure that the water coming off of that is completely clear go ahead and rinse these a good three or four times and then drain them completely you want to make sure that all of that starch gets off of those potatoes then you want to go ahead and light your stove and take your pot and you want a large pot for this put it over a medium-high heat time actually even high heat or at this point take your drained potatoes and put those into your pot and you want to add enough cold water to this to come up about one inch over the top of the potatoes starting off with cool water your potatoes cook evenly as the water comes up to temperature your potatoes come up with them to temperature if you start out putting cold potatoes into boiling water the outside of the potato is gonna be mush before the inside is actually cooked and before this gets going too far I'm gonna take two large eggs and I'm gonna put those in the water with the potatoes this recipe uses hard-boiled eggs and since I'm boiling the potatoes anyway I might as well just boil the eggs right along with the potatoes so just go ahead and add two large eggs to the pot and you want to cover your pot and you want to bring this up to a boil now once your water is that a boy I'll go ahead and give your potatoes an egg a gentle stir you want to be careful with this because you don't do not want to break your eggs and you also don't want to break up the potatoes but once this comes up to a boil you want to boil this for exactly ten minutes and how to check to make sure that your potatoes are done you want to take a couple large pieces take them out put them on a spoon and using a fork you want to put the fork into the potato and you want just a little bit of give in your potatoes before they break apart if you can see there it's just the slightest little bit of give before the potato breaks up you don't want to cook these any longer than that because they they will finish cooking when you take them off heat but you want to go ahead and drain these in a colander and once they are drained well go ahead and pour your potatoes and eggs back into the pot that you cooked them in and here you can dig out your eggs and you want to put your eggs in a bowl of cold water you want to stop that cooking process as soon as possible because if you cook your eggs too long you're gonna get that dreaded green ring around the yolk go ahead and put those in a bowl of cold water to stop the cooking process immediately and then you want to keep your potatoes in the pot that you cook them in to cool and you want to cool them completely before you do anything else with them 
if you add a salad dressing to hot potatoes it's gonna break on you and you don't want that so just go ahead and let your potatoes steam off and cool and I'm gonna let these sit for about a half an hour before I even start making the dressing for my potato salad and while you're waiting for your potatoes to cool off you can go ahead and peel your eggs it helps to wait a little bit for your eggs to cool down before you peel them just go ahead and crack them on a countertop and then start peeling them if you peel them underneath water or running water they peel super easy so as you can see here's I mean it takes nothing to do it under water water gets under there and the shell just slips off I just go ahead and peel your egg eggs and set them off until later I'm gonna need a few vegetables for my dressing I went ahead and already cut up half of a yellow onion here off a video I thought I was recording when I wasn't you don't need to see me chopped onions anyway everybody knows how to do that I've also got two stalks of celery here I'm just going to cut the ends off as well as the tops if you want to save the tops they make a nice garnish you don't have to keep them and I'm not gonna keep them here the celery I'm going to cut these sticks or stalks into orders and then I'm going to chop those up because I don't like really big chunks of celery in my potato salad so I'm just gonna cut these into four slices and then turn them around and cut these into dices if you will and I'm just going to go ahead after I get that died and set those off to the side until I need them later now for our dressing in a large bowl and you want to make sure that you use a plenty large bowl for this because it does make a lot of potato salad I've got one and a half cups of mayonnaise this recipe really does not work with Miracle Whip so since we're gonna be adding sugar to this stick to the plain old mayonnaise I'm gonna throw my eggs in there and using the back of a fork I'm just gonna break up my eggs if you like big chunks of egg in your potato salad don't mash it up as much but I'm gonna mash this up pretty fine and then you want to add in a quarter of a cup of sugar as well as a teaspoon and a half of salt it seems like a lot of salt it really isn't because there are a lot of potatoes here two teaspoons of white vinegar just plain white distilled vinegar then you want to add two tablespoons of sweet pickle relish you could also use dill pickle relish if you wanted to I like sweet in mine and finally I'm gonna add in two teaspoons of prepared yellow mustard if you like a more mustardy potato salad you can add more mustard if you want to this perfectly acceptable and then using a spoon or a fork whatever just go ahead and mix this up well and then you want to add in your onions and celery and go ahead and get that mixed in and you want to make sure to mix all of your ingredients and get your dressing thoroughly mixed before you add the potatoes because you don't want to over mix this once you get your potatoes added so go ahead and take your cooled potatoes again make sure that they are at least room temperature you do not want them warm or hot at all but go ahead and add those into your bowl and then using a spatula I'm going to gently fold the dressing into my potatoes you want your potatoes to remain as in this large of chunks as possible so don't go crazy you know stirring it stirring stirring you want to gently fold this so your potatoes do stay as whole as possible and a little secret for you just to bind 
up the dressing just a little bit I'm going to add two tablespoons of instant mashed potato flakes into the finished mixture I'm just going to fold this in basically what those do the potato flakes they bind up the dressing and make the dressing firm it also helps it kind of stick to the potatoes a little bit better so you you know the dressing doesn't run off of the potatoes which can be a problem with some recipes so there you go you want to make sure that those potato flakes are evenly distributed in there and everything is well mixed together everything is combined perfectly go ahead and give this a taste make sure that the salt is ok for you if you need a little bit more salt go ahead and add it if you want to if you need more mustard or vinegar or eggs whatever now is the time to do it but you want to go ahead and cover this with a piece of cling wrap saran wrap and refrigerate this for at least four to six hours before you serve this the longer you let this sit the better it gets but there you go there's your basic all-around simple homemade deli style or country style potato salad definitely give this recipe a try if you do let me know how you like it down below in the comment section if you like this video be sure to give it a thumbs up I would greatly appreciate it subscribe for more deliciousness and to keep up to date on all my latest videos thanks so much for watching and we will see you next time''']\n\n return str(example_text)", "def test_text_classifier_get_testing_samples(self):\n pass", "def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n \n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)", "def test_text_classifier_add_testing_samples(self):\n pass", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)", "def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)", "def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)", "def test():\r\n source1 = TextModel('source1')\r\n source1.add_string('It is interesting that she is interested.')\r\n\r\n source2 = TextModel('source2')\r\n source2.add_string('I am very, very excited about this!')\r\n\r\n mystery = TextModel('mystery')\r\n mystery.add_string('Is he interested? 
No, but I am.')\r\n mystery.classify(source1, source2)", "def test_word(self):\n arr, result = ['Acronis', 0, 333, 450], []\n fizz_buzz(arr, result)\n self.assertEqual(result, ['Acronis', 'fizzbuzz', 'buzz', 'fizzbuzz'])", "def test_theft_and_stealing(self):", "def test_random_words(self):\n input_word_list = {\"words\": [\"word1\", \"word2\", \"word3\"]}\n rv = self.randomWords(input_word=input_word_list)\n response_data = json.loads(rv.get_data(as_text=True))\n self.assertEquals(rv.status_code, 200)\n self.assertIn(response_data[\"words\"], input_word_list[\"words\"])", "def test_examples():\n argv = [\"py.test\", \"-examples\"]\n assert get_sargs(argv) is None", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def demo():\n ...", "def test_output_get_word(self):\n actual = get_words('../corpus/alice.txt')\n expected = [\"alice\"]\n self.assertEqual(actual, expected)", "def main():\n file = \"http://icarus.cs.weber.edu/~hvalle/hafb/words.txt\"\n words = fetch_words(file)\n print_items(words)", "async def wordfilter_test(self, ctx, *, message):\n found = self.test_sentence(message)\n if found:\n await ctx.send(f\"Message contains `{found}`\")\n else:\n await ctx.send(\"Couldn't detect any filtered words\")", "def test_words():\n LINE = \"Two Owls and a Hen,\"\n for word in LineBuilder(LINE).words:\n assert(repr(word) == \"WordBuilder('\" + str(word) + \"')\")", "def yes_tenet():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? 
\", regex=False\n ).stdin(\"tenet\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def testing():\n\n # lists which contains paths of keyword and non-keyword utterances\n non_kw_clips, kw_clips = generate_clips_kwds()\n\n non_kw_sent_dict, kw_sent_dict = {}, {}\n templates_dict = {}\n\n # calculate and store MFCC features in a dictionary\n for kw in listdir(kw_path):\n templates_dict[kw] = proc_one(kw_path + kw)\n\n for sent in non_kw_clips:\n filename = sent[:-3] + 'wav'\n non_kw_sent_dict[filename] = proc_one(filename)\n\n for word, paths in kw_clips.items():\n for path in paths:\n filename = path[:-3] + 'wav'\n kw_sent_dict[filename] = (proc_one(filename), word)\n\n final_results = {}\n\n # non-keyword comparisons\n for i, (non_kw_utterance, clip_feat) in enumerate(non_kw_sent_dict.items()):\n\n print(i, '/', len(non_kw_sent_dict))\n\n final_results[non_kw_utterance] = {}\n\n for keyword, kw_feat in templates_dict.items():\n print(\"Comparing keyword and non-kw sentence:\", keyword, non_kw_utterance)\n\n lmd = compare_all(clip_feat, kw_feat)\n final_results[non_kw_utterance][keyword] = (lmd, 0)\n\n with open(results_json, 'w') as f:\n json.dump(final_results, f)\n\n # keyword comparisons\n for i, (kw_utterance, (clip_feat, word)) in enumerate(kw_sent_dict.items()):\n\n print(i, '/', len(kw_sent_dict))\n final_results[kw_utterance] = {}\n\n for keyword, kw_feat in templates_dict.items():\n\n print(\"Comparing keyword and kw sentence:\", keyword, kw_utterance)\n\n lmd = compare_all(clip_feat, kw_feat)\n\n if keyword.split('_')[0] == word:\n final_results[kw_utterance][keyword] = (lmd, 1)\n else:\n final_results[kw_utterance][keyword] = (lmd, 0)\n\n with open(results_json, 'w') as f:\n json.dump(final_results, f)", "def main():\n ans = random_word()\n run_game(ans, N_TURNS)", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def run():\n print(\"clewsy CLEWs Model Building Script.\")\n print(\"When using clewsy please reference:\")\n print(\"T. Niet and A. 
Shivakumar (2020): clewsy: Script for building CLEWs models.\")\n main(sys.argv[1:])", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def run_demo(mode, aws_service, input, outfile=None, max_threads=1, encoding=\"utf-8\"):\n\n result = None\n logger = logging.getLogger(__name__)\n logger.info(\"Starting Demo\")\n\n logger.debug(\"Is running in mode {}\".format(mode))\n if mode == MODE_SINGLE:\n api = ComprehendApi()\n result = api.get_sentiment_singledoc(input)\n\n elif mode == MODE_BATCH:\n result = run_batch_demo(input, encoding, max_threads, aws_service)\n\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(result)\n write_result(result, outfile)", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def main() -> None:\n\n args = get_args()\n\n if words := find_words(args.length, args.file):\n print('\\n'.join(\n starmap(lambda i, w: f'{i:3}: {w}', enumerate(words, 1))))\n else:\n sys.exit(f'Found no words of length {args.length}!')", "def test_wordpiece_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\n \"emb_dim\": 30, \"tokenizer_type\": \"wordpiece-tokenizer\", \"add_terminals\": True\n },\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def test_language_sensitivity(self): \n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(get_wording_text('test_1'), lang)", "def test_visualize_recipe_taste(self):\n pass", "def test_out_of_order(self):\n self.choice.return_value = \"ant\"\n 
self.input.side_effect = list(\"tan\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')", "def demo(draw_parses=None, print_parses=None):\n demos = ['aandeelhoudersvergadering', 'hardloopwedstrijd']\n trees = []\n with MBMA() as program:\n for word in demos:\n print 'Parsing: %s' % word\n results = program.classify(word)\n trees.extend(program.trees(results))\n if draw_parses is None:\n print\n print 'Draw parses (y/n)?',\n draw_parses = sys.stdin.readline().strip().lower().startswith('y')\n if draw_parses:\n from nltk.draw.tree import draw_trees\n print ' please wait...'\n draw_trees(*trees)\n\n if print_parses is None:\n print\n print 'Print parses (y/n)?',\n print_parses = sys.stdin.readline().strip().lower().startswith('y')\n if print_parses:\n for parse in trees:\n print parse", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', '-f', type=str, help='path to corpus file', default='./train')\n args = parser.parse_args()\n\n corpus_reader = CorpusReader(args.file)\n model = BigramModel(corpus_reader.sents())\n\n test_sentences = ['Suggestive, Watson, is it not?',\n 'It is amazing that a family can be torn apart by something as simple as a pack of wild dogs!',\n 'So spoke Sherlock Holmes and turned back to the great scrapbook in which he was arranging and indexing some of his recent material.',\n 'What I like best about my friends is that they are few.',\n 'Friends what is like are they about I best few my that.']\n\n # prints two paragraphs with each five sentences\n for _ in range(2):\n print(generate(model, 5) + '\\n')\n\n # for each sentence in the test_sentences print the perplexity\n for sentence in test_sentences:\n print(model.perplexity(nltk.word_tokenize(sentence)))", "def main(args):\n bad_words_file = codecs.open(args.language + \"/feature_files/bad_words\", \"r\", \"utf-8\").readlines()\n bad_words = read_known_words(bad_words_file)\n \n good_words_file = codecs.open(args.language + \"/feature_files/good_words\", \"r\", \"utf-8\").readlines()\n good_words = read_known_words(good_words_file)\n\n curse_words_file = codecs.open(args.language + \"/feature_files/curse_words\", \"r\", \"utf-8\").readlines()\n curse_words = read_known_words(curse_words_file)\n\n prepositions_file = codecs.open(args.language + \"/feature_files/prepositions\", \"r\", \"utf-8\").readlines()\n prepositions = read_known_words(prepositions_file)\n\n determiners_file = codecs.open(args.language + \"/feature_files/determiners\", \"r\", \"utf-8\").readlines()\n determiners = read_known_words(determiners_file)\n\n syllables_file = codecs.open(args.language + \"/feature_files/syllables\", \"r\", \"utf-8\").readlines()\n syllable_structure = read_syllables_file(syllables_file)\n\n other_feature_files = glob.glob(args.language + \"/feature_files/*.txt\")\n other_features = set_features_from_files(other_feature_files)\n \n ermaObj = ConllToErma(args, bad_words, good_words, curse_words, prepositions, \\\n determiners, syllable_structure, other_features)\n\n if not args.just_test:\n # Input training file.\n train_id = open(args.train, \"r\")\n train = train_id.readlines()\n train_id.close()\n sys.stdout.write(\"Reading training file...\\n\")\n (train_features, train_skip_chains) = ermaObj.read_conll_file(train)\n sys.stdout.write(\"Building model...\\n\")\n train_hash = ermaObj.make_nodes(train_features)\n # Freeze the known features based on what's seen in the training data\n ermaObj.cutoff_features()\n else:\n 
train_hash = {}\n train_skip_chains = {}\n # Input testing file.\n test_id = open(args.test, \"r\")\n test = test_id.readlines()\n test_id.close()\n sys.stdout.write(\"Reading test file...\\n\")\n (test_features, test_skip_chains) = ermaObj.read_conll_file(test)\n sys.stdout.write(\"Building model...\\n\")\n test_hash = ermaObj.make_nodes(test_features, test=True)\n ermaObj.write_out(train_hash, train_skip_chains, test_hash, test_skip_chains)", "def customer_wants_condiments(self):\n answer = raw_input(\"Would you like Lemon? (y/n)\").lower()\n if answer.startswith('y'):\n return True\n else:\n return False", "def test_analyze_text(self):\n\n mic = mi.MicrophoneToText()\n\n with open('../examples/result.txt', 'w', encoding='utf-8') as f:\n f.write('x transcript\": straße lautet aarbergerstraße }x\\n')\n f.write('x transcript\": ort lautet testort }x\\n')\n f.write('x transcript\": einkommen lautet testeinkommen }x\\n')\n f.write('x transcript\": kaufpreis lautet testkaufpreis }x\\n')\n f.write('x transcript\": eigenkapital lautet testkapital }x\\n')\n\n #mic.threader()\n\n mic.switchoff()\n print(mic.keywords.values())\n with open('../examples/result.txt', 'r', encoding='utf-8') as f:\n filestring = f.read()\n print(filestring)\n self.assertTrue(' straße lautet aarbergerstraße ' in filestring)", "def main():\r\n # Initialize words from specific file\r\n words_list = hangman_helper.load_words()\r\n # Run single game with given word list to choose from\r\n run_single_game(words_list)\r\n # Ask the user if he would like to play again\r\n request = hangman_helper.get_input()\r\n if request[INPUT_TYPE] == hangman_helper.PLAY_AGAIN:\r\n if request[INPUT_VALUE]:\r\n run_single_game(words_list)", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def test_forward_word_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. 
Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.395\", \"1.395\"),\n after_sel=(\"1.395\", \"3.4\"),\n command_name=\"forward-word-extend-selection\",\n )", "def main():\n\n parser = argparse.ArgumentParser(description='Newsgroup post classifier')\n parser.add_argument('--data_dir',\n type=str,\n help=\"Data directory\")\n\n args = parser.parse_args()\n data_dir = args.data_dir\n\n example(data_dir=data_dir)", "def main():\n\n for line in sys.stdin:\n # reset distance to 0\n distance = 0\n # separate input and assign words to w1 and w2\n try:\n [w1, w2] = line.rstrip().split('\\t')\n except ValueError:\n # just in case someone makes an input error\n sys.stderr.write(\"Cannot accept input. Please use TAB between the words!\\n\")\n sys.exit()\n\n # print both words and Levenshtein distance\n print(\"{}\\t{}\\t{}\".format(w1, w2, lev(w1, w2)))", "def test_basic(self):\n test_cases = (\n makeSimpleTestCase('nicki minaj'),\n makeSimpleTestCase('jay-z'),\n makeTestCase('jay z', Matcher(title=Equals('jay-z'))),\n makeSimpleTestCase('passion pit'),\n makeSimpleTestCase('red hot chili peppers'),\n makeSimpleTestCase('the temper trap'),\n makeTestCase('glitch mob', Matcher(title=Equals('the glitch mob'))),\n makeTestCase('kanye', Matcher(title=Equals('kanye west'))),\n makeSimpleTestCase('justin bieber'),\n makeTestCase('carly rae', Matcher(title=Equals('carly rae jepsen'))),\n makeSimpleTestCase('johnny cash'),\n makeTestCase('waylon', Matcher(title=Equals('waylon jennings'))),\n makeTestCase('tmbg', Matcher(title=Equals('they might be giants'))),\n makeSimpleTestCase('mgmt'),\n makeSimpleTestCase('rjd2'),\n makeTestCase('B.o.B', Matcher(title=StartsWith('B.o.B'))),\n makeSimpleTestCase('50 cent'),\n makeTestCase('50', Matcher(title=Equals('50 cent'))),\n makeTestCase('fifty cent', Matcher(title=Equals('50 cent'))),\n makeSimpleTestCase('katy perry'),\n makeSimpleTestCase('alison krauss'),\n makeSimpleTestCase('the highwaymen'),\n makeSimpleTestCase('flo rida'),\n # TODO: Will this ever work? 
We don't have recordings by Mozart, just recordings of music he composed.\n # Do we really want to try to support composers?\n makeSimpleTestCase('mozart')\n )\n\n self._run_tests('basic_artist', test_cases)", "def test_main_incorrect_lang(runner: CliRunner) -> None:\n result = runner.invoke(__main__.main, \"-c tests/clippings-es.txt -l invented\")\n assert result.exit_code != 0", "def test_functional(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n self.assertEqual(\n data['items'][0]['fullName'],\n 'example.example'\n )", "def test_one_disemvowel_code_wars():\n from disemvowel_trolls import disemvowel\n tests = [(\"This website is for losers LOL!\", \"Ths wbst s fr lsrs LL!\"),\n (\"No offense but,\\nYour writing is among the worst I've everread\",\n \"N ffns bt,\\nYr wrtng s mng th wrst 'v vrrd\"),\n (\"What are you, a communist?\", \"Wht r y, cmmnst?\")]\n\n for case in tests:\n assert disemvowel(case[0]) == case[1]", "def test(dfa, words):\n for word in words:\n try:\n dfa.test(word)\n except AssertionError as e:\n logging.error('ERROR: %s\\n' % e.message)", "def test_kill_word(self):\n before_b = \"\"\"\\\n This is the first sentence. This\n is the second sentence. And\n this is the last sentence.\n \"\"\"\n after_b = \"\"\"\\\n This is the first sentence. This\n is the sentence. And\n this is the last sentence.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.6\", \"2.6\"),\n after_sel=(\"2.7\", \"2.7\"),\n command_name=\"kill-word\",\n )", "def test_wordMatch(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n self.failUnless(self.sampleSplitText == words)", "def main(args):\n print(f\"hello world: {args}\")\n print_timestamp()\n print(\"ran to store\")\n print_fruits((\"apple\", \"cherry\", \"grape\"))\n print_starches((\"rice\", \"bread\"))\n print_sweets((\"sugar\", \"honey\"))\n print(\"bought egg\")", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def main(args):\n preprocessor = Preprocessor(None)\n \n \"\"\"\n # collect words appear in the data\n words = set()\n logging.info('collecting words from {}'.format(config['valid_json_path']))\n words |= preprocessor.collect_words(config['test_json_path'],\n n_workers=args.n_workers)\n logging.info('collecting words from {}'.format(config['train_json_path']))\n words |= preprocessor.collect_words(config['train_json_path'],\n n_workers=args.n_workers)\n logging.info('collecting words from {}'.format(config['test_json_path']))\n words |= preprocessor.collect_words(config['valid_json_path'],\n n_workers=args.n_workers)\n # load embedding only for words in the data\n logging.info(\n 'loading embedding from {}'.format(config['embedding_vec_path'])\n )\n # EXPERIMENT oov off or on\n embedding = Embedding(config['embedding_vec_path'], words, oov_as_unk=True)\n embedding_pkl_path = 
os.path.join(args.dest_dir, 'embedding_{}.pkl'.format(args.postfix))\n logging.info('Saving embedding to {}'.format(embedding_pkl_path))\n with open(embedding_pkl_path, 'wb') as f:\n pickle.dump(embedding, f)\n \"\"\"\n embedding_pkl_path = os.path.join(args.dest_dir, 'embedding_{}.pkl'.format(args.postfix))\n embedding = pickle.load(open(embedding_pkl_path,'rb'))\n # update embedding used by preprocessor\n preprocessor.embedding = embedding\n \"\"\"\n # valid\n logging.info('Processing valid from {}'.format(config['valid_json_path']))\n valid = preprocessor.get_dataset(\n config['valid_json_path'], args.n_workers,\n {'n_positive': -1, 'n_negative': -1, 'shuffle': False}\n )\n valid_pkl_path = os.path.join(args.dest_dir, 'valid_{}.pkl'.format(args.postfix))\n logging.info('Saving valid to {}'.format(valid_pkl_path))\n with open(valid_pkl_path, 'wb') as f:\n pickle.dump(valid, f)\n \n # train\n logging.info('Processing train from {}'.format(config['train_json_path']))\n train = preprocessor.get_dataset(config['train_json_path'], args.n_workers)\n train_pkl_path = os.path.join(args.dest_dir, 'train_{}.pkl'.format(args.postfix))\n logging.info('Saving train to {}'.format(train_pkl_path))\n with open(train_pkl_path, 'wb') as f:\n pickle.dump(train, f)\n \"\"\"\n # test\n test = preprocessor.get_dataset(\n args.input, args.n_workers,\n {'n_positive': -1, 'n_negative': -1, 'shuffle': False}\n )\n test_pkl_path = os.path.join(args.dest_dir, 'test_{}.pkl'.format(args.postfix))\n logging.info('Saving test to {}'.format(test_pkl_path))\n with open(test_pkl_path, 'wb') as f:\n pickle.dump(test, f)", "def test_all():\n try:\n wd = WD('noaa_test.wav', 'noaa_test.png')\n return True\n except:\n return False", "def run_example():\n num_die_sides = 6\n hand = (1,2,5,5,5)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def main():\n\n # Get the arguments\n args = docopt(\"\"\"Combine two corpora and shuffle. Seed words are substituted in first corpus. 
(Word Injection)\n\n\n Usage:\n wi.py <corp1> <corp2> <lowerBound1> <upperBound1> <lowerBound2> <upperBound2> <targ> <outDir>\n \n Arguments:\n \n <corp1> = first corpus\n <corp2> = second corpus \n <lowerBound1> = lower bound for time period in first corpus\n <upperBound1> = upper bound for time period in first corpus\n <lowerBound2> = lower bound for time period in second corpus\n <upperBound2> = upper bound for time period in second corpus\n <targ> = target words (to substitute in one corpus)\n <outdir> = path+filename to target corpus (2 corpora combined, with substitution)\n\n \"\"\")\n \n corp1 = args['<corp1>']\n corp2 = args['<corp2>']\n lowerBound1 = int(args['<lowerBound1>'])\n upperBound1 = int(args['<upperBound1>'])\n lowerBound2 = int(args['<lowerBound2>'])\n upperBound2 = int(args['<upperBound2>'])\n targWords = args['<targ>']\n outFile = args['<outDir>']\n \n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n logging.info(__file__.upper())\n start_time = time.time()\n\n # get seeds words\n seedList = []\n for line in codecs.open(targWords, \"r\", 'utf-8'):\n line = line.strip().split(\"\\t\")[0]\n seedList.append(line)\n\n searchPat = re.compile(r'(\\b(?:%s)\\b)' % '|'.join(seedList), re.UNICODE)\n \n lineCt = 0\n wFile = codecs.open(\"tempOutFile.txt\", \"w\", 'utf-8')\n for line in codecs.open(corp1, \"r\", 'utf-8'):\n date = int(line.split(\"\\t\")[0]) \n if not lowerBound1 <= date <= upperBound1: # skip every sentence which is not in timeframe\n continue\n newLine = re.sub(searchPat, r\"\\1_\", line) \n wFile.write(newLine)\n lineCt +=1\n for line in codecs.open(corp2, \"r\", 'utf-8'):\n date = int(line.split(\"\\t\")[0]) \n if not lowerBound2 <= date <= upperBound2: # skip every sentence which is not in timeframe\n continue\n wFile.write(line)\n lineCt +=1\n print(\"Seed words substituted. 
Total number of lines: %d\" % (lineCt))\n indList = list(range(lineCt))\n random.shuffle(indList)\n sublists = np.array_split(indList, 5)\n \n # make sure that you do not append at the outFile form the last iteration\n open(outFile, 'w').close()\n wFile = codecs.open(outFile, \"a\", 'utf-8')\n for nrSub, sublist in enumerate(sublists):\n sublist = set(sublist)\n print(\"Processing %d part ...\" % (nrSub))\n smallLineList = []\n for nrL, line in enumerate(codecs.open(\"tempOutFile.txt\", \"r\", 'utf-8')):\n if nrL in sublist:\n smallLineList.append(line)\n random.shuffle(smallLineList)\n for line in smallLineList:\n wFile.write(line.strip(\"\\n\")+\"\\n\")\n \n \n os.remove(\"tempOutFile.txt\")\n \n\n logging.info(\"--- %s seconds ---\" % (time.time() - start_time))", "def given_test_cases(self):\n self.assertTrue(anagram_finder(\"listen\", \"silent\"))\n self.assertTrue(anagram_finder(\"triangle\", \"integral\"))\n self.assertFalse(anagram_finder(\"apple\", \"pabble\"))", "def main():\n assert_verbose(triple(\"hello\"), \"hhheeellllllooo\")\n assert_verbose(triple(\"kevin szuchet\"), \"kkkeeevvviiinnn ssszzzuuuccchhheeettt\")\n assert_verbose(triple(\" \"), \" \")\n assert_verbose(triple(\"\"), \"\")\n print(\"--- All tests passed ---\")", "def workbench_scenarios():\n return [\n (\"Oppia Embedding\",\n \"\"\"<vertical_demo>\n <oppia oppiaid=\"0\" src=\"https://www.oppia.org\" width=\"700\" />\n </vertical_demo>\n \"\"\"),\n ]", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def main():\r\n words = hangman_helper.load_words(file='words.txt')\r\n run_single_game(words)\r\n type_of_input=hangman_helper.get_input()\r\n while type_of_input[1]:\r\n run_single_game(words)\r\n type_of_input = hangman_helper.get_input()", "def test_rhyme_words(self):\n input_word_list = \"climbing\"\n expected_output_list = {\"rhyme\": [\"diming\", \"liming\", \"priming\", \"rhyming\", \"timing\"]}\n rv = self.rhymeWords(input_word=input_word_list)\n response_data = json.loads(rv.get_data(as_text=True))\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(set(ast.literal_eval(response_data[\"rhyme\"])), set(expected_output_list[\"rhyme\"]))", "def test_solvation_simple(self):\n waters = np.random.randint(1000, 10000)\n log.debug('Trying {} waters with default settings...'.format(waters))\n solvate(tleapfile='./cb6-but/tleap.in', pdbfile='cb6-but.pdb',\n bufferwater=waters)\n grepped_waters = sp.check_output([\"grep -oh 'WAT' ./cb6-but/solvated.prmtop | wc -w\"],\n shell=True)\n self.assertEqual(int(grepped_waters), waters)", "def main():\n # random_peeler() # Run one or the other by uncommenting/commenting\n peel_digits_test_suite() # Run one or the other by uncommenting/commenting", "def test_welcome_exploration(self):\n self.init_player(\n '0', 'Welcome to Oppia!', 'do you know where the name \\'Oppia\\'')\n self.submit_and_compare(\n '0', 'Yes!', 'In fact, the word Oppia means \\'learn\\'.')\n self.submit_and_compare('Finish', 'Check your spelling!', '')\n self.submit_and_compare(\n 'Finnish', 'Yes! 
Oppia is the Finnish word for learn.',\n 'What is the value of')", "def _test_examples(self):\n checks = [\n (\n \"ex5_line-of-sight_solution\",\n [r\"RAJA sequential\", r\"RAJA OpenMP\", r\"result -- PASS\"],\n ),\n (\n \"ex6_stencil-offset-layout_solution\",\n [r\"RAJA Views \\(permuted\\)\", r\"result -- PASS\"],\n ),\n (\n \"ex8_tiled-matrix-transpose_solution\",\n [r\"parallel top inner loop\", r\"collapsed inner loops\", r\"result -- PASS\"],\n ),\n (\"kernel-dynamic-tile\", [r\"Running index\", r\"(24,24)\"]),\n (\"plugin-example\", [r\"Launching host kernel for the 10 time\"]),\n (\"tut_batched-matrix-multiply\", [r\"result -- PASS\"]),\n (\"wave-eqn\", [r\"Max Error = 2\", r\"Evolved solution to time\"]),\n ]\n for exe, expected in checks:\n reason = \"test: checking output of {0} for {1}\".format(exe, expected)\n self.run_test(\n exe,\n [],\n expected,\n installed=False,\n purpose=reason,\n skip_missing=True,\n work_dir=self._extra_tests_path,\n )", "def test_count_elongated_words(self):\n review = \"Hiiii how aare you todaaay?\"\n result = count_elongated_words(review)\n self.assertEqual(result, 2)", "def setup(self):\n setup = RandomWordGenerator().get()\n self.formatted_word = ConvertWord().convert_to_dict(setup)\n self.underscore_word = HangmanUnderscoreDiagram(\n setup).create_hidden_word()\n self.failed_guesses = 0\n print(\"Hello\")\n self.has_won = False\n self.start_game(True)", "def test_back_word_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. 
StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.342\", \"3.342\"),\n after_sel=(\"3.332\", \"3.342\"),\n command_name=\"back-word-extend-selection\",\n )", "def test():\n\tcheck_redondancy(teamz)\n\tcheck_redondancy(versions)\n\tname=\"The.Dark.Knight.x264.1080p.DTS.DD.5.1.MULTi.BluRay.GAÏA.mkv\"\n\twrez(\"\",name)\n\tname=\"XIII.La.Conspiration.Part2.FiNAL.FRENCH.720p.BluRay.x264.JMT.mkv\"\n\twrez(\"\",name)\n\tname=\"The.Matrix.1999.1080p.BluRay.AC3.DTS.x264.GAIA.mkv\"\n\twrez(\"\",name)\n\tname=\"You.Dont.Mess.With.The.Zohan.2008.TRUEFRENCH.720p.BluRay.x264.AiRLiNE.mkv\"\n\twrez(\"\",name)\n\tname=\"Benjamin.Gates.et.le.trésor.des.Templiers.x264.1080p.DTS.DD5.1.MULTi.BluRay.GAÏA.mkv\"\n\twrez(\"\",name)", "def test_random_smoke(fastonly):\n word_length = 5\n while word_length < 65:\n run_random_test(word_length, 3, 1000 if fastonly else 100000, word_length)\n word_length += 1", "def main():\n if sentence(Parser()):\n print 'yes'\n else:\n print 'no'", "def test_machine_learning():" ]
[ "0.64361274", "0.63639456", "0.63639456", "0.61933094", "0.5985079", "0.59168476", "0.58774954", "0.58620715", "0.58588994", "0.5743201", "0.57326597", "0.57213676", "0.5714051", "0.57126486", "0.5685293", "0.56775856", "0.56722903", "0.56616175", "0.56303006", "0.5619225", "0.56074715", "0.5596039", "0.5593733", "0.5568163", "0.5562195", "0.55598956", "0.55322343", "0.55266786", "0.5526038", "0.5520054", "0.55118227", "0.55118227", "0.55070615", "0.550483", "0.5466108", "0.54627067", "0.5459848", "0.5456789", "0.5453726", "0.54475063", "0.5444899", "0.5431011", "0.5414936", "0.54028404", "0.54017484", "0.5401027", "0.53949696", "0.5386136", "0.53807706", "0.5378404", "0.53626865", "0.53413725", "0.53413725", "0.53324986", "0.5331123", "0.5324135", "0.5316674", "0.53090996", "0.5308478", "0.529683", "0.52967215", "0.52942336", "0.5288127", "0.5287964", "0.5284016", "0.5283351", "0.527867", "0.52779096", "0.5276678", "0.52744144", "0.5272154", "0.5271705", "0.52707183", "0.52705735", "0.52694464", "0.52685046", "0.52661806", "0.52661806", "0.52661806", "0.52633697", "0.5263067", "0.5260797", "0.52589226", "0.52511984", "0.52502954", "0.5249568", "0.52495325", "0.52478844", "0.5246048", "0.52420616", "0.5240295", "0.52270496", "0.5221822", "0.52163655", "0.5216139", "0.5214651", "0.5212553", "0.5208013", "0.5200519", "0.51992005" ]
0.7582199
0
Creates and saves a new user
def create_user(self, phone, password=None, **extra_fields):
        print(extra_fields)
        if not phone:
            raise ValueError('Users must have an phone number')
        if not password:
            raise ValueError('Users must have a password')
        try:
            extra_fields['role']
        except Exception:
            raise ValueError('Users must have a role')
        try:
            extra_fields['name']
        except Exception:
            raise ValueError('Users must have a name')
        user = self.model(phone=phone, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def create_user():\n first_name = request.form['first_name'].capitalize()\n last_name = request.form['last_name'].capitalize()\n image_url = request.form['image_url']\n\n new_user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. 
This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def post(self):\n data = flask.request.json\n user_dao.create_user(data)\n return None, 201", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def save(self, request, validated_data):\n # Create user\n user = User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n username=validated_data['username'].encode('utf-8')\n )\n\n return user", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def make_new_user():\n\n new_user = User(\n first_name=request.form['first_name'],\n last_name=request.form['last_name'],\n image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def create(self, validated_data):\n user = get_user_model().objects.create(\n username=validated_data['username'],\n )\n user.set_password(validated_data['password'])\n user.save()\n return user", "def post(self):\n data = UserRegister.parser.parse_args()\n\n if 
UserModel.find_by_id(data['username']):\n print(\"Failed\", file=sys.stderr)\n return {\n 'message':\n \"A user with name '{}' already exists.\"\n .format(data['username'])\n }, 400\n\n\n user = UserModel(**data) # data['username'], data['details'].......\n user.save_to_db()\n\n return {\"message\": \"User created successfully.\"}, 201", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def save(self):\n data = self.cleaned_data\n del data['password_confirmation']\n return User.objects.create_user(**data)", "def create(self, validated_data):\n username = validated_data.get('username')\n email = validated_data.get('email')\n password = validated_data.get('password')\n first_name = validated_data.get('first_name', '')\n last_name = validated_data.get('last_name', '')\n return User.objects.create_user(username, email, password, first_name=first_name,\n last_name=last_name)", "def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user", "def create_user():\n body = request.json\n username = body.get('username')\n password = body.get('password')\n validation = validate_user(username, password)\n password = md5(password.encode('utf-8')).hexdigest()\n if validation != \"OK\":\n return HTTPResponse(status=500, body={\"message\":validation})\n try:\n with db.atomic():\n user = User.create(username=username, password=password)\n user.save()\n ret = json.dumps({'message':'user created'})\n return HTTPResponse(status=200, body=ret)\n except IntegrityError:\n ret = json.dumps({'message':'user already exists'})\n return HTTPResponse(status=500, body=ret)", "def create_user():\r\n data = request.get_json() or {}\r\n print(data)\r\n # some data checks\r\n if 'username' not in data or 'password' not in data:\r\n return bad_request('must include username and password fields')\r\n if User.query.filter_by(username=data['username']).first():\r\n return bad_request('please use a different username')\r\n user = User()\r\n # add user to database\r\n user.add_user(data)\r\n # check that the transaction was successful\r\n res = User.query.filter_by(username=data['username']).one_or_none()\r\n # return added user as query response\r\n if res:\r\n response = jsonify(res.to_dict())\r\n response.status_code = 201\r\n # else return error\r\n else:\r\n response.status_code = 403\r\n response.headers['Location'] = url_for('api.get_user', id=user.id)\r\n return response", "def create_user():\n try:\n\n user = User(username=request.json.get(\"username\"), score=0,)\n\n user.insert()\n\n response = jsonify({\"success\": True, \"created_user_id\": user.id})\n\n except AttributeError:\n abort(400)\n\n return response", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return 
user_list(\"Created user \" + target.name)", "def register_user(self):\n User.add_user(User(self.email.data, self.password.data))", "def create_user():\n body = request.get_json(silent=True)\n if body is None:\n abort(400, jsonify(error=\"Not a JSON\"))\n if 'email' not in body:\n abort(400, jsonify(error=\"Missing email\"))\n if 'password' not in body:\n abort(400, jsonify(error=\"Missing password\"))\n user = models.user.User(**body)\n models.storage.new(user)\n models.storage.save()\n return make_response(jsonify(user.to_dict()), 201)", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def signup(cls, username, first_name, last_name, email, password):\n\n hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')\n\n user = User(\n username=username,\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=hashed_pwd,\n )\n\n db.session.add(user)\n return user", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def _create_user(self, email, password,username, **extra_fields):\r\n if not email:\r\n raise ValueError('The given email must be set')\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n email = self.normalize_email(email)\r\n user = self.model(email=email,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def _create_user_Api(self,password,username, **extra_fields):\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n user = self.model(email=username,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def post(self):\n self.parser.add_argument(\n 'name', required=True, type=self.validator.validate_string_fields, help='Enter a valid name')\n self.parser.add_argument(\n 'email', required=True, 
type=self.validator.validate_string_fields, help='Must be a valid email')\n self.parser.add_argument(\n 'password', required=True, type=self.validator.validate_string_fields, help='Must enter a valid password')\n\n user = self.parser.parse_args()\n response = self.user_models.create_user(user['name'],\n user['email'],\n user['password'])\n return {\"message\": response}, 201", "def register_new_user(first_name,email,password):\n\n new_user = User(first_name=first_name, email=email, password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def create_user(self, username, password, email, name):\n\n duplicate_check = User.query.filter_by(username=username).first()\n if duplicate_check is not None:\n return\n user = User(username=username, password=password, email=email, name=name)\n db.session.add(user)\n db.session.commit()", "def create(self, validated_data):\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(\n username, email, password, **validated_data)\n return user", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n # Validate request.user_name is alphanumeric\n if not str(request.user_name).isalnum():\n raise endpoints.BadRequestException(\n 'User name must be alphanumeric')\n # If email address is given, validate it.\n email = ''\n if not getattr(request, 'email') == None:\n email = str(getattr(request, 'email'))\n if len(email) > 0:\n if not validateEmail(email):\n raise endpoints.BadRequestException(\n 'The given email is invalid!')\n user = User(name=request.user_name, email=email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data['email'],\n first_name = validated_data['first_name'],\n last_name = validated_data['last_name'],\n password = validated_data['password']\n )\n return user", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def create_user():\n usr = request.get_json()\n if not usr:\n abort(400, {'Not a JSON'})\n elif 'email' not in usr:\n abort(400, {'Missing email'})\n elif 'password' not in usr:\n abort(400, {'Missing password'})\n else:\n new_usr = User(**usr)\n storage.new(new_usr)\n storage.save()\n return jsonify(new_usr.to_dict()), 201", "def create(self, validated_data):\n ## overriding default create\n\n user = UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password=validated_data['password']\n )\n \n return user", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data[\"email\"],\n name=validated_data[\"name\"],\n password=validated_data[\"password\"]\n )\n\n return user", "def create_user():\n email = request.json.get('email')\n username = request.json.get('username')\n password = request.json.get('password')\n\n details = [email, username, password]\n\n if not all(details):\n return bad_request(\"you must supply email, username and password\")\n if User.query.filter_by(email=email).first() is not None and User.query.filter_by(username=username) is not None:\n return forbidden(\"email or username already exist\")\n\n user = User(email=email, username=username)\n 
user.hash_password(password)\n user.save()\n\n return {'status': (user.username + ' has successfully registered')}", "def new_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create(self, validated_data:tuple):\n user = user_details.objects.create(user_name=validated_data[0], email=validated_data[1], password=validated_data[2])\n return user", "def create_user():\n form = UserForm(prefix='register')\n\n if not form.validate_on_submit():\n flash('Invalid input.', 'warning')\n return view_index(form)\n else:\n user, exists = db_insert_or_get(User, name=form.name.data, defaults={'password': form.password.data})\n if exists:\n flash('Username taken.', 'warning')\n else:\n db.session.commit()\n\n session['user_name'] = user.name\n app.logger.info('User %s created successfully.', user.name)\n flash('User created successfully.', 'success')\n\n return redirect(url_for('view_index'))", "def create(self, validated_data):\n user = User.objects.create(username=validated_data['username'],\n email=validated_data['email'],\n first_name=validated_data['first_name'],\n last_name=validated_data['last_name'])\n\n user.set_password(validated_data['password'])\n user.save()\n\n return user", "def save(self):\n new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],\n password=self.cleaned_data['password1'],\n email=self.cleaned_data['email'],\n firstname=self.cleaned_data['first_name'],\n lastname=self.cleaned_data['last_name'],\n agree=self.cleaned_data['tos'])\n return new_user", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def create(self, validated_data):\n\n user = models.User(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n user.save()\n\n return user", "def new_user():\n username = request.json.get('username')\n password = request.json.get('password')\n picture = request.json.get('picture')\n email = request.json.get('email')\n if username is None or password is None:\n print(\"missing arguments\")\n abort(400)\n\n if getUserByUsername(username) is not None:\n print(\"existing user\")\n return jsonify({'message': 'user already exists'}), 200\n\n user = addUser(username, picture, email, password)\n return jsonify(user=user.serialize), 201", "def create_a_user(self, username='fry', email='[email protected]', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create(self, validated_data):\n user = User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n )\n return user", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = 
user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def create_user():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n\n data = request.json\n #TODO check if request body contain required keys\n #if [\"login\", \"password\", \"user\", \"email\", \"first_name\", \"second_name\", \"phone\"].sort() != (data.keys()).sort():\n # return jsonify(status=\"err\")\n\n login = data[\"login\"]\n hash_password = raw_password_to_string(str(data[\"password\"]))\n role = \"user\"\n email = data[\"email\"]\n first_name = data[\"first_name\"]\n second_name = data[\"second_name\"]\n phone = data[\"phone\"] \n #TODO data validation\n #if login == \"\" or hash_password == \"\" or role == \"\" or email == \"\" or first_name == \"\" or second_name == \"\":\n # return jsonify(status=\"error\")\n\n db.session.add(User(login=login, hash_password=hash_password, role=role, email=email, first_name=first_name, second_name=second_name, phone=phone))\n try:\n db.session.commit()\n return jsonify(status=OK_STATUS)\n except:\n db.session.rollback()\n return jsonify(status=DATABASE_INTEGRITY_ERROR)", "def save(self, commit=True):\n\t\tprint('save django.user ')\n\t\tprint(self.cleaned_data)\n\t\tusr = User.objects.create_user(self.cleaned_data['username'], self.cleaned_data['email'], self.cleaned_data['pass1'])\n\t\tkuser = users.models.KUser()\n\t\tkuser.user = usr\n\t\tkuser.realName = self.cleaned_data['realName']\n\t\tkuser.job = self.cleaned_data['job']\n\t\tkuser.privilege = self.cleaned_data['privilege']\n\t\tkuser.employeeId = self.cleaned_data['employeeId']\n\t\tkuser.isManager = self.cleaned_data['isManager']\n\t\tkuser.gender = self.cleaned_data['gender']\n\t\tprint('create kuser:')\n\t\tprint(kuser)\n\n\t\tif commit:\n\t\t\tkuser.save()\n\t\treturn kuser", "def create_user(username, name):\n db.session.add(Users(username=username, name=name))\n db.session.commit()", "def create_new_user(data):\n status, error = validate_user(data)\n if status:\n user = mongo.db.users.insert(data)\n return True, str(user)\n else:\n return False, error", "def create(self,validated_data):\n\n user = models.User.object.create_user(\n email = validated_data['email'],\n full_name = validated_data['full_name'],\n phone = validated_data['phone'],\n password = validated_data['password']\n )\n\n #user.set_password(validated_data['password'])\n user.save()\n return user", "def create(self, validated_data):\n user = User.objects.create(\n first_name=validated_data.get('first_name'),\n middle_name=validated_data.get('middle_name'),\n last_name=validated_data.get('last_name'),\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n mobile_number=validated_data.get('mobile_number'),\n gender=validated_data.get('gender'),\n is_active=validated_data.get('is_active'),\n country=validated_data.get('country'),\n address=validated_data.get('address'),\n role=validated_data.get('role'),\n )\n if self.context['request'].data.get('file_profile_picture') is not None:\n user.profile_picture = self.context['request'].data['file_profile_picture']\n if self.context['request'].data.get('file_signature') is not None:\n 
user.signature = self.context['request'].data['file_signature']\n user.set_password(validated_data.get('password'))\n user.save()\n return user", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def create_user(email, password, fname, lname):\n\n user = User(email=email, password=password, fname=fname, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def new_user():\n new_user = None\n try:\n new_user = user_schema.loads(request.data)\n except ValidationError as err:\n pass\n # TODO: send a exception message\n # save data:\n db.session.add(new_user)\n db.session.commit()\n\n return Response(\n response=user_schema.dumps(new_user, sort_keys=True, indent=4),\n status=http_status.OK,\n mimetype='application/json'\n )", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def add_user():\n\n email = request.form[\"email\"]\n password = request.form[\"password\"] \n fname = request.form[\"fname\"]\n lname = request.form[\"lname\"]\n macaddress = request.form[\"macaddress\"]\n role = request.form[\"role\"]\n\n password_hash = generate_password_hash(password, method='sha256', salt_length=8)\n # create a new User object.\n new_user = User(email=email, password=password_hash,\n fname=fname, lname=lname, macaddress=macaddress, role=role)\n\n # add new user to db\n db.session.add(new_user)\n # commit the new add.\n db.session.commit()\n\n return userSchema.jsonify(new_user)", "def register_user(request):\n data = json.loads(request.body.decode())\n user = User.objects.create_user(\n username = data['username'],\n password = data['password'],\n email = data['email'],\n first_name = data['first_name'],\n last_name = data['last_name']\n )\n user.save()\n return login_user(request)", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create_user(user: User):\n coll = data_access.get_user_collection()\n\n if user.name == \"\":\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"User name must not be empty.\")\n\n if coll.find_one(user.dict()) is None:\n coll.insert_one(user.dict())", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return 
get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n\n # Here we actually create a new user.\n user = models.UserProfile(\n email = validated_data['email'],\n name = validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n # Here we save the object to the database.\n user.save()\n\n return user", "def save(self, request):\n user = get_user_model()()\n cleaned_data = self.get_cleaned_data()\n email = cleaned_data.get('email')\n nickname = cleaned_data.get('nickname')\n\n user.email = email\n user.nickname = nickname\n\n if 'password1' in cleaned_data:\n user.set_password(cleaned_data[\"password1\"])\n else:\n user.set_unusable_password()\n\n user.save()\n\n return user", "def create(self, validated_data):\n user = models.UserProfile.objects.create_user(\n email=validated_data['email'],\n username=validated_data['username'],\n password=validated_data['password'],\n\n )\n\n return user", "def create(self, validated_data):\n password = validated_data.pop('password')\n new_user = User.objects.create(**validated_data)\n new_user.set_password(password)\n new_user.save()\n return new_user", "def post(self):\n data = request.json\n return save_new_user(data=data)", "def add_user():\n username = request.json['username']\n email = request.json['email']\n\n user = User(username, email)\n\n db.session.add(user)\n db.session.commit()\n return user_schema.jsonify(user)", "def create_user():\n try:\n payload = _validatePayload(request)\n timestamp = int(time.time() * 1000)\n user = {\n 'name': payload.get('name'),\n 'email': payload.get('email'),\n 'password': _encodePassword(payload.get('password')),\n 'createdAt': timestamp,\n 'updatedAt': timestamp,\n }\n\n resp = table.put_item(\n Item=user,\n Expected={'email': {'Exists': False}}\n )\n return jsonify(user), 200\n except Exception as e:\n logger.info('ERROR {}'.format(str(e)))\n return _customizeErrorMessage(e)", "def create():\n api_request = apireq.APIRequest(request, 'client_schema')\n if api_request.is_invalid():\n return api_request.error_text, 400\n return user_management.create_user(api_json['username'])", "def post(self):\n args = usr_parser.parse_args()\n # convert admin parameter into a boolean\n admin = bool(args['admin'])\n # check if the id of user is provided\n if args['uid'] is not None:\n user = User.new_user(admin, args['uid'])\n else:\n user = User.new_user(admin)\n \n \"\"\" check if the user is created, \n if the user with the same id exists it won't be created \"\"\"\n if user is None:\n return abort(422, message=\"User id already exists\")\n \n \"\"\" create an object to represent the user with the password provided\n and return it as a response \"\"\"\n userToReturn = { 'uid' : user.id, 'password':user.password,'admin':user.admin }\n return userToReturn", "def create(self, validated_data):\n # user = super().create(validated_data)\n # user.set_password(validated_data['password'])\n # user.save()\n\n user = User.objects.create_user(**validated_data)\n return user", "def add_user():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n 
fname = request.form.get(\"fname\")\n lname = request.form.get(\"lname\")\n language = request.form.get(\"language\")\n\n new_user = User(email=email, password=password,fname=fname,\n lname=lname,language=language)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/\")", "def create(self, validated_data):\n user = User(\n email=validated_data['email'],\n username=validated_data['username']\n )\n user.set_password(validated_data['password'])\n user.save()\n return user", "def register():\n insert_user(json_body())\n try:\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotCreateEntry()\n\n return jsonify({'message': 'Created user.'}), 200", "def add_new_user_to_db():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n img_url = request.form['img_url']\n\n new_user = User(first_name=first_name,last_name=last_name, img_url=img_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/users')" ]
[ "0.81209964", "0.7988362", "0.7942134", "0.794042", "0.7909767", "0.79059476", "0.7895585", "0.78710216", "0.7859143", "0.7820724", "0.78207135", "0.7810844", "0.77932024", "0.77833587", "0.77703035", "0.77604187", "0.7755999", "0.77458596", "0.7740836", "0.773325", "0.7719761", "0.7711842", "0.7701515", "0.7698405", "0.76949584", "0.76921296", "0.76910037", "0.7687408", "0.76833165", "0.76751274", "0.76750547", "0.7674562", "0.7670038", "0.76689476", "0.7664447", "0.76607686", "0.76595867", "0.76533335", "0.7648598", "0.7646991", "0.7645063", "0.7639056", "0.7629406", "0.76251674", "0.76125914", "0.7612525", "0.7610458", "0.7605239", "0.7595816", "0.7593075", "0.7592642", "0.75917715", "0.75899035", "0.7585106", "0.75774336", "0.75770664", "0.7575126", "0.7570146", "0.7568613", "0.7568577", "0.7566073", "0.7560057", "0.7559275", "0.75556296", "0.75534654", "0.7548958", "0.75474805", "0.75377953", "0.7534211", "0.75310594", "0.75265086", "0.7525702", "0.75227356", "0.7522374", "0.75223714", "0.7511384", "0.75112253", "0.7503081", "0.7502958", "0.7500214", "0.74990493", "0.74990493", "0.74990493", "0.74990493", "0.74990493", "0.74990493", "0.74990493", "0.7497747", "0.7494409", "0.7490789", "0.7486419", "0.7478672", "0.7469943", "0.74624026", "0.7456343", "0.7453431", "0.7442961", "0.7441358", "0.74406075", "0.743075", "0.7427708" ]
0.0
-1
Creates and saves a new super user
def create_superuser(self, phone, password):
        user = self.model(phone=phone)
        user.set_password(password)
        user.save(using=self._db)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_superuser(self, username, email, password):\n print(\"creating super user....\")\n user = self.create_user(\n\n username=username,\n password=password,\n email = email,\n commit=False,\n )\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def createsuperuser():\n\n email = prompt('User E-Mail')\n email_confirm = prompt('Confirm E-Mail')\n\n if not email == email_confirm:\n sys.exit('\\nCould not create user: E-Mail did not match')\n\n if not EMAIL_REGEX.match(email):\n sys.exit('\\nCould not create user: Invalid E-Mail addresss')\n\n password = prompt_pass('User password')\n password_confirm = prompt_pass('Confirmed password')\n\n if not password == password_confirm:\n sys.exit('\\nCould not create user: Passwords did not match')\n\n datastore = SQLAlchemyUserDatastore(db, User, Role)\n datastore.create_user(\n email=email,\n password=encrypt_password(password),\n active=True,\n super_user=True)\n\n db.session.commit()", "def _create_superuser(username, email, password):\n if username and email and password:\n user, created = User.objects.get_or_create(pk=defaults.USERWARE_SUPERUSER_ID)\n if user:\n user.username = username\n user.email = email\n user.set_password(password)\n user.is_staff = True\n user.is_active = True\n user.is_superuser = True\n user.save()\n action = \"Created\" if created else \"Updated\"\n print >> sys.stderr, \"{} Superuser: [username={}, email={}, id={}]\".format(action, username, email, user.id)", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_superuser(self, su_id, first_name, last_name, email, phone_number, password):\n user = self.create_user(\n su_id,\n first_name,\n last_name,\n email,\n phone_number,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self,email,name,password):\n\n user = self.create_user(email, name, password)\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n return user", "def create_superuser(self, username, firstname, lastname, email, password):\n user = self.create_user(\n username=username,\n firstname=firstname,\n lastname=lastname,\n email=email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, full_name, password=None):\n print(\"Is this the method being called\")\n user = self.create_user(\n email,\n full_name,\n password=password,\n )\n user.staff = True\n user.admin = True\n user.save(using=self._db)\n return user", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_superuser(self, email, password, **extrac_fields):\n\n user = self.create_user(email, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n user = self.model(username=username, 
**extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_superuser(self, username, email, password):\n\t\tuser = self._create_user(username, email, password)\n\t\tuser.is_admin = True\n\t\tuser.is_author = True\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_superuser(self, email, name, phone1, password=None, signed_up=timezone.localtime(),):\n user = self.create_user(\n email,\n password=password,\n name=name,\n phone1=phone1,\n signed_up=signed_up,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, first_name, last_name, username, email, date_of_birth, password):\n user = self.model(\n first_name = first_name,\n last_name = last_name,\n username = username,\n email=self.normalize_email(email),\n date_of_birth=date_of_birth,\n password=password,\n\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password=None):\n if password is None:\n raise ValueError('Password should not be none')\n\n # Creating user instance and saving it to database\n user = self.create_user(username, email, password)\n\n # Assigning current user as superuser\n user.is_superuser = True\n user.is_staff = True\n\n # Saving the modified data to the database\n user.save()\n\n return user", "def create_superuser(self,name,email,password):\n\n user = self.Create_user(email,name,password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n \n return user", "def create_superuser(self,email,password):\n user = self.create_user(email,password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n\n user = self.create_user(email, name, password)\n\n # Make this user an admin.\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, username, first_name, last_name, password):\n\n user = self.create_user(\n email,\n username,\n first_name,\n last_name,\n password\n )\n\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password, firstname, lastname):\n user = self.create_user(\n firstname,\n lastname,\n email,\n '',\n password=password,\n )\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def save(self):\n data = self.cleaned_data\n del data['password_confirmation']\n return User.objects.create_user(**data)", "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.admin = True\n user.staff = True\n user.save(using=self._db)\n return user", "def save(self, commit=True):\n\t\tprint('save django.user ')\n\t\tprint(self.cleaned_data)\n\t\tusr = User.objects.create_user(self.cleaned_data['username'], self.cleaned_data['email'], self.cleaned_data['pass1'])\n\t\tkuser = users.models.KUser()\n\t\tkuser.user = usr\n\t\tkuser.realName = self.cleaned_data['realName']\n\t\tkuser.job = self.cleaned_data['job']\n\t\tkuser.privilege = self.cleaned_data['privilege']\n\t\tkuser.employeeId = self.cleaned_data['employeeId']\n\t\tkuser.isManager = self.cleaned_data['isManager']\n\t\tkuser.gender = self.cleaned_data['gender']\n\t\tprint('create kuser:')\n\t\tprint(kuser)\n\n\t\tif commit:\n\t\t\tkuser.save()\n\t\treturn kuser", "def create_superuser(self, email, password, **extra_fields):\n 
extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n\n if extra_fields.get('is_superuser') is False:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, username, password, email=None):\n user = self.create_user(username, password)\n user.is_staff = True\n user.is_superuser = True\n user.save()\n\n return user", "def create_superuser(self, email, password, **extra_fields):\n user = self.model(\n email = email,\n **extra_fields \n )\n user.set_password(password)\n user.is_admin =True\n user.is_superuser=True\n user.is_staff=True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **kwargs):\n user = self.create_user(email, password, **kwargs)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, first_name, last_name, password):\n user = self.create_user(email, password=password, first_name=first_name, last_name=last_name)\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault(\"is_staff\", True)\n extra_fields.setdefault(\"is_superuser\", True)\n extra_fields.setdefault(\"is_active\", True)\n\n if extra_fields.get(\"is_staff\") is not True:\n raise ValueError(_(\"Superuser must have is_staff=True.\"))\n if extra_fields.get(\"is_superuser\") is not True:\n raise ValueError(_(\"Superuser must have is_superuser=True.\"))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, username, email, password=None):\n\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, date_of_birth, full_name, password):\n user = self.create_user(\n username=username,\n full_name=full_name,\n email=UserManager.normalize_email(email),\n password=password,\n date_of_birth=date_of_birth,\n is_staff=True\n )\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def register_user(self):\n User.add_user(User(self.email.data, self.password.data))", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. 
This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def create_superuser(self, email, password=None, **extra_fields):\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, major, password):\r\n user = self.create_user(username, major, password=password,)\r\n user.is_admin = True\r\n user.save(using=self._db)\r\n return user", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def create_superuser(self, username, email, password):\n user = self.create_user(username, email,\n password=password\n )\n user.is_admin = True\n user.is_active = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n return self._create_user(email, password, True, True, is_active=True,\n **extra_fields)", "def create_superuser(self, username, email, password):\n if password is None:\n raise TypeError('Superusers must have a password.')\n\n user = self.create_user(username, email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password, full_name=None):\n user = self.create_user(\n email,\n password=password,\n )\n user.staff = True\n user.admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, first_name, last_name, password):\n user = self.create_user(email, first_name, last_name, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n\n )\n\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True;'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, username, email, password):\n if password is None:\n raise TypeError('Superusers must have a password.')\n\n id_number = self.create_id_number()\n user = self.create_user(\n username=username,\n email=email,\n password=password,\n id_number=id_number\n )\n user.is_superuser = True\n user.is_active = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, password):\n if password is None:\n raise TypeError('Superusers must have a password.')\n user = self.create_user(email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n return user", "def create_superuser(self, email, date_of_birth, password):\n user = self.create_user(email,\n password=password,\n 
date_of_birth=date_of_birth\n )\n user.is_admin = True\n user.save()\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def create_superuser(self, email, username, password):\n user = self.create_user(\n email,\n username,\n password,\n )\n user.is_staff = True\n user.is_superuser = True\n user.is_admin = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n user = self.create_user(email, name, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n extra_fields.setdefault('type', \"ADMINISTRATOR\")\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, username, email, password=None):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def test_if_created_superusers_permissions(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_superuser.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def create(self, validated_data):\n # user = super().create(validated_data)\n # 
user.set_password(validated_data['password'])\n # user.save()\n\n user = User.objects.create_user(**validated_data)\n return user", "def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data['password1'])\n\n user.save()\n\n # Making user profile and assigning to CPCESU\n # CPCESU\n #group = Organization.objects.get(name='Colorado Plateau')\n\n # New profile with group\n profile = UserProfile(user=user, first_name=self.cleaned_data.get('first_name'),\n last_name=self.cleaned_data.get('last_name'))\n profile.save()\n\n return user", "def create_superuser(self, email, username, gender, first_name, last_name, password):\n user = self.create_user(\n email = self.normalize_email(email),\n username = username,\n password = password,\n gender = gender,\n first_name = first_name,\n last_name = last_name\n )\n\n user.is_admin = True\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n return self.create_user(email, password, is_staff=True,\n is_superuser=True, **extra_fields)", "def create_superuser(self, email, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_manager', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_manager') is not True:\n raise ValueError(_('Superuser must have is_manager=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "def create_superuser(self, username, email, password):\n return self.create_user(username, email, password, is_staff = True, is_superuser= True)", "def create_superuser(self, *args, **kwargs):\n password = kwargs.pop('password', '')\n email = kwargs.pop('email', '')\n user = self.model(email=self.normalize_email(email), **kwargs)\n user.set_password(password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, name, password):\n # Create a normal user first, then change it to the super user\n user = self.create_user(email, name, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def save(self):\n new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],\n password=self.cleaned_data['password1'],\n email=self.cleaned_data['email'],\n firstname=self.cleaned_data['first_name'],\n lastname=self.cleaned_data['last_name'],\n agree=self.cleaned_data['tos'])\n return new_user", "def create_superuser(self, email, username, password):\n user = self.create_user(email, password=password,\n username=username )\n user.is_admin = True\n user.save(using=self._db)\n return user", "def save(self, request, validated_data):\n # Create user\n user = 
User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n username=validated_data['username'].encode('utf-8')\n )\n\n return user", "def create_superuser(self, email, password):\n\t\tuser = self.create_user(email, password)\n\t\tuser.is_admin = True\n\t\tuser.save(using=self._db)\n\t\treturn user", "def add_user(username, email, password, is_staff):\n\n user = User.objects.get_or_create(username=username, email=email)[0]\n user.set_password(password)\n user.is_staff = is_staff\n if is_staff:\n user.is_superuser = True\n user.save()\n registered_user = RegisteredUser.objects.get_or_create(user=user)[0]\n registered_user.save()\n return registered_user", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def create(self, validated_data):\n ## overriding default create\n\n user = UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password=validated_data['password']\n )\n \n return user", "def create_superuser(self, email, name, password):\n\n user = self.create_user(email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, first_name='', last_name='', password=None, **extra_fields):\n return self._create_user(email, password, first_name, last_name, is_staff=True, is_superuser=True,\n **extra_fields)", "def create_superuser(self, name, email, password):\n user = self.create_user(name, email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n employee = self.create_user(email, name, password)\n employee.is_superuser = True\n employee.is_staff = True\n employee.save(using=self._db)\n\n return employee", "def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user", "def create(self,validated_data):\n\n user = models.User.object.create_user(\n email = validated_data['email'],\n full_name = validated_data['full_name'],\n phone = validated_data['phone'],\n password = validated_data['password']\n )\n\n #user.set_password(validated_data['password'])\n user.save()\n return user", "def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n 
db.session.add(settings)\n db.session.commit()", "def create_superuser(self,FirstName,LastName,MobileNo, EmailId, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n logger.error('creating super user with is_staff=False.')\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n logger.error('creating super user with is_superuser=False.')\n raise ValueError('Superuser must have is_superuser=True.')\n return self._create_user(FirstName,LastName, EmailId, MobileNo , password, **extra_fields)", "def create_superuser(self, email, password):\n\n user = self.create_user(email, password=password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None, is_staff =False, is_superuser = False):\n print(\"create_user:\", \"creating .....\")\n if not email:\n raise ValueError('Users must have an email address')\n\n # get prefix \n try:\n db = select_write_db(model_name=self.model._meta.model_name)\n prefix = db.get_prefix \n except:\n uuid3 = uuid.uuid3(uuid.NAMESPACE_DNS,settings.USER_INIT_DATABASE)\n prefix = str(uuid3)[:8]\n \n # create uuidn\n uuidn = prefix + \"-\" + str(uuid.uuid4())[9:] \n\n user = self.model(\n username = username, \n email = self.normalize_email(email),\n nid = str(uuidn),\n )\n\n user.set_password(password)\n user.staff = is_staff\n user.admin = is_superuser\n\n if settings.SHARDING_USER_MODEL:\n user.save(using=str(db.get_name))\n db.count = db.count + 1\n db.save()\n else:\n user.save(using=self._db)\n return user", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def createsuperuser(request):\r\n\r\n user = models.User()\r\n user.username = 'admin' # change later\r\n user.email = '[email protected]'\r\n user.set_password(\"qazwsxed\")\r\n user.is_staff = True\r\n user.is_superuser = True\r\n\r\n if models.User.objects.filter(username=user.username).exists():\r\n return redirect('/')\r\n else:\r\n user.save()\r\n return redirect('/console')", "def create_superuser(self, email, password):\n if password is None:\n raise TypeError('Superusers must have a password')\n user = self.create_user(email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def test_creating_a_new_super_user(self):\n\n user = get_user_model().objects.create_superuser(\n email=\"[email protected]\", password=\"Test12345\"\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)", "def create_user(request):\n post = request.POST.dict()\n username = post.get('username')\n if username is None:\n response = {'status':-1, 'status_message':'No username specified.'}\n return HttpResponse(json.dumps(response))\n password = post.get('password')\n if password is None:\n response = {'status': -1, 'status_message': 'No password specified.'}\n return HttpResponse(json.dumps(response))\n user_obj = User(\n username=username,\n first_name=post.get('first_name'),\n last_name=post.get('last_name'),\n email=post.get('email'),\n is_superuser=json.loads(post.get('is_admin', 'false')),\n is_active=json.loads(post.get('is_enabled', 'false'))\n )\n user_obj.set_password(password)\n user_obj.save()\n response = {'status':1, 'status_message':'Success'}\n return 
HttpResponse(json.dumps(response))", "def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n print email, '*****'\n email = self.normalize_email(email)\n print email\n print '*****'\n user = self.model(email=email, is_staff=is_staff, is_active=True,\n is_superuser=is_superuser, **extra_fields)\n print user\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_superuser():\n try:\n User.objects.get(is_superuser=True)\n except User.DoesNotExist:\n login = os.getenv('SUPERUSER_NAME', 'admin')\n password = os.getenv('SUPERUSER_PASSWORD', 'passw0rd')\n User.objects.create_superuser(username=login, password=password, email='')", "def create_superuser(self, email, rut, nombres, apellidos, password):\n user = self.create_user(\n email,\n rut=rut,\n nombres=nombres,\n apellidos=apellidos,\n password=password,\n )\n user.is_admin = True # Esta es la unica diferencia entre un superusuario y un usuario normal.\n user.save(using=self._db)\n return user", "def create_superuser(self, email, password):\n\n user = self.create_user(email, password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, username, password):\n user = self.create_user(username=username, password=password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user", "def create_superuser(self, email, name, password):\r\n user = self.create_user(\r\n email,\r\n password=password,\r\n name=name,\r\n )\r\n user.is_admin = True\r\n user.save(using=self._db)\r\n return user", "def create_superuser(self, email, password):\n user = self.create_user(email, password)\n\n user.is_superuser = True # 'is_superuser' is created automatically\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "def create_superuser(self, email, password=None):\n user = self.create_user(email, password)\n user.is_active = True\n user.is_staff = True\n user.save(using=self._db)\n return user", "def create_superuser(self, username, email, persona_id, nombre_completo, password, **kwargs):\n return self._create_user(username, email, persona_id, nombre_completo, password, True, True, **kwargs)", "def create_superuser(self, email, password):\n user = self.create_user(email, password=password)\n user.is_admin = True\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user", "def create_superuser(self, email, name, password):\n user = self.create_user(email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user" ]
[ "0.770437", "0.76964134", "0.76661605", "0.76653856", "0.7655734", "0.75484675", "0.7541768", "0.75341594", "0.75114566", "0.7500617", "0.74416953", "0.74323356", "0.7431842", "0.74239194", "0.7409911", "0.73949206", "0.73942757", "0.73923373", "0.7387035", "0.7382646", "0.73744804", "0.7365813", "0.73475444", "0.73332185", "0.73324865", "0.7328073", "0.73227084", "0.73065895", "0.72699785", "0.726456", "0.72638357", "0.7257338", "0.7249744", "0.7245342", "0.7242802", "0.72373855", "0.72349113", "0.7232337", "0.7231011", "0.7229476", "0.7228694", "0.7228187", "0.72267866", "0.72250044", "0.722433", "0.7223314", "0.7223198", "0.72176385", "0.7214639", "0.7213297", "0.7213297", "0.7213297", "0.7209716", "0.7205899", "0.72038233", "0.7193806", "0.7189163", "0.71851236", "0.71842396", "0.7183079", "0.71805316", "0.7174754", "0.7173153", "0.71729034", "0.7172272", "0.71668494", "0.7162274", "0.71600044", "0.71508867", "0.7150592", "0.71491766", "0.71482354", "0.7146861", "0.7144921", "0.7141378", "0.71409786", "0.7136054", "0.71344084", "0.7134219", "0.71310943", "0.71295387", "0.7114425", "0.7112069", "0.71033084", "0.71010053", "0.7096601", "0.7095207", "0.70921063", "0.7088805", "0.7083245", "0.70828253", "0.7080335", "0.7074411", "0.7069924", "0.706861", "0.70672685", "0.7067191", "0.7064277", "0.7061839", "0.70596385", "0.7057032" ]
document_score: 0.0
document_rank: -1
return least common multiple of two integers
def lcm(x: int, y: int) -> int:
    assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0
    return int(x * y / gcd(x, y))
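For illustration only (not part of the dataset record): the document above computes the least common multiple through lcm(x, y) = x * y / gcd(x, y) and assumes a gcd helper is already in scope. A minimal usage sketch in Python, with math.gcd standing in for that helper:

from math import gcd  # stand-in for the gcd helper the document relies on

def lcm(x: int, y: int) -> int:
    # same logic as the document above: lcm(x, y) = x * y / gcd(x, y)
    assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0
    return int(x * y / gcd(x, y))

assert lcm(4, 6) == 12
assert lcm(21, 6) == 42

Many of the hard negatives stored below use integer floor division (a * b // gcd(a, b)) rather than int(a * b / gcd(a, b)); the two agree on small inputs, but // avoids float rounding for very large operands.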
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
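The metadata above marks the record for a triplet objective over (query, document, negatives). As a hedged sketch only — the loader below is an assumed illustration, not something shipped with this dataset — such a record could be unpacked into training triplets like this:

def to_triplets(record: dict):
    # one (anchor, positive, negative) triple per hard negative, following the
    # ["query", "document", "negatives"] layout declared in the record metadata
    query, positive = record["query"], record["document"]
    for negative in record["negatives"]:
        yield query, positive, negative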
[ "def least_common_multiple(number1, number2):\n return number1 * number2 // math.gcd(number1, number2)", "def lowest_common_multiple(a, b):\n # 两个数字相乘后除以最大公约数 = 两个数字的最小公倍数\n return a * b // gcd(a, b)", "def least_common_multiple2(number1, number2, number3, number4):\n return least_common_multiple((number1 * number2 // math.gcd(number1, number2)),\n (number3 * number4 // math.gcd(number3, number4)))", "def lcm(a: int, b: int):\n return (a * b) // euclid(a, b)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def lcm(a: int, b: int) -> int:\n return (a * b) // gcd(a, b)", "def least_common_multiple_func(self, other_denominator) -> int:\n least_common_mult = self.denominator_b\n while (least_common_mult % self.denominator_b + least_common_mult % other_denominator) != 0:\n least_common_mult += 1\n return least_common_mult", "def lcm(a, b):\r\n return a * b // gcd(a, b)", "def lcm(a, b):\n\treturn a * b // gcm(a, b)", "def lcm_for_two(a, b):\n\t\n\treturn a // gcd_for_two(a, b) * b", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(a, b):\n if not isinstance(a, int):\n a = int(a)\n if not isinstance(b, int):\n b = int(b)\n return abs(a*b) / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def greatest_common_divisor(a: int, b: int) -> int:\n#[SOLUTION]\n while b:\n a, b = b, a % b\n return a", "def lcm(a, b):\n\n if a == b == 0:\n return 0\n\n return (a * b) // gcd(a, b)", "def lcm(a, b):\r\n return a * b / fr.gcd(a, b)", "def lcm(a, b):\n if not (a or b):\n return 0\n else:\n a = abs(a)\n b = abs(b)\n return a*b/gcd(a,b)", "def lcm(x, y):\n return x*y//gcd(x,y)", "def _lcm_f(a, b):\n return int((a * b) / _gcd_f(a, b))", "def lcm(a, b):\n\n\treturn (a * b)/gcd(a, b)", "def gcd1(n1, n2):\n\n best = 1\n\n for i in range(1,n2+1):\n if n1 % i == 0 and n2 % i == 0 and i > best:\n best = i \n \n return best", "def multiple(a, b):\n from fractions import gcd\n def lcm(x,y):\n \treturn (x*y)//gcd(x,y)\n #return lcm(a,b)\n \n def gcd(x,y):\n if y > x:\n x, y = y, x\n while y != 0:\n x, y = y, x % y\n return x\n return (a*b) // gcd(a,b)", "def lcm2(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return abs(a*b) / gcd(a, b) if a and b else 0", "def gcd_algo(a,b):\n i = max(a,b)\n j = min(a,b)\n\n if j == 0:\n return i\n else:\n reminder = i%j\n return gcd_algo(j, reminder)", "def lcm(a,b):\n if a==0:\n return b\n if b==0:\n return a\n c=gcd(a,b)\n lcm=a*b/c\n return lcm", "def calculate_lcm(a, b):\n return a * b / calculate_gcd(a, b)", "def find_gcd(a, b):\n\n gcd = min(a, b)\n\n # Keep looping until gcd divides both a & b evenly\n while a % gcd != 0 or b % gcd != 0:\n gcd -= 1\n\n return gcd", "def lcm(\n numbers: List[int]\n) -> int:\n current_product = 1\n current_gcd = 1\n for num in numbers:\n current_gcd = gcd(current_gcd, num)\n current_product *= num\n return current_product // current_gcd", "def greatest_common_divisor(x: int, y: int) -> int:\n while y 
!= 0:\n (x, y) = (y, x % y)\n return x", "def smallest_multiple(n):\n return reduce(lowest_common_multiple, range(1, n+1))", "def lcm(num1, num2):\n return num1 * num2 // fractions.gcd(num1,num2)", "def lcm(*numbers): \n def lcm(a, b):\n return (a * b) // gcd(a, b)\n return reduce(lcm, numbers, 1)", "def lcm(self, a, b):\n return a*b", "def main():\n num_1 = 12\n num_2 = 76\n print(find_lcm(num_1, num_2))", "def find_lcm(num_1, num_2):\n max_num = num_1 if num_1 > num_2 else num_2\n lcm = max_num\n while True:\n if ((lcm % num_1 == 0) and (lcm % num_2 == 0)):\n break\n lcm += max_num\n return lcm", "def lcm(num1, num2):\n\n if num1 > num2:\n bigger = num1\n else:\n bigger = num2\n while True:\n if bigger % num1 == 0 and bigger % num2 == 0:\n return bigger\n bigger += 1", "def gcd(a: int, b: int) -> int:\n while a != 0:\n a, b = b % a, a\n return b", "def _gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def lcm(*args):\r\n\treturn functools.reduce(lambda x, y: x * y / gcd(x, y), args)", "def lcm(*numbers):\n def lcm(a, b):\n return (a * b) // gcd(a, b)\n return reduce(lcm, numbers, 1)", "def vikgcd(a, b):\n i = 0\n while b:\n a, b = b, a % b\n i += 1\n return a, i", "def lcm(x, y):\n\n # choose the greater number\n if x > y:\n greater = x\n else:\n greater = y\n\n while(True):\n if((greater % x == 0) and (greater % y == 0)):\n lcm = greater\n break\n greater += 1\n\n return lcm", "def gcd(a: int, b: int) -> int:\n if a == 1 or b == 1:\n return 1\n if a == b:\n return a\n if a > b:\n return gcd(a % b, b)\n else:\n return gcd(a, b % a)", "def find_lonely_integer(numbers):\n lonely_integer = reduce(lambda x, y: x ^ y, numbers)\n return lonely_integer", "def _gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd_for_two(a, b):\n\t\n\twhile b != 0:\n\t\ttemp = b\n\t\tb = a % b\n\t\ta = temp\n\treturn a", "def find_least_common_number(a, b, c):\n i = j = k = 0\n while i < len(a) and j < len(b) and k < len(c):\n x, y, z = a[i], b[j], c[k]\n if x == y == z:\n return x\n m = max(x, y, z)\n if x < m:\n i += 1\n if y < m:\n j += 1\n if z < m:\n k += 1\n return -1", "def lcm(*numbers):\n def lcm(a, b):\n return (a * b) // gcd(a, b)\n\n return reduce(lcm, numbers, 1)", "def lcm(x, y):\n lcm = (x*y)//gcd(x,y)\n return(lcm)", "def smallest_common_divisor(num):\n\n minDiv = num * (num-1)\n divisor = minDiv\n num -= 2\n\n while num > 1:\n if divisor % num == 0:\n num -= 1\n minDiv = divisor\n else:\n divisor += minDiv\n \n return minDiv", "def gcd(a,b):\r\n while b:\r\n a, b = b, a % b\r\n return a", "def lcm(x, y):\r\n\r\n # choose the greater number\r\n if x > y:\r\n greater = x\r\n else:\r\n greater = y\r\n\r\n while(True):\r\n if((greater % x == 0) and (greater % y == 0)):\r\n lcm = greater\r\n break\r\n\r\n greater += 1\r\n\r\n return lcm", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(*numbers):\n # Am I terrible for doing it this way?\n from math import gcd\n\n return reduce(gcd, numbers)", "def _gcd(self, a, b) -> int:\n (a, b) = (max(a, b), min(a, b))\n while b > 0:\n (a, b) = (b, a % b)\n return a", "def gcd(a, b):\n while a > 0:\n if a < b:\n a, b = b, a\n a %= b\n return b", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd_multiple_nums(*numbers: int) -> int:\n\n def _gcd(a: int, b: int) -> int:\n\n if a < b:\n a, b, = b, a\n if b == 0:\n return a\n return _gcd(b, a % b)\n\n result = 0\n for number in numbers:\n result = _gcd(result, number)\n return result", "def gcd(a, b):\n r0, r1 = abs(a), abs(b)\n while r1 > 0:\n r0, r1 = r1, r0 % r1\n 
return r0", "def lcm(x, y):\n\n # choose the greater number\n if x > y:\n greater = x\n else:\n greater = y\n\n while True:\n if (greater % x == 0) and (greater % y == 0):\n lcm = greater\n break\n greater += 1\n\n return lcm", "def gcd(a, b):\r\n while b: \r\n a, b = b, a % b\r\n return a", "def lcm(x, y):\n\n # choose the greater number\n if x > y:\n greater = x\n else:\n greater = y\n\n while (True):\n if ((greater % x == 0) and (greater % y == 0)):\n lcm = greater\n break\n greater += 1\n\n return lcm", "def gcd(num_a, num_b):\n while num_b:\n num_a, num_b = num_b, num_a % num_b\n return num_a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a: int, b: int):\n return b if a % b == 0 else gcd(b, a % b)", "def _gcd(self, a, b):\n while b != 0:\n a, b = b, a % b\n return a", "def gcd(x:int, y:int) -> int:\n if x == 0:\n return y\n return gcd(y % x, x)", "def gcd(num_a: int, num_b: int) -> int:\n while num_a != 0:\n num_a, num_b = num_b % num_a, num_a\n return num_b", "def algo(a: int, b: int) -> int:\n\n while b != 0:\n a, b = b, a % b\n return a", "def gcd(a: int, b: int) -> int:\n while a != b:\n if a > b:\n a -= b\n else:\n b -= a\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n assert a > 0 and b > 0\n\n while a:\n a, b = b % a, a\n\n return b", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\r\n while b != 0:\r\n a, b = b, a % b\r\n return a", "def gcd(a, b):\r\n while b != 0:\r\n a, b = b, a % b\r\n return a", "def gcd(a, b):\r\n while b != 0:\r\n a, b = b, a % b\r\n return a", "def lcm(x,y):\n #Initialize counter & condition\n counter = 1\n condition = False\n #While loop iterates until LCM condition is satisfied\n while condition == False :\n counter = counter + 1\n condition = (counter % x == 0) and (counter % y == 0)\n return counter", "def euclidean_algorithm(a, b):\n if a == 0: return b\n if b == 0: return a\n r = a % b\n return euclidean_algorithm(b, r)", "def gcd(x, y):\n while y != 0:\n (x, y) = (y, x % y)\n return int(x)", "def hcf(num1, num2):\n\n if num1 > num2:\n smaller = num2\n else:\n smaller = num1\n for i in range(1, smaller + 1):\n if ((num1 % i == 0) and (num2 % i == 0)):\n return i" ]
[ "0.8421955", "0.8331325", "0.7644219", "0.76081353", "0.75759864", "0.75759864", "0.75638235", "0.7436232", "0.73484886", "0.7300214", "0.7295642", "0.72913396", "0.72913396", "0.72913396", "0.72913396", "0.72913396", "0.72913396", "0.72913396", "0.72913396", "0.72913396", "0.72814834", "0.72814834", "0.72279185", "0.71809876", "0.71809876", "0.71809876", "0.7156004", "0.7151691", "0.71416146", "0.7138799", "0.70950145", "0.7093722", "0.70758426", "0.70705104", "0.7068547", "0.70677495", "0.7031342", "0.70136267", "0.7010378", "0.6957251", "0.6940257", "0.6933254", "0.69311684", "0.692272", "0.6868364", "0.6840499", "0.68381757", "0.6814765", "0.6807544", "0.67814976", "0.67672664", "0.67573607", "0.67554337", "0.6745785", "0.67262346", "0.6716315", "0.6714948", "0.6714798", "0.67140484", "0.6711574", "0.67080355", "0.6700152", "0.6699618", "0.66695946", "0.6659182", "0.6655974", "0.66504085", "0.6641675", "0.6640393", "0.6637542", "0.6633278", "0.6626666", "0.66263217", "0.6624927", "0.66221875", "0.6618167", "0.66100675", "0.65949917", "0.65949917", "0.65949917", "0.65949917", "0.6589876", "0.65895873", "0.65860635", "0.6575947", "0.6556229", "0.65533704", "0.6552703", "0.6552434", "0.6551632", "0.6551632", "0.6551632", "0.6551632", "0.65509456", "0.65509456", "0.65509456", "0.65502137", "0.6545973", "0.65420747", "0.6541259" ]
document_score: 0.71541774
document_rank: 27
return the greatest common divisor of two integers
def gcd(x: int, y: int) -> int:
    assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0
    while y != 0:
        x, y = y, x % y
    return x
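The document above is the iterative Euclidean algorithm. A standalone check (the definition is repeated so the asserts run on their own; the test values are illustrative, not drawn from the dataset):

def gcd(x: int, y: int) -> int:
    # Euclidean algorithm: repeatedly replace (x, y) with (y, x % y) until y reaches 0
    assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0
    while y != 0:
        x, y = y, x % y
    return x

# trace: 1071 % 462 = 147, 462 % 147 = 21, 147 % 21 = 0, so the loop returns 21
assert gcd(1071, 462) == 21
assert gcd(48, 18) == 6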
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greatest_common_divisor(x: int, y: int) -> int:\n while y != 0:\n (x, y) = (y, x % y)\n return x", "def greatest_common_divisor(a: int, b: int) -> int:\n#[SOLUTION]\n while b:\n a, b = b, a % b\n return a", "def gcd_algo(a,b):\n i = max(a,b)\n j = min(a,b)\n\n if j == 0:\n return i\n else:\n reminder = i%j\n return gcd_algo(j, reminder)", "def _gcd(self, a, b) -> int:\n (a, b) = (max(a, b), min(a, b))\n while b > 0:\n (a, b) = (b, a % b)\n return a", "def find_gcd(a, b):\n\n gcd = min(a, b)\n\n # Keep looping until gcd divides both a & b evenly\n while a % gcd != 0 or b % gcd != 0:\n gcd -= 1\n\n return gcd", "def gcd(a: int, b: int) -> int:\n while a != 0:\n a, b = b % a, a\n return b", "def get_gcd(a, b):\n return a if b == 0 else get_gcd(b, a % b)", "def gcd(a: int, b: int) -> int:\n if a == 1 or b == 1:\n return 1\n if a == b:\n return a\n if a > b:\n return gcd(a % b, b)\n else:\n return gcd(a, b % a)", "def lcm(a: int, b: int) -> int:\n return (a * b) // gcd(a, b)", "def GCD(a, b) -> int:\n\n if a == 0:\n return b\n\n return GCD(b % a, a)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def gcd(a: int, b: int) -> int:\n while a != b:\n if a > b:\n a -= b\n else:\n b -= a\n return a", "def _gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n r0, r1 = abs(a), abs(b)\n while r1 > 0:\n r0, r1 = r1, r0 % r1\n return r0", "def gcd(num_a, num_b):\n while num_b:\n num_a, num_b = num_b, num_a % num_b\n return num_a", "def gcd(num_a: int, num_b: int) -> int:\n while num_a != 0:\n num_a, num_b = num_b % num_a, num_a\n return num_b", "def gcd(a,b):\n\ta = abs(a); b = abs(b)\n\twhile (a > 0):\n\t\tb = b % a\n\t\ttmp=a; a=b; b=tmp\n\treturn b", "def lowest_common_multiple(a, b):\n # 两个数字相乘后除以最大公约数 = 两个数字的最小公倍数\n return a * b // gcd(a, b)", "def gcd(a: int, b: int):\n return b if a % b == 0 else gcd(b, a % b)", "def gcd(x:int, y:int) -> int:\n if x == 0:\n return y\n return gcd(y % x, x)", "def gcd_for_two(a, b):\n\t\n\twhile b != 0:\n\t\ttemp = b\n\t\tb = a % b\n\t\ta = temp\n\treturn a", "def _gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def _gcd_f(a, b):\n return a if b == 0 else _gcd_f(b, a % b)", "def gcd(a, b):\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b", "def __gcd(self, a, b):\n while (b != 0):\n\t\t\ttmp = a\n\t\t\ta = b\n\t\t\tb = tmp % b\n return -a if a < 0 else a", "def gcd(x, y):\n while y != 0:\n (x, y) = (y, x % y)\n return int(x)", "def gcd(a, b):\n assert a > 0 and b > 0\n\n while a:\n a, b = b % a, a\n\n return b", "def gcd(a, b):\n __check_args(a, b)\n\n if b > a:\n return __calc_gcd(b, a)\n else:\n return __calc_gcd(a, b)", "def gcd(a, b):\r\n while b != 0:\r\n a, b = b, a % b\r\n return a", "def gcd(a, b):\r\n while b != 0:\r\n a, b = b, a % b\r\n return a", "def gcd(a, b):\r\n while b != 0:\r\n a, b = b, a % b\r\n return a", "def gcd(a, b):\n if a == 0:\n return b\n if b == 0:\n return a\n\n if isinstance(a, int) and isinstance(b, int):\n _gcd = __internal_gcd\n else:\n def _gcd(g, r):\n if r == 0:\n return g\n else:\n return _gcd(r, g % r)\n\n return _gcd(a, b)", "def gcd(a, b):\r\n\r\n if a > b:\r\n a, b = b, a\r\n\r\n while a:\r\n a, b = b % a, a\r\n\r\n return b", "def _lcm_f(a, b):\n return int((a * b) / _gcd_f(a, b))", "def lcm(a: int, b: int):\n return (a * b) // euclid(a, b)", "def gcd(a, b):\n while b:\n a, b = b, a % b\n 
return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a,b):\r\n while b:\r\n a, b = b, a % b\r\n return a", "def gcd(a, b):\n\tif a == 0:\n\t\treturn b\n\n\treturn gcd(b%a, a)", "def gcd(a,b):\r\n\tif a == 0:\r\n\t\treturn abs(b)\r\n\treturn abs(gcd(b % a, a))", "def _gcd(self, a, b):\n while b != 0:\n a, b = b, a % b\n return a", "def gcd(a, b):\r\n while b: \r\n a, b = b, a % b\r\n return a", "def gcd1(n1, n2):\n\n best = 1\n\n for i in range(1,n2+1):\n if n1 % i == 0 and n2 % i == 0 and i > best:\n best = i \n \n return best", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(a, b):\r\n return a * b // gcd(a, b)", "def euclidean_gcd_recursive(a: int, b: int) -> int:\n return a if b == 0 else euclidean_gcd_recursive(b, a % b)", "def euclidean_gcd(num1: int, num2: int) -> int:\n\n if num1 < num2:\n num1, num2 = num2, num1\n if num2 == 0:\n return num1\n return euclidean_gcd(num2, num1 % num2)", "def gcd(a,b):\n while b > 0:\n a, b = b, a % b\n return a", "def gcd(a,b):\n while b > 0:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b > 0:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b > 0:\n a, b = b, a % b\n return a", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a%b)", "def gcd(a, b):\n if a == 0:\n return b\n return gcd(b % a, a)", "def gcd(a, b):\n while a > 0:\n if a < b:\n a, b = b, a\n a %= b\n return b", "def lcm(a, b):\n if not isinstance(a, int):\n a = int(a)\n if not isinstance(b, int):\n b = int(b)\n return abs(a*b) / gcd(a, b)", "def euclidean_gcd(a: int, b: int) -> int:\n\n if a == 0 or b == 0:\n return a + b\n if a == b:\n return a\n if a < b:\n a, b = b, a\n mod = a % b\n if mod == 0:\n return b\n return euclidean_gcd(b, mod)", "def gcd(a, b):\n if b < 0:\n b = -b\n r = a % b\n while r:\n a = b\n b = r\n r = a % b\n return b", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b:\n a, b = b, a % b\n return a", "def gcd(self, num1, num2):\n if num1 < num2:\n num1, num2 = num2, num1\n while num2 != 0:\n num1, num2 = num2, num1 % num2\n return num1", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n if a < b:\n raise ValueError(\"a must be larger than b\")\n if b == 0:\n return a\n else:\n return gcd(b, a % b)", "def gcd(a,b):\n\n if b == 0:\n return a\n else:\n return gcd(b, a % b)", "def gcd(x, y):\n #assert x >= 0 and y >= 0 and type(x) == int and type(y) == int, \"Error: invalid input\"\n\n if y > x:\n return gcd(y, x)\n elif y == 0:\n return x\n else:\n return gcd(y, x % y)", "def gcd(a, b):\n while b != 0:\n t = b\n b = a % b\n a = t\n return(a)", "def gcd(a, b):\n\n while b != 0:\n a, b = b, a % b\n return a", "def gcd(a, b):\n while b: \n a, b = b, a % b\n return a", "def gcd(a, b):\n if not a:\n return b\n 
else:\n a = abs(a)\n b = abs(b)\n return gcd(b%a, a)", "def gcd(a, b):\n return a if b == 0 else gcd(b, a % b)", "def recursive_gcd(a, b):\n if a == 0:\n return b\n\n return recursive_gcd(b % a, a)", "def lcm(a, b):\n\n if a == b == 0:\n return 0\n\n return (a * b) // gcd(a, b)", "def gcd_ea(n1, n2):\n\n # take in 2 integers\n # find the larger number\n # divide n1 by n2\n # store remainder\n # replace n1 by n2\n # replace n2 with remainder\n # repeat until r == 0\n # return n2 when r == 0\n\n\n if n1 > 0 and n2 > 0:\n if n1 > n2:\n a = n1\n b = n2\n else:\n a = n2\n b = n1\n\n remainder = a % b\n\n while remainder > 0:\n a, b = b, remainder\n remainder = a % b\n\n return b", "def lcm(a, b):\r\n return a * b / fr.gcd(a, b)", "def lcm(x, y):\n return x*y//gcd(x,y)", "def lcm(a, b):\n if not (a or b):\n return 0\n else:\n a = abs(a)\n b = abs(b)\n return a*b/gcd(a,b)", "def gcd(x, y):\n if x < y: x, y = y, x\n while y: x, y = y, x%y\n return x", "def gcd(x,y):\n\tif x%y == 0:\n\t\treturn y\n\treturn gcd(y, x%y)", "def lcm(x: int, y: int) -> int:\n assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0\n return int(x * y / gcd(x, y))", "def lcm(a, b):\n\n\treturn (a * b)/gcd(a, b)", "def gcd(x, y):\n x, y = abs(x), abs(y)\n while y:\n x, y = y, x % y\n return x", "def gcd(a, b):\n \"*** YOUR CODE HERE ***\"\n if a % b == 0:\n return b\n else:\n return gcd(b, a % b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)" ]
[ "0.85697377", "0.8548809", "0.80010635", "0.7985648", "0.7972896", "0.7921842", "0.78888613", "0.7888709", "0.78645015", "0.78455526", "0.7827044", "0.7827044", "0.7821854", "0.7792171", "0.7777419", "0.7754992", "0.775436", "0.775147", "0.77433187", "0.77408284", "0.77301985", "0.7721985", "0.7718121", "0.770866", "0.7701481", "0.7699024", "0.7679947", "0.76784587", "0.76730186", "0.76515007", "0.76515007", "0.76515007", "0.76510495", "0.76495063", "0.76351285", "0.7634662", "0.7628555", "0.76279855", "0.7626755", "0.76204014", "0.7616374", "0.76019585", "0.76007134", "0.760014", "0.75984323", "0.75984323", "0.75963354", "0.75954574", "0.75902766", "0.7584502", "0.7584502", "0.7583956", "0.7583956", "0.7580135", "0.7580135", "0.7580135", "0.7580135", "0.7580135", "0.7580135", "0.7580135", "0.7580135", "0.7580135", "0.7575358", "0.7574284", "0.75498134", "0.75484586", "0.75470525", "0.7546566", "0.75464994", "0.75464994", "0.75464994", "0.75464994", "0.75277865", "0.7525089", "0.7525089", "0.7525089", "0.7525089", "0.75228584", "0.75223815", "0.7520965", "0.7518465", "0.75098664", "0.7505232", "0.74991316", "0.7493425", "0.7483922", "0.7472236", "0.74661475", "0.7461335", "0.7456799", "0.74451107", "0.7440327", "0.74351007", "0.7428801", "0.73978066", "0.73900735", "0.73865", "0.73836344", "0.73836344", "0.73836344" ]
document_score: 0.75848126
document_rank: 49
return least common multiple of a range of integers
def lcms(argg: range) -> int:
    l = 1
    for arg in argg:
        l = lcm(l, arg)
    return l
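lcms folds the pairwise lcm over a range. A self-contained check, with math.gcd standing in for the helpers defined in the earlier records; 2520 is the well-known least common multiple of 1 through 10:

from math import gcd

def lcm(x: int, y: int) -> int:
    # pairwise lcm, equivalent to the earlier lcm document
    return x * y // gcd(x, y)

def lcms(argg: range) -> int:
    # fold the pairwise lcm over every integer in the range
    l = 1
    for arg in argg:
        l = lcm(l, arg)
    return l

assert lcms(range(1, 11)) == 2520  # smallest positive integer divisible by 1..10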
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smallest_multiple(n):\n return reduce(lowest_common_multiple, range(1, n+1))", "def least_common_multiple(number1, number2):\n return number1 * number2 // math.gcd(number1, number2)", "def lowest_common_multiple(a, b):\n # 两个数字相乘后除以最大公约数 = 两个数字的最小公倍数\n return a * b // gcd(a, b)", "def find_smallest_evenly_divisible_integer_of_range(range_i, range_f):\n max_power_primes = find_max_power_prime_in_range(range_i, range_f)\n sum = 1\n for x in max_power_primes:\n sum *= math.pow(x,max_power_primes[x])\n \n return sum", "def least_common_multiple2(number1, number2, number3, number4):\n return least_common_multiple((number1 * number2 // math.gcd(number1, number2)),\n (number3 * number4 // math.gcd(number3, number4)))", "def least_common_multiple_func(self, other_denominator) -> int:\n least_common_mult = self.denominator_b\n while (least_common_mult % self.denominator_b + least_common_mult % other_denominator) != 0:\n least_common_mult += 1\n return least_common_mult", "def lcm(\n numbers: List[int]\n) -> int:\n current_product = 1\n current_gcd = 1\n for num in numbers:\n current_gcd = gcd(current_gcd, num)\n current_product *= num\n return current_product // current_gcd", "def smallestMultiple(x):\r\n ans = 1\r\n for prime in xprime(x):\r\n ans *= prime**int(log(x, prime))\r\n\r\n return ans", "def main(n=20):\n return functools.reduce(lcm, range(1, 20))", "def gcd1(n1, n2):\n\n best = 1\n\n for i in range(1,n2+1):\n if n1 % i == 0 and n2 % i == 0 and i > best:\n best = i \n \n return best", "def problem085():\n target = 2000000\n end = eulerlib.sqrt(target) + 1\n gen = ((w, h) for w in range(1, end) for h in range(1, end))\n\n def func(wh):\n return abs(num_rectangles(*wh) - target)\n\n ans = min(gen, key=func)\n return ans[0] * ans[1]", "def lcm(*numbers): \n def lcm(a, b):\n return (a * b) // gcd(a, b)\n return reduce(lcm, numbers, 1)", "def lcm(*values):\n\tvalues = set([abs(int(v)) for v in values])\n\tif values and 0 not in values:\n\t\tn = n0 = max(values)\n\t\tvalues.remove(n)\n\t\twhile any( n % m for m in values ):\n\t\t\tn += n0\n\t\treturn n\n\treturn 0", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def lcm(a: int, b: int) -> int:\n return a * b // gcd(a, b)", "def smallest_common_divisor(num):\n\n minDiv = num * (num-1)\n divisor = minDiv\n num -= 2\n\n while num > 1:\n if divisor % num == 0:\n num -= 1\n minDiv = divisor\n else:\n divisor += minDiv\n \n return minDiv", "def lcm(a: int, b: int) -> int:\n return (a * b) // gcd(a, b)", "def lcm(a: int, b: int):\n return (a * b) // euclid(a, b)", "def gcd_algo(a,b):\n i = max(a,b)\n j = min(a,b)\n\n if j == 0:\n return i\n else:\n reminder = i%j\n return gcd_algo(j, reminder)", "def lcm(*numbers):\n def lcm(a, b):\n return (a * b) // gcd(a, b)\n return reduce(lcm, numbers, 1)", "def lcm(*numbers):\n def lcm(a, b):\n return (a * b) // gcd(a, b)\n\n return reduce(lcm, numbers, 1)", "def lcm(x: int, y: int) -> int:\n assert isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0\n return int(x * y / gcd(x, y))", "def gcd_multiple_nums(*numbers: int) -> int:\n\n def _gcd(a: int, b: int) -> int:\n\n if a < b:\n a, b, = b, a\n if b == 0:\n return a\n return _gcd(b, a % b)\n\n result = 0\n for number in numbers:\n result = _gcd(result, number)\n return result", "def lcm(a, b):\r\n return a * b // gcd(a, b)", "def lcm(*args):\r\n\treturn functools.reduce(lambda x, y: x * y / gcd(x, y), args)", "def gcd(*numbers):\n # Am I terrible for doing it this way?\n from math import gcd\n\n return reduce(gcd, numbers)", 
"def four_num_lcm(*numbers):\n\n return reduce(calculate_lcm,numbers)", "def lcm(x, y):\n return x*y//gcd(x,y)", "def smallest_number_divisible(n):\n\tprime_numbers = generate_prime_less_than_n(n)\n\tlog_n = math.log(n)\n\tres = 1\n\tfor pi in prime_numbers:\n\t\tres *= math.pow(pi, math.floor(log_n/math.log(pi)))\n\treturn res", "def smallest_multiple(n):\n if n == 1:\n return 1\n res = 1\n primes = sieve_of_eratosthenes(n)\n for p in primes:\n i = 1\n while p**(i+1) <= n:\n i += 1\n res *= p**i\n return res", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(numbers):\r\n if len(numbers) == 2: \r\n num0 = numbers[0] \r\n num1 = numbers[1] \r\n return num0 * num1 / gcd(num0, num1) \r\n else: \r\n for i in range(len(numbers)): \r\n return lcm([numbers[0], lcm(numbers[1:])])", "def main():\n num_1 = 12\n num_2 = 76\n print(find_lcm(num_1, num_2))", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(a, b):\n return (a * b) // gcd(a, b)", "def lcm(multiple_list):\n res = 1\n for n in multiple_list:\n if res % n != 0:\n res *= n//fractions.gcd(res,n)\n return res", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(a, b):\n return a * b / gcd(a, b)", "def lcm(x, y):\n\n # choose the greater number\n if x > y:\n greater = x\n else:\n greater = y\n\n while(True):\n if((greater % x == 0) and (greater % y == 0)):\n lcm = greater\n break\n greater += 1\n\n return lcm", "def lcm(a, b):\n if not isinstance(a, int):\n a = int(a)\n if not isinstance(b, int):\n b = int(b)\n return abs(a*b) / gcd(a, b)", "def greatest_common_divisor(a: int, b: int) -> int:\n#[SOLUTION]\n while b:\n a, b = b, a % b\n return a", "def lcm(a, b):\r\n return a * b / fr.gcd(a, b)", "def fast_compute_totients(bound):\n\n # Populate the initial list with the leading factor of n.\n results = list(range(0, bound+1))\n\n # Get the list of primes up to the bound.\n primes = primes_up_to(bound)\n\n for p in primes:\n for m in multiple_gen(p):\n if m > bound:\n break\n results[m] = (results[m] // p) * (p - 1)\n return print(sum(results), '\\n',results[:100])", "def lcm(a, b):\n\treturn a * b // gcm(a, b)", "def lcm(numbers):\r\n if len(numbers) == 2:\r\n num0 = numbers[0]\r\n num1 = numbers[1]\r\n return num0 * num1 / gcd(num0, num1)\r\n else:\r\n for i in range(len(numbers)):\r\n return lcm([numbers[0], lcm(numbers[1:])])", "def lcm(x, y):\r\n\r\n # choose the greater number\r\n if x > y:\r\n greater = x\r\n else:\r\n greater = y\r\n\r\n while(True):\r\n if((greater % x == 0) and (greater % y == 0)):\r\n lcm = greater\r\n break\r\n\r\n greater += 1\r\n\r\n return lcm", "def minOperations(n):\n count = 0\n min_val = 2\n if n < 2:\n return 0\n while min_val <= n:\n if (n % min_val == 0):\n count = count + min_val\n n = n / min_val\n else:\n min_val = min_val + 1\n return (count)", "def lcm(x, y):\n\n # choose the greater number\n if x > y:\n greater = x\n else:\n greater = y\n\n while True:\n if (greater % x == 0) and (greater % y == 0):\n lcm = greater\n break\n greater += 1\n\n return lcm", "def findExtremeDivisor(n1, n2):\n\n minVal, maxVal = (None, None)\n for i in range (2, min(n1, n2) + 1):\n if n1 % i == 0 and 
n2 % i == 0:\n if minVal == None:\n minVal = i\n maxVal = i\n return (minVal, maxVal)", "def lcm(a, b):\n if not (a or b):\n return 0\n else:\n a = abs(a)\n b = abs(b)\n return a*b/gcd(a,b)", "def find_lcm(num_1, num_2):\n max_num = num_1 if num_1 > num_2 else num_2\n lcm = max_num\n while True:\n if ((lcm % num_1 == 0) and (lcm % num_2 == 0)):\n break\n lcm += max_num\n return lcm", "def fn(vals):\n total = odd = 0 \n for i, x in enumerate(vals): \n if vals[0] == x: \n total += 1\n if i&1: odd += 1\n elif vals[0] ^ x != (1 << n) - 1: return inf\n ans = inf \n if len(vals) <= 2*total <= len(vals)+1: ans = min(ans, odd)\n if len(vals)-1 <= 2*total <= len(vals): ans = min(ans, total - odd)\n return ans", "def gcd(*nums):\n\t\n\treturn reduce(gcd_for_two, nums)", "def lcm(x, y):\n\n # choose the greater number\n if x > y:\n greater = x\n else:\n greater = y\n\n while (True):\n if ((greater % x == 0) and (greater % y == 0)):\n lcm = greater\n break\n greater += 1\n\n return lcm", "def lcm(a,b):\n if a==0:\n return b\n if b==0:\n return a\n c=gcd(a,b)\n lcm=a*b/c\n return lcm", "def lcm(a, b):\n\n if a == b == 0:\n return 0\n\n return (a * b) // gcd(a, b)", "def find_gcd(a, b):\n\n gcd = min(a, b)\n\n # Keep looping until gcd divides both a & b evenly\n while a % gcd != 0 or b % gcd != 0:\n gcd -= 1\n\n return gcd", "def multiple(a, b):\n from fractions import gcd\n def lcm(x,y):\n \treturn (x*y)//gcd(x,y)\n #return lcm(a,b)\n \n def gcd(x,y):\n if y > x:\n x, y = y, x\n while y != 0:\n x, y = y, x % y\n return x\n return (a*b) // gcd(a,b)", "def lcm(*nums):\n\t\n\treturn reduce(lcm_for_two, nums)", "def find_smallest(num_vars):\n for x in range(10):\n if num_vars <= 2**x:\n return x", "def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res", "def max_multiple(divisor: int, bound: int) -> int:\n\n while bound > 0:\n if bound % divisor == 0:\n return bound\n bound -= 1\n return 0", "def gcd(*numbers):\n from fractions import gcd\n return reduce(gcd, numbers)", "def gcd(*numbers):\n from fractions import gcd\n return reduce(gcd, numbers)", "def find_lonely_integer(numbers):\n lonely_integer = reduce(lambda x, y: x ^ y, numbers)\n return lonely_integer", "def next_smaller_multiple(value, multiple):\n return multiple * math.floor(value / multiple)", "def findExtremeDivisors(n1, n2):\n\n minVal, maxVal = None, None\n for i in range(2, min(n1, n2)+1):\n if n1%i == 0 and n2%i == 0:\n if minVal == None:\n minVal = i\n maxVal = i\n return (minVal, maxVal)", "def method2():\n n = 1000\n s = 0\n multiples = [3,5]\n total = []\n\n for m in multiples:\n total.append(0)\n\n minValue = 0\n while(minValue < 1000):\n minValue = 1000\n minPosition = 0\n for i, v in enumerate(total):\n if v < minValue:\n minValue = v\n minPosition = i\n\n temp = total[minPosition] + multiples[minPosition]\n\n if(temp < 1000) and (temp not in total):\n s += temp\n\n total[minPosition] = temp\n\n return s", "def find_least_common_number(a, b, c):\n i = j = k = 0\n while i < len(a) and j < len(b) and k < len(c):\n x, y, z = a[i], b[j], c[k]\n if x == y == z:\n return x\n m = max(x, y, z)\n if x < m:\n i += 1\n if y < m:\n j += 1\n if z < m:\n k += 1\n return -1", "def minOperations(n):\n if type(n) is not int 
or n < 2:\n return 0\n\n factor = []\n i = 2\n while i <= n:\n if n % i == 0:\n factor.append(i)\n n = n / i\n else:\n i += 1\n\n return sum(factor)", "def find_i(n):\n lst = []\n for i in range(1, n):\n lst.append(2 * compute(n - i) + 2 ** i - 1)\n result = min(lst)\n return lst.index(result) + 1", "def _lcm_f(a, b):\n return int((a * b) / _gcd_f(a, b))", "def lcm(a, b):\n\n\treturn (a * b)/gcd(a, b)", "def minimum_spanning_arborescence(sol):", "def lcm(L):\n lcm, M = 1, []\n for i in L:\n for j in M:\n if i % j == 0:\n i //= j\n while i > 1:\n lcm *= low_prime(i)\n M.append(low_prime(i))\n i //= low_prime(i)\n return lcm", "def tst_functn():\n a=15\n b=20\n Least_common_factor=lcm(a,b)\n print(\"LCM is: \")\n print(Least_common_factor)\n\n a=0\n b=51\n Least_common_factor=lcm(a,b)\n print(\"LCM is: \")\n print(Least_common_factor)\n\n a=12\n b=0\n Least_common_factor=lcm(a,b)\n print(\"LCM is: \")\n print(Least_common_factor)", "def c(ixs):\n return sum(range(1, sum((i > 0 for i in ixs)) + 1))", "def lcm(num1, num2):\n return num1 * num2 // fractions.gcd(num1,num2)", "def greatest_common_divisor(x: int, y: int) -> int:\n while y != 0:\n (x, y) = (y, x % y)\n return x", "def lcm(x,y):\n #Initialize counter & condition\n counter = 1\n condition = False\n #While loop iterates until LCM condition is satisfied\n while condition == False :\n counter = counter + 1\n condition = (counter % x == 0) and (counter % y == 0)\n return counter", "def lcm_for_two(a, b):\n\t\n\treturn a // gcd_for_two(a, b) * b", "def _get_m(self, ks: List[int]) -> int:\n\n base = 1\n for c in ks:\n base = base * c // gcd(base, c)\n return base", "def mult_parities_python(bound, verbose=False):\n v = [None] * bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound, 2)) + 1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity + int(1)) % int(2)\n if verbose:\n print(\"loop {0} (of {1}); last = {2}\".format(k, loops, len(last)))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v", "def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit", "def constrain(n: int, low: int, high: int) -> int:\n return max(min(n, high), low)", "def PrimitiveRoots(self, modulo):\n modRange = range(1, modulo)\n required = {x for x in modRange if fractions.gcd(x, modulo)}\n return [g for g in modRange if required == {pow(g, powers, modulo) for powers in modRange}]", "def lcm(a, b):\n return abs(a*b) / gcd(a, b) if a and b else 0", "def uniform_search(fun, a, b, E, n=3, counter=0):\n if b - a < E:\n return (b + a) / 2, counter\n step = (b - a) / n\n xn = a + step\n min_x = a\n min_f = fun(a)\n while xn <= b:\n counter += 1\n f = fun(xn)\n if f < min_f:\n min_x = xn\n min_f = f\n xn += step\n counter += 2\n if fun(min_x - step) < fun(min_x + step):\n return uniform_search(fun, min_x - step, min_x, E, n, counter)\n return uniform_search(fun, min_x, min_x + step, E, n, counter)", "def lcm(num1, num2):\n\n if num1 > num2:\n bigger = num1\n else:\n bigger = num2\n while True:\n if bigger % num1 == 0 and bigger % num2 == 0:\n return bigger\n bigger += 1", "def lcm2(a, b):\n return a * b / gcd(a, b)", "def hcf(num1, num2):\n\n if num1 > num2:\n 
smaller = num2\n else:\n smaller = num1\n for i in range(1, smaller + 1):\n if ((num1 % i == 0) and (num2 % i == 0)):\n return i" ]
[ "0.7665987", "0.75820744", "0.7434193", "0.725471", "0.71709675", "0.6794163", "0.6781177", "0.6689504", "0.6673474", "0.66556793", "0.66154504", "0.6614286", "0.66003877", "0.65978855", "0.65978855", "0.65896875", "0.6571934", "0.65678567", "0.6525186", "0.65232986", "0.6469532", "0.6465268", "0.6381586", "0.637862", "0.6359832", "0.63554084", "0.6346309", "0.6336405", "0.63291025", "0.63277936", "0.6326865", "0.6326865", "0.6326865", "0.6326865", "0.6326865", "0.6326865", "0.6326865", "0.6326865", "0.6326865", "0.62901664", "0.6276444", "0.6276255", "0.6276255", "0.6266456", "0.62598175", "0.62598175", "0.62598175", "0.62114537", "0.6209103", "0.61940193", "0.61742765", "0.6171429", "0.61668104", "0.61660475", "0.61595917", "0.61582524", "0.61417764", "0.6140491", "0.61327755", "0.61296356", "0.6128158", "0.6124589", "0.6120675", "0.6113099", "0.61115843", "0.6109797", "0.6106581", "0.61051357", "0.6097831", "0.6094834", "0.6094552", "0.6093757", "0.6093757", "0.60929334", "0.60626113", "0.60577756", "0.6050387", "0.6032547", "0.6028589", "0.60170925", "0.60146457", "0.6000646", "0.59981513", "0.5987498", "0.5971506", "0.5968474", "0.5956481", "0.59480006", "0.59476286", "0.5946828", "0.59450537", "0.5940384", "0.5932474", "0.5928681", "0.59284693", "0.59254456", "0.59183216", "0.59119844", "0.59113395", "0.5905111" ]
0.6465514
21
Creates a UNet model with pretrained option.
def unet(num_classes=21, is_deconv=False, feature_scale=1, is_batchnorm=True, pretrained=False): if pretrained: model_path = pretrained_models['pascal'] model = UNet(n_classes=num_classes, feature_scale=feature_scale, is_batchnorm=is_batchnorm, is_deconv=is_deconv) checkpoint = torch.load(model_path) weights = checkpoint['state_dict'] weights['notinuse'] = weights.pop('final.weight') weights['notinuse2'] = weights.pop('final.bias') model.load_state_dict(weights, strict=False) else: model = UNet(n_classes=num_classes, feature_scale=feature_scale, is_batchnorm=is_batchnorm, is_deconv=is_deconv) return model
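A minimal usage sketch of the unet factory above, assuming the UNet class, the pretrained_models['pascal'] checkpoint path, and torch are importable as the snippet implies; the batch size and 256x256 spatial size below are illustrative, not taken from the dataset row.

import torch

# Hypothetical call into the unet() factory defined above: 21 classes, random init.
# Passing pretrained=True instead would load the 'pascal' checkpoint from disk,
# dropping its final-layer weights as shown in the factory.
model = unet(num_classes=21, feature_scale=1, is_batchnorm=True, pretrained=False)
model.eval()
dummy_batch = torch.randn(2, 3, 256, 256)   # 2 RGB images, 256x256 (illustrative sizes)
with torch.no_grad():
    logits = model(dummy_batch)             # per-pixel class scores, one channel per class
print(logits.shape)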
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unet_model(self):\n # create optimizer instance\n config = {\n 'class_name': self.optimizer,\n 'config': self.optimizer_params}\n optimizer = get_optimizer(config)\n\n self.model = unet(optimizer=optimizer,\n loss=self.loss,\n metrics=self.metrics,\n input_size=self.input_size,\n pretrained_weights=self.pretrained_weights)", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def create_model(input_shape, n_classes, optimizer='rmsprop', fine_tune=0, n_model=1):\r\n # Pretrained convolutional layers are loaded using the Imagenet weights.\r\n # Include_top is set to False, in order to exclude the model's fully-connected layers.\r\n if n_model == 4:\r\n conv_base = VGG19(include_top=False,\r\n weights='imagenet', \r\n input_shape=input_shape)\r\n else:\r\n conv_base = VGG16(include_top=False,\r\n weights='imagenet', \r\n input_shape=input_shape)\r\n \r\n # Defines how many layers to freeze during training.\r\n # Layers in the convolutional base are switched from trainable to non-trainable\r\n # depending on the size of the fine-tuning parameter.\r\n if fine_tune > 0:\r\n for layer in conv_base.layers[:-fine_tune]:\r\n layer.trainable = False\r\n else:\r\n for layer in conv_base.layers:\r\n layer.trainable = False\r\n\r\n # Create a new 'top' of the model (i.e. fully-connected layers).\r\n # This is 'bootstrapping' a new top_model onto the pretrained layers.\r\n top_model = conv_base.output\r\n top_model = Flatten(name=\"flatten\")(top_model)\r\n if n_model == 1 or n_model == 2:\r\n top_model = Dense(4096, activation='relu')(top_model)\r\n top_model = Dense(1072, activation='relu')(top_model)\r\n\r\n\r\n if n_model == 3 or n_model == 4:\r\n top_model = Dense(4096, activation='relu')(top_model)\r\n top_model = Dense(1024, activation='relu')(top_model)\r\n top_model = Dense(256, activation='relu')(top_model)\r\n top_model = Dense(64, activation='relu')(top_model)\r\n\r\n if n_model == 5: \r\n top_model = Dense(4096, activation='relu')(top_model)\r\n top_model = Dense(2048, activation='relu')(top_model)\r\n top_model = Dense(1024, activation='relu')(top_model)\r\n top_model = Dense(512, activation='relu')(top_model)\r\n top_model = Dense(256, activation='relu')(top_model)\r\n top_model = Dense(128, activation='relu')(top_model)\r\n top_model = Dense(64, activation='relu')(top_model)\r\n top_model = Dense(32, activation='relu')(top_model)\r\n \r\n top_model = Dropout(0.2)(top_model)\r\n output_layer = Dense(n_classes, activation='softmax')(top_model)\r\n \r\n # Group the convolutional base and new fully-connected layers into a Model object.\r\n model = Model(inputs=conv_base.input, outputs=output_layer)\r\n\r\n # Compiles the model for training.\r\n model.compile(optimizer=optimizer, \r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n \r\n return model", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif 
self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def create_network(model_file=DEFAULT_MODEL_FILE, pretrained=DEFAULT_PRETRAINED, *args, **kwargs):\n net = imagenet_classifier(*args,**kwargs)\n net.set_phase_test()\n net.set_mode_cpu()\n return net", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n\n # Set TPN_enabled to true if opt.TPN is defined\n if opt.TPN:\n self.TPN_enabled = True\n else:\n self.TPN_enabled = False\n\n # Conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n discr_input_nc = opt.input_nc + opt.output_nc\n\n # If TPN is enabled, switch to the U-Net with TPN architecture\n if self.TPN_enabled:\n opt.netG = 'unet_256_TPN'\n discr_input_nc +=1 # Additional Channel for Time Input\n\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; \n self.netD = networks.define_D(discr_input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.TPN_enabled:\n self.loss_names = ['G_GAN', 'G_L1', 'G_TPN', 'D_real', 'D_fake']\n\n # Store final gamma value and then set it to 0\n self.final_gamma = deepcopy(opt.gamma)\n opt.gamma = 0\n\n # Initiliaze m and c to None\n self.update_m = None\n self.update_c = None\n\n # Setup TPN if set to True\n print(\"\\nSetting up TPN\\n\")\n opt_TPN = deepcopy(opt) # copy train options and change later\n opt_TPN.model = 'time_predictor'\n opt_TPN.name = opt.TPN\n opt_TPN.netD = 'time_input'\n opt_TPN.ndf = 16 # Change depending on the ndf size used with the TPN model specified\n # hard-code some parameters for TPN test phase\n opt_TPN.display_id = -1 # no visdom display;\n opt_TPN.isTrain = False\n print(\"Options TPN: {}\\n\\n\".format(opt_TPN))\n self.TPN = create_model(opt_TPN) # create a model given opt_TPN.model and other options\n self.TPN.setup(opt_TPN) # regular 
setup: load\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n # Check if lambda_L2 is in range [0,1]\n assert (0 <= self.opt.lambda_L2 <= 1)", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def model(pretrained=False, **kwargs):\r\n\r\n layers = make_layers(cfg['O'], dilation=dilation['D1'])\r\n cnv = np.cumsum(cnvs['OI']) if kwargs['args'].IN or kwargs['args'].INL else np.cumsum(cnvs['O'])\r\n model = VGG(layers, cnvs=cnv, **kwargs)\r\n if pretrained:\r\n pre2local_keymap = [('features.{}.weight'.format(i), 'conv1_2.{}.weight'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.bias'.format(i), 'conv1_2.{}.bias'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 10), 'conv3.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 10), 'conv3.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 17), 'conv4.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 17), 'conv4.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 24), 'conv5.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 24), 'conv5.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap = dict(pre2local_keymap)\r\n\r\n\r\n model_dict = model.state_dict()\r\n pretrained_file = os.path.join(kwargs['args'].pretrained_model_dir, kwargs['args'].pretrained_model)\r\n if os.path.isfile(pretrained_file):\r\n pretrained_dict = torch.load(pretrained_file)\r\n print('load pretrained model from {}'.format(pretrained_file))\r\n else:\r\n pretrained_dict = model_zoo.load_url(model_urls['vgg16'])\r\n print('load pretrained model from {}'.format(model_urls['vgg16']))\r\n # 0. replace the key\r\n pretrained_dict = {pre2local_keymap[k] if k in pre2local_keymap.keys() else k: v for k, v in\r\n pretrained_dict.items()}\r\n # *. show the loading information\r\n for k in pretrained_dict.keys():\r\n if k not in model_dict:\r\n print('Key {} is removed from vgg16'.format(k))\r\n print(' ')\r\n for k in model_dict.keys():\r\n if k not in pretrained_dict:\r\n print('Key {} is new added for DA Net'.format(k))\r\n # 1. filter out unnecessary keys\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n # 2. overwrite entries in the existing state dict\r\n model_dict.update(pretrained_dict)\r\n # 3. 
load the new state dict\r\n model.load_state_dict(model_dict)\r\n return model", "def create(fpath):\n model_info = json.load(open(fpath))\n\n model_shape = model_info['model']\n model_settings = model_info['config']\n dropout_chance = model_info['config']['dropout_chance']\n\n nn = NeuralNetwork(model_shape, model_settings, dropout_probability=dropout_chance)\n return nn", "def make_model():\n # create the base pre-trained model\n base_model = efn.EfficientNetB0(input_shape=(img_width, img_height, 3), include_top=False)\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n return base_model, model", "def get_unet(start_depth=64, size=128):\n\n input_layer = Input((size, size, 3))\n output_layer = build_unet(input_layer, start_depth)\n\n model = Model(inputs=input_layer, outputs=output_layer)\n\n metrics = [iou, 'accuracy']\n\n model.compile(optimizer=Adam(lr=1e-3), loss='binary_crossentropy', metrics=metrics)\n model.summary()\n\n return model", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def create_model(project_parameters):\n model = Net(project_parameters=project_parameters)\n if project_parameters.checkpoint_path is not None:\n model = load_checkpoint(model=model, num_classes=project_parameters.num_classes,\n use_cuda=project_parameters.use_cuda, checkpoint_path=project_parameters.checkpoint_path)\n return model", "def get_pretrained_model(model_name, n_classes):\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n\n # Freeze early layers\n for param in model.parameters():\n param.requires_grad = False\n n_inputs = model.classifier[6].in_features\n\n # Add on classifier\n model.classifier[6] = nn.Sequential(\n nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),\n nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n\n for param in model.parameters():\n param.requires_grad = False\n\n n_inputs = model.fc.in_features\n model.fc = nn.Sequential(\n nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),\n nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))\n\n # Move to gpu and parallelize\n if train_on_gpu:\n model = model.to('cuda')\n\n if multi_gpu:\n model = nn.DataParallel(model)\n\n return model", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):\n model = make_basic_cnn()\n layers = model.layers\n\n model = MLPnGPU(nb_classes, layers, input_shape)\n return model", "def initModel(self):\n input_shape = (self.params[\"nb_features\"],)\n x = input_tensor = Input(input_shape)\n x = Dense(self.params[\"nb_neurons\"], 
activation=\"relu\")(x)\n for i in range(2, self.params[\"nb_layers\"] + 1):\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n if self.params[\"dropout\"]:\n x = Dropout(self.params[\"dropout\"])(x)\n x = output_tensor = Dense(4)(x)\n model = Model(input_tensor, output_tensor)\n return model", "def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def create_model_net(n_input,n_hidden,n_output):\n net = Sequential(\n L.Linear(n_input, n_hidden), F.relu,\n L.Linear(n_hidden, n_hidden), F.relu,\n L.Linear(n_hidden, n_output), F.softmax)\n return net", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# 
except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def _create_nn(self):\n with tf.name_scope('policy_network'):\n with tf.variable_scope(\"policy_network\"):\n model = tf.keras.Sequential(name='policy_network_model')\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[0], activation=tf.nn.relu,\n input_shape=(1, self.neurons_in_each_layer[0])))\n for num_neurons in self.neurons_in_each_layer[1:-1]:\n model.add(tf.keras.layers.Dense(num_neurons, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[-1], name='policy_output_layer'))\n\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):\n\n # Build embeddings.\n if model_opt.model_type == \"text\":\n src_field = fields[\"src\"]\n src_emb = build_embeddings(model_opt, src_field)\n else:\n src_emb = None\n\n # Build encoder.\n encoder = build_encoder(model_opt, src_emb)\n\n # Build decoder.\n tgt_field = fields[\"tgt\"]\n tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if 
model_opt.share_embeddings:\n # src/tgt vocab should be the same if `-share_vocab` is specified.\n assert src_field.base_field.vocab == tgt_field.base_field.vocab, \\\n \"preprocess with -share_vocab if you use share_embeddings\"\n\n tgt_emb.word_lut.weight = src_emb.word_lut.weight\n\n if model_opt.share_position_embeddings:\n tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight\n\n decoder = build_decoder(model_opt, tgt_emb)\n\n # Build NMTModel(= encoder + decoder).\n if gpu and gpu_id is not None:\n device = torch.device(\"cuda\", gpu_id)\n elif gpu and not gpu_id:\n device = torch.device(\"cuda\")\n elif not gpu:\n device = torch.device(\"cpu\")\n\n # Build separate LM if doing simple fusion\n if model_opt.simple_fusion:\n layers = 12\n size = 768\n heads = 12\n\n lm_decoder_opt = copy.deepcopy(model_opt)\n lm_decoder_opt.dec_layers = layers\n lm_decoder_opt.use_GPT_version_ctxattn = False\n lm_decoder_opt.use_GPT_version_psa = False\n lm_decoder_opt.use_GPT_version_unconditional = True\n lm_decoder_opt.tgt_word_vec_size = size\n lm_decoder_opt.rnn_size = size\n lm_decoder_opt.dec_rnn_size = size\n lm_decoder_opt.transformer_ff = size*4\n lm_decoder_opt.dec_heads = heads\n lm_decoder_opt.position_encoding_learned_dec = True\n lm_decoder_opt.share_decoder_embeddings = True\n lm_decoder_opt.dropout = 0\n\n lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)\n logger.info(lm_decoder_emb)\n\n lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)\n load_decoder = lm_decoder\n\n model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)\n\n generator = SimpleFusionGenerator(model_opt.dec_rnn_size,\n lm_decoder_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab))\n generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight\n\n if model_opt.share_decoder_embeddings:\n generator.decoder_linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.lm_linear\n else:\n load_decoder = decoder\n if model_opt.unconditional:\n model = onmt.models.UncondModel(decoder)\n else:\n model = onmt.models.NMTModel(encoder, decoder)\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n\n if model_opt.padded_vocab_fix_me_later:\n gen_func = nn.Sequential(PadGen(), gen_func)\n\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab)),\n Cast(torch.float32),\n gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n gen_linear = generator[0]\n else:\n tgt_base_field = fields[\"tgt\"].base_field\n vocab_size = len(tgt_base_field.vocab)\n pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]\n generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)\n if model_opt.share_decoder_embeddings:\n generator.linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.linear\n\n if model_opt.encdec_share_params:\n for name, p in decoder.named_parameters():\n if 'ctx' in name or 'context' in name:\n continue\n pointer = encoder\n attrs = name.split('.')\n for attr_name in attrs[:-1]:\n pointer = getattr(pointer, attr_name)\n\n # pointer now has the encoder version of the parameter parent\n setattr(pointer, attrs[-1], p)\n\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n # Normally, just 
load the model parameters from checkpoint\n if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint:\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.b_2',\n r'\\1.layer_norm\\2.bias', s)\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.a_2',\n r'\\1.layer_norm\\2.weight', s)\n return s\n \n checkpoint['model'] = {fix_key(k): v\n for k, v in checkpoint['model'].items()}\n # end of patch for backward compatibility\n\n # Initialize rest of parameters normally\n if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n\n\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'], strict=False)\n else:\n # load the gpt parameters\n if 'gpt2_params' in checkpoint:\n init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or model_opt.GPT_representation_mode != 'none'\n \n if init_something:\n # Initialize all the weights first\n if model_opt.gpt2_init_zero:\n for p in decoder.parameters():\n p.data.zero_()\n if model_opt.simple_fusion:\n generator.decoder_linear.weight.data.zero_()\n generator.decoder_linear.bias.data.zero_()\n else:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n if encoder is not None:\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n if model_opt.zero_bias_init:\n gen_linear.bias.data.zero_()\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n gen_linear.bias.data.zero_()\n\n load_models = []\n if model_opt.GPT_representation_mode != 'none':\n load_embs = []\n if model_opt.GPT_representation_loc in ['both', 'src']:\n load_models.append(src_emb.gpt_model)\n load_embs.append(src_emb)\n if model_opt.GPT_representation_loc in ['both', 'tgt']:\n load_models.append(tgt_emb.gpt_model)\n load_embs.append(tgt_emb)\n \n else:\n if model_opt.gpt2_init_embanddec or model_opt.simple_fusion:\n load_models = [load_decoder]\n elif model_opt.gpt2_init_embandenc:\n load_models = [encoder]\n \n it_list = list(checkpoint['gpt2_params'])\n for lm_idx, load_model in enumerate(load_models):\n #print(lm_idx, load_model)\n for name, array in it_list:\n name = name[12:] # skip \"transformer.\"\n name = name.split('.')\n\n assigned = False\n if name[0] == 'wpe':\n if model_opt.GPT_representation_mode != 'none':\n pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n else:\n pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n elif name[0] == 'wte':\n if model_opt.GPT_representation_mode != 'none':\n pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n else:\n pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n if not model_opt.nopretrain_decemb:\n pointer.append(gen_linear.weight)\n if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n 
pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n elif name[0] == 'ln_f':\n if name[1] == 'weight':\n pointer = load_model.layer_norm.weight\n elif name[1] == 'bias':\n pointer = load_model.layer_norm.bias\n else:\n raise ValueError('I am missing something here!')\n\n elif name[0] == 'h':\n layer_num = name[1]\n pointer = getattr(load_model.transformer_layers, layer_num)\n if name[2] == 'attn':\n assigned = True\n pointer = pointer.self_attn\n full_data = torch.from_numpy(array)\n if name[3] == 'c_attn':\n end_size = full_data.shape[-1]//3\n assert full_data.shape[-1] % 3 == 0\n if name[4] == 'bias':\n if init_something:\n pointer.linear_query.bias.data = full_data[:end_size]\n pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n pointer.linear_values.bias.data = full_data[end_size*2:]\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.bias.orig = full_data[:end_size].clone()\n pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n else:\n raise ValueError('I am missing something here!')\n elif name[3] == 'c_proj':\n if name[4] == 'bias':\n if init_something:\n pointer.final_linear.bias.data = full_data\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.bias.orig = full_data.clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.final_linear.weight.data = full_data.t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n else:\n raise ValueError('I am missing something here!')\n\n elif name[2] == 'ln_1' or name[2] == 'ln_2':\n num = name[2][3]\n pointer = getattr(pointer, 'layer_norm_'+num)\n if name[2] == 'bias':\n pointer = pointer.bias\n elif name[2] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n elif name[2] == 'mlp':\n pointer = pointer.feed_forward\n pointer = getattr(pointer, name[2])\n if name[3] == 'bias':\n pointer = pointer.bias\n elif name[3] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n \n if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n if name[-1] == 'weight':\n array = array.T\n\n if not isinstance(pointer, list):\n pointer = [pointer]\n for pointer_i in pointer:\n target_size = int(math.ceil(array.shape[0]/8))*8\n padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n try:\n assert pointer_i.shape == array.shape or padded_vocab\n except AssertionError as e:\n \n e.args += (pointer_i.shape, array.shape)\n raise\n if init_something:\n print(\"Initialize PyTorch weight {}\".format(name))\n if padded_vocab:\n 
pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n else:\n pointer_i.data = torch.from_numpy(array)\n if model_opt.gpt2_params_std > 0:\n if padded_vocab:\n raise NotImplementedError\n else:\n pointer_i.orig = torch.from_numpy(array).clone()\n # name = name[6:] # skip \"model/\"\n # name = name.split('/')\n\n # assigned = False\n # if name[0] == 'wpe':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n # else:\n # pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n # elif name[0] == 'wte':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n # else:\n # pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n # if not model_opt.nopretrain_decemb:\n # pointer.append(gen_linear.weight)\n # if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n # pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n # elif name[0] == 'ln_f':\n # if name[1] == 'g':\n # pointer = load_model.layer_norm.weight\n # elif name[1] == 'b':\n # pointer = load_model.layer_norm.bias\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[0][0] == 'h':\n # layer_num = name[0][1:]\n # pointer = getattr(load_model.transformer_layers, layer_num)\n # if name[1] == 'attn':\n # assigned = True\n # pointer = pointer.self_attn\n # full_data = torch.from_numpy(array)\n # if name[2] == 'c_attn':\n # end_size = full_data.shape[-1]//3\n # assert full_data.shape[-1] % 3 == 0\n # if name[3] == 'b':\n # if init_something:\n # pointer.linear_query.bias.data = full_data[:end_size]\n # pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n # pointer.linear_values.bias.data = full_data[end_size*2:]\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.bias.orig = full_data[:end_size].clone()\n # pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n # pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n # pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n # pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n # pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n # pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[2] == 'c_proj':\n # if name[3] == 'b':\n # if init_something:\n # pointer.final_linear.bias.data = full_data\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.bias.orig = full_data.clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.final_linear.weight.data = full_data.t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[1] == 'ln_1' or name[1] == 'ln_2':\n # num = name[1][3]\n # pointer = getattr(pointer, 'layer_norm_'+num)\n # if name[2] == 'b':\n # pointer = pointer.bias\n # elif name[2] == 'g':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # elif 
name[1] == 'mlp':\n # pointer = pointer.feed_forward\n # pointer = getattr(pointer, name[2])\n # if name[3] == 'b':\n # pointer = pointer.bias\n # elif name[3] == 'w':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n \n # if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n # if name[-1] == 'w' or name[-1] == 'g':\n # array = array.T\n\n # if not isinstance(pointer, list):\n # pointer = [pointer]\n # for pointer_i in pointer:\n # target_size = int(math.ceil(array.shape[0]/8))*8\n # padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n # padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n # try:\n # assert pointer_i.shape == array.shape or padded_vocab\n # except AssertionError as e:\n \n # e.args += (pointer_i.shape, array.shape)\n # raise\n # if init_something:\n # print(\"Initialize PyTorch weight {}\".format(name))\n # if padded_vocab:\n # pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n # else:\n # pointer_i.data = torch.from_numpy(array)\n # if model_opt.gpt2_params_std > 0:\n # if padded_vocab:\n # raise NotImplementedError\n # else:\n # pointer_i.orig = torch.from_numpy(array).clone()\n if 'enc_model' in checkpoint:\n load_dict = {k[8:]: v for k, v in checkpoint['enc_model'] if 'encoder' in k}\n encoder.load_state_dict(load_dict, strict=True)\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') \\\n and model.encoder.embeddings is not None:\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec)\n\n # remove requires_grad from params that are not trained:\n if model_opt.notrain_emb or model_opt.notrain_embanddec:\n if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:\n model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n if model_opt.share_embeddings:\n model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n generator[0].weight.requires_grad = False\n\n if model_opt.notrain_genbias:\n generator[0].bias.requires_grad = False\n\n if model_opt.notrain_embanddec:\n for name, p in load_decoder.layer_norm.named_parameters():\n p.requires_grad = False\n for name, p in load_decoder.transformer_layers.named_parameters():\n if 'context' not in name and 'ctx' not in name: # Takes care of normal and psa versions\n p.requires_grad = False\n \n if model_opt.onlytrainln:\n for name, p in model.decoder.named_parameters():\n if 'layer_norm' not in name:\n p.requires_grad = False\n for p in generator.parameters():\n p.requires_grad = False\n\n if model_opt.onlytrainoutp:\n if model_opt.share_decoder_embeddings:\n raise ValueError\n\n for p in 
model.decoder.parameters():\n p.requires_grad = False\n\n if model_opt.simple_fusion:\n for p in lm_decoder.parameters():\n p.requires_grad = False\n for p in generator.lm_linear.parameters():\n p.requires_grad = False\n\n model.generator = generator\n model.to(device)\n if model_opt.model_dtype == 'fp16':\n model.half()\n\n for p in model.parameters():\n if hasattr(p, 'orig'):\n p.orig = p.orig.to(device)\n if model_opt.model_dtype == 'fp16':\n p.orig = p.orig.half()\n\n return model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n self.loss_names = ['G_SH']\n self.visual_names = ['input', 'pr_SH', 'gt_SH']\n self.model_names = ['G1']\n\n if not opt.no_brightness:\n self.loss_names += ['G_BA', 'G_BC']\n self.visual_names += ['pr_BA', 'gt_BA']\n self.model_names += ['G3']\n\n if opt.reg_LTM:\n self.loss_names += ['LTMReg']\n\n self.light_res = opt.light_res\n\n\n # Intrinsic network\n if opt.latent_Ls or opt.latent_Lt:\n netG1name = 'unet_256_latent_inL'\n else:\n netG1name = 'unet_256_latent'\n\n input_nc = opt.input_nc\n if opt.in_Ls:\n input_nc += 1\n if opt.in_Lt:\n input_nc += 1\n\n if opt.LTM:\n self.dim_LTM = self.light_res**2\n if self.opt.enc_LTM:\n self.dim_LTM = opt.dim_LTM\n use_hidden = True if not opt.enc_ill_hid==-1 else False\n self.enc_LTM = networks.init_net(networks.IlluminationEncoder(self.light_res**2, opt.enc_ill_hid, self.dim_LTM, use_hidden), opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netG1 = networks.define_G(input_nc, self.dim_LTM, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, True, self.gpu_ids)\n\n else:\n if opt.no_latent_color:\n output_nc = 3\n else:\n output_nc = 1\n self.netG1 = networks.define_G(input_nc, output_nc, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n\n # Brightness network\n g3_input_nc = 3\n if opt.cas and opt.cat_In:\n g3_input_nc = g3_input_nc + opt.input_nc\n if not opt.cas:\n if opt.in_Ls:\n g3_input_nc += 1\n if opt.in_Lt:\n g3_input_nc += 1\n self.netG3 = networks.define_G(g3_input_nc, 1, opt.ngf, 'resnet_9blocks_latent', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n if self.isTrain:\n # define loss functions\n self.criterionS = torch.nn.MSELoss()\n self.criterionBA = torch.nn.MSELoss()\n # self.criterionBP = torch.nn.MSELoss()\n self.criterionBC = torch.nn.MSELoss()\n self.criterionReg = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G3 = torch.optim.Adam(self.netG3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G1)\n # self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_G3)", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, 
[128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def init_model(net, restore):\n # init weights of model\n net.apply(init_weights)\n\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n net = nn.DataParallel(net)\n device = torch.device('cuda')\n net.to(device)\n net.restored = False\n\n # restore model weights\n if restore is not None and os.path.exists(restore):\n #net.load_state_dict(torch.load(restore))\n net.load_state_dict(torch.load(restore, map_location=\"cuda\"))\n net.restored = True\n print(\"Restore model from: {}\".format(os.path.abspath(restore)))\n\n return net", "def __init__(self, opt):\n assert(not opt.isTrain)\n BaseModel.__init__(self, opt)\n\n # specify the training losses you want to print out.\n self.loss_names = []\n # specify the images you want to save/display.\n self.visual_names = ['real_A', 'fake_B']\n # specify the models you want to save to the disk. 
Only one generator is needed.\n self.model_names = ['G_B']\n\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.norm,\n opt.use_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n # assigns the model to self.netG_[suffix] so that it can be loaded\n # please see <BaseModel.load_networks>\n setattr(self, 'netG_B', self.netG)", "def build_model_mobilenet(num_classes):", "def create_network(normalized_input, n_vocab):\n \n # Create sequential Keras model\n model = Sequential()\n model.add(CuDNNLSTM(256,\n input_shape=(normalized_input.shape[1], normalized_input.shape[2]),\n return_sequences=True))\n model.add(Dropout(0.3))\n model.add(CuDNNLSTM(256))\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(n_vocab, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n \n # Load the weights to each node\n model.load_weights('weights/weights_final.hdf5')\n\n return model", "def model_creator(config):\n return nn.Linear(1, 1)", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def load_model(opt, device):\n device_ids = list(range(opt.num_gpus))\n model = BiDateNet(13, 2).to(device)\n model = nn.DataParallel(model, device_ids=device_ids)\n\n return model", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def build_nn(dropout: float=0.3,verbosity: int=0):\n model = Sequential()\n model.add(Dense(1024, input_shape=(1024,), activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1, activation='sigmoid'))\n \n if verbosity > 0:\n model.summary()\n return model", "def init_model(self) -> keras.Model:\n model_input = keras.Input(shape=(self.num_classes, self.nun_models))\n\n layer_out = Conv1D(64, kernel_size=self.num_classes, activation=\"sigmoid\")(\n model_input\n )\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Flatten()(layer_out)\n\n layer_out = Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n output = Dense(self.num_classes, activation=\"softmax\")(layer_out)\n\n return keras.Model(inputs=model_input, outputs=output)", "def main(u_net_settings):\n model = build_u_net(*u_net_settings)\n print(model.summary())", "def build_u_net(input_size, filters, u_depth):\n input_layer = Input(shape=(input_size, input_size, 1), 
name=\"input_layer\")\n\n residual_connections = []\n for i in range(u_depth):\n if i == 0:\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(input_layer)\n else:\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n\n x = Dropout(0.1)(x)\n residual = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n residual_connections.append(residual)\n x = MaxPool2D(pool_size=(2, 2))(residual)\n filters *= 2\n\n padding = [184, 88, 40, 16, 4]\n for i in range(u_depth):\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n x = Dropout(0.1)(x)\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n filters = int(filters / 2)\n x = Conv2DTranspose(filters, (2, 2), strides=(2, 2))(x)\n x = concatenate([Cropping2D(padding.pop())(residual_connections.pop()), x])\n\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n x = Dropout(0.1)(x)\n x = Conv2D(filters, **CONV_LAYER_SETTINGS)(x)\n output_layer = Conv2D(1, (1, 1), 1, activation=sigmoid)(x)\n\n model = Model(inputs=input_layer, outputs=output_layer)\n\n return model", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def __init__(self):\n super(PackageNet, self).__init__()\n \n # remove last layers of vgg19 model, save first fc layer and maxpool layer\n self.vgg = models.vgg19(pretrained=True)\n del self.vgg.classifier[2:]\n\n # get size of some layers\n start_num = self.vgg.classifier[0].out_features\n mid_num0 = int(np.sqrt(start_num))\n mid_num1 = int(start_num**0.667)\n mid_num2 = int(start_num**0.333)\n \n cls_out_num = 9 \n reg_out_num = 16 # 8 3D bounding box coords\n \n # define classifier\n self.classifier = nn.Sequential(\n nn.Linear(start_num,mid_num0,bias=True),\n nn.ReLU(),\n nn.Linear(mid_num0,cls_out_num,bias = True),\n nn.Softmax(dim = 1)\n )\n \n # define regressor\n # try relu and tanh, also try without bias\n self.regressor = nn.Sequential(\n nn.Linear(start_num,mid_num1,bias=True),\n nn.ReLU(),\n nn.Linear(mid_num1,mid_num2,bias = True),\n nn.ReLU(),\n nn.Linear(mid_num2,reg_out_num,bias = True),\n nn.Sigmoid()\n \n )", "def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n self.model = PPO2(CnnPolicy, self.env, verbose=1)", "def buildNet(inputShape, 
numUniqueClasses):\n layers = InputLayer((None,) + inputShape[1:4])\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1), stride= (5,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 1, \n filter_size = (3,1), stride= (3,1))\n layers = NonlinearityLayer(layers, nonlinearity = nonlinearity)\n layers = DropoutLayer(layers,p=.3) \n layers = batch_norm(NNHelpers.LocallyConnected2DLayer(layers,1,(5,1),\n W=He('relu'),\n nonlinearity=nonlinearity)) \n layers = DenseLayer(layers,num_units=numUniqueClasses,\n nonlinearity=linear) \n layers = NonlinearityLayer(layers, nonlinearity=softmax) \n return layers", "def net(net_params, inference=False):\n model_name = net_params['global']['model_name'].lower()\n num_classes = net_params['global']['num_classes']\n if num_classes == 1:\n warnings.warn(\"config specified that number of classes is 1, but model will be instantiated\"\n \" with a minimum of two regardless (will assume that 'background' exists)\")\n num_classes = 2\n msg = f'Number of bands specified incompatible with this model. Requires 3 band data.'\n state_dict_path = ''\n if model_name == 'unetsmall':\n model = unet.UNetSmall(num_classes,\n net_params['global']['number_of_bands'],\n net_params['training']['dropout'],\n net_params['training']['dropout_prob'])\n elif model_name == 'unet':\n model = unet.UNet(num_classes,\n net_params['global']['number_of_bands'],\n net_params['training']['dropout'],\n net_params['training']['dropout_prob'])\n elif model_name == 'ternausnet':\n assert net_params['global']['number_of_bands'] == 3, msg\n model = TernausNet.ternausnet(num_classes)\n elif model_name == 'checkpointed_unet':\n model = checkpointed_unet.UNetSmall(num_classes,\n net_params['global']['number_of_bands'],\n net_params['training']['dropout'],\n net_params['training']['dropout_prob'])\n elif model_name == 'inception':\n model = inception.Inception3(num_classes,\n net_params['global']['number_of_bands'])\n elif model_name == 'fcn_resnet101':\n assert net_params['global']['number_of_bands'] == 3, msg\n coco_model = models.segmentation.fcn_resnet101(pretrained=True, progress=True, num_classes=21, aux_loss=None)\n model = models.segmentation.fcn_resnet101(pretrained=False, progress=True, num_classes=num_classes,\n aux_loss=None)\n chopped_dict = chop_layer(coco_model.state_dict(), layer_names=['classifier.4'])\n del coco_model\n # load the new state dict\n # When strict=False, allows to load only the variables that are identical between the two models irrespective of\n # whether one is subset/superset of the other.\n model.load_state_dict(chopped_dict, strict=False)\n elif model_name == 'deeplabv3_resnet101':\n assert net_params['global']['number_of_bands'] == 3, msg\n # pretrained on coco (21 classes)\n coco_model = models.segmentation.deeplabv3_resnet101(pretrained=True, progress=True,\n num_classes=21, aux_loss=None)\n model = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=num_classes, aux_loss=None)\n chopped_dict = chop_layer(coco_model.state_dict(), layer_names=['classifier.4'])\n del coco_model\n model.load_state_dict(chopped_dict, strict=False)\n else:\n raise ValueError(f'The model name {model_name} in the config.yaml is not defined.')\n\n coordconv_convert = get_key_def('coordconv_convert', net_params['global'], False)\n if coordconv_convert:\n centered = get_key_def('coordconv_centered', net_params['global'], True)\n normalized = 
get_key_def('coordconv_normalized', net_params['global'], True)\n noise = get_key_def('coordconv_noise', net_params['global'], None)\n radius_channel = get_key_def('coordconv_radius_channel', net_params['global'], False)\n scale = get_key_def('coordconv_scale', net_params['global'], 1.0)\n # note: this operation will not attempt to preserve already-loaded model parameters!\n model = coordconv.swap_coordconv_layers(model, centered=centered, normalized=normalized, noise=noise,\n radius_channel=radius_channel, scale=scale)\n\n if net_params['training']['state_dict_path']:\n state_dict_path = net_params['training']['state_dict_path']\n checkpoint = load_checkpoint(state_dict_path)\n elif inference:\n state_dict_path = net_params['inference']['state_dict_path']\n checkpoint = load_checkpoint(state_dict_path)\n else:\n checkpoint = None\n\n return model, checkpoint, model_name", "def create_nueral_network(X, y, epochs=8):\n model = Sequential()\n model.add(layers.Dense(500, input_dim=X.shape[1]))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(32, activation='relu'))\n model.add(layers.Dense(5,activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])\n print(model.summary())\n model.fit(X, y, epochs=epochs, batch_size=500)\n return model", "def build_model(self) -> nn.Module:\n pass", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def trainNet():", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def create_model(window, input_shape, num_actions,\n model_name='q_network'):\n if model_name == 0:\n model = linear_model(window, input_shape, num_actions)\n elif model_name == 1:\n model = deep_model(window, input_shape, num_actions)\n elif model_name == 2:\n model = dueling_deep(window, input_shape, num_actions)\n else:\n print(\"No suitable models found.\")\n exit()\n print(model.summary())\n return model", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = 
build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def _create_model(self, arch, num_output_channels, num_input_channels, pretrained, **kwargs):\n\n self.net = None\n\n #--------------------------------------------------------------------------------------------\n # select architecture\n #--------------------------------------------------------------------------------------------\n num_filters = kwargs.get(\"num_filters\", 32)\n kw = {'dim': num_output_channels, 'num_classes': self.num_classes, 'num_channels': num_input_channels, 'pretrained': pretrained,\n 'num_filters': num_filters}\n print(\"kw\", kw)\n self.net = nnmodels.__dict__[arch](**kw)\n\n self.s_arch = arch\n self.num_output_channels = num_output_channels\n self.num_input_channels = num_input_channels\n self.num_filters = num_filters\n\n if self.cuda:\n self.net.cuda()\n if self.parallel and self.cuda:\n self.net = nn.DataParallel(self.net, device_ids= range( torch.cuda.device_count() ))", "def build_unet(input_layer = Input((128,128,3)), start_depth=64, activation='relu', initializer='he_normal'):\n\n # 128 -> 64\n conv1 = Conv2D_BN(input_layer, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n conv1 = Conv2D_BN(conv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n pool1 = MaxPooling2D((2, 2))(conv1)\n\n # 64 -> 32\n conv2 = Conv2D_BN(pool1, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n conv2 = Conv2D_BN(conv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n pool2 = MaxPooling2D((2, 2))(conv2)\n\n # 32 -> 16\n conv3 = Conv2D_BN(pool2, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n conv3 = Conv2D_BN(conv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n pool3 = MaxPooling2D((2, 2))(conv3)\n\n # 16 -> 8\n conv4 = Conv2D_BN(pool3, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n conv4 = Conv2D_BN(conv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n pool4 = MaxPooling2D((2, 2))(conv4)\n\n # Middle\n convm=cbam_block(pool4)\n\n # 8 -> 16\n deconv4 = Conv2DTranspose(convm, start_depth * 8, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 16 -> 32\n deconv3 = Conv2DTranspose(uconv4, start_depth * 4, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 32 -> 64\n deconv2 = Conv2DTranspose(uconv3, start_depth 
* 2, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 64 -> 128\n deconv1 = Conv2DTranspose(uconv2, start_depth * 1, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n\n output_layer = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(uconv1)\n\n return output_layer", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def __init__(self, in_channels=3, n_classes=21):\n super(U_Net, self).__init__()\n\n self.layer_0 = UNet_Encoder_Particular(in_channels, 64)\n\n self.layer_1 = UNet_Encoder(64, 128)\n self.layer_2 = UNet_Encoder(128, 256)\n self.layer_3 = UNet_Encoder(256, 512)\n self.layer_4 = UNet_Encoder(512, 512)\n\n self.layer_7 = UNet_Decoder(1024, 256)\n self.layer_8 = UNet_Decoder(512, 128)\n self.layer_9 = UNet_Decoder(256, 64)\n self.layer_10 = UNet_Decoder(128, 64)\n\n self.layer_11 = UNet_Decoder_Particular(64, n_classes)", "def build_unet_backbone(name: str,\r\n input_tensor: tf.Tensor,\r\n n_levels: int = 4,\r\n weights: Optional[str] = None) -> tf.keras.Model:\r\n if name not in _MODELS:\r\n supported_models = list_supported_models()\r\n supported_models = '\\n'.join(f'- {o}' for o in supported_models)\r\n raise ValueError(f\"Backbone {name} is not supported. 
\"\r\n f\"Supported backbones are: \\n {supported_models}\")\r\n\r\n model_cls, _ = _MODELS[name]\r\n model = model_cls(input_tensor=input_tensor,\r\n include_top=False,\r\n weights=weights)\r\n\r\n outputs = [model.get_layer(o).output\r\n for o in _DEFAULT_FEATURE_LAYERS[name][:n_levels]]\r\n\r\n return tf.keras.Model(inputs=model.inputs,\r\n outputs=outputs[::-1],\r\n name=name)", "def createNet(hyper, rescaleParameter, full_dim, nettype):\n\n\tshape = hyper[\"shape\"]\n\tnodes = hyper[\"nodes\"]\n\tlayer = hyper[\"layer\"]\n\tactiv = hyper[\"activationFunction\"]\n\n\tnetshape, nodesTotal = getNodesPerLayer(shape, nodes, layer, full_dim)\n\n\tif nettype == 'regression':\n\t\tmodel = Net_reg(netshape, activ)\n\telif nettype == 'classification':\n\t\tmodel = Net_cla(netshape, activ)\n\t\n\treturn model", "def _build_model(self):\n\n with tf.variable_scope(self.name):\n # adds placeholders, data_normalization and data_noise if desired. Also adds a placeholder for dropout probability\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n # create core multi-layer perceptron\n mlp_output_dim = 2 * self.ndim_y * self.n_centers + self.n_centers\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=mlp_output_dim,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n weight_normalization=self.weight_normalization,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n core_output_layer = core_network.output_layer\n\n # slice output of MLP into three equally sized parts for loc, scale and mixture weights\n slice_layer_locs = L.SliceLayer(core_output_layer, indices=slice(0, self.ndim_y * self.n_centers), axis=-1)\n slice_layer_scales = L.SliceLayer(core_output_layer, indices=slice(self.ndim_y * self.n_centers, 2 * self.ndim_y * self.n_centers), axis=-1)\n slice_layer_weights = L.SliceLayer(core_output_layer, indices=slice(2 * self.ndim_y * self.n_centers, mlp_output_dim), axis=-1)\n\n # locations mixture components\n self.reshape_layer_locs = L.ReshapeLayer(slice_layer_locs, (-1, self.n_centers, self.ndim_y))\n self.locs = L.get_output(self.reshape_layer_locs)\n\n # scales of the mixture components\n reshape_layer_scales = L.ReshapeLayer(slice_layer_scales, (-1, self.n_centers, self.ndim_y))\n self.softplus_layer_scales = L.NonlinearityLayer(reshape_layer_scales, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.softplus_layer_scales)\n\n # weights of the mixture components\n self.logits = L.get_output(slice_layer_weights)\n self.softmax_layer_weights = L.NonlinearityLayer(slice_layer_weights, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # # put mixture components together\n self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale\n in zip(tf.unstack(self.locs, axis=1), tf.unstack( self.scales, axis=1))]\n self.mixture = mixture = Mixture(cat=cat, components=components, value=tf.zeros_like(self.y_input))\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to store samples\n self.samples = mixture.sample() #TODO either use it or remove it\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = 
mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = self.scales * self.std_y_sym\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = self.scales\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.softmax_layer_weights, self.softplus_layer_scales, self.reshape_layer_locs,\n self.layer_in_y])", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def init_model(model_filename, doGPU):\n # set model attributes list\n ##print(\"Model-dataset =\", model_ds_name)\n ##if model_ds_name == 'modelRAP':\n ## model_labels = loader_rapdataset_yiqiang.ATTRIBUTES\n ##elif model_ds_name == 'modelPETA':\n ## model_labels = loader_peta_dataset.ATTRIBUTES\n ##elif model_ds_name == 'modelRAPPETA':\n ## model_labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n ##else:\n ## print(\"ERROR: unknown model-dataset.\")\n ## sys.exit()\n model_labels = loader_rap_plus_peta_dataset.ATTRIBUTES\n assert (len(model_labels) == 49)\n\n # create model\n person.NO_ATTRIBUTES = len(model_labels) #TODO-elo: ugly, attr. 
nbr should be a parameter of person.Net.__init__()\n net = person.Net()\n if doGPU:\n net = person.Net().cuda()\n\n # load model\n print('loading model \"' + model_filename + '\"')\n person.load_model(net, model_filename)\n\n return net, model_labels", "def build_model(\n model_purpose: str,\n name: str,\n init_w: str,\n input_shape: np.ndarray,\n classes: int,\n dropout_rate: np.float32,\n) -> keras.Model:\n\n if model_purpose.startswith(\"segmentation\"):\n seg_builder = sm.Seg_model_builder(name, input_shape, classes, dropout_rate)\n model = seg_builder.get_model()\n\n elif model_purpose == \"inversion\":\n reg_builder = rm.Reg_model_builder(name, input_shape, classes, init_w)\n model = reg_builder.get_model()\n\n elif model_purpose == \"pixel_concentration_retrieval\":\n model = pwrm.Unet_2(input_shape, classes)\n\n return model", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def make_NN(n_hidden, n_epoch, labelsdict, lr, device, model_name, trainloader, validloader, train_data, pretrain, finetune_whole, custom_model):\n if custom_model == 2:\n # Use custom two-layer convolution model\n print(\"Using Two-Layer CNN\")\n model = TwoLayerConvNet()\n elif custom_model == 5:\n print(\"Using Five-Layer CNN\")\n # Use custom five-layer convolution model\n model = FiveLayerConvNet()\n else:\n # Import NN model (either pretrained or not)\n model = getattr(models, model_name)(pretrained=pretrain)\n \"\"\" ===================================================================================== \"\"\"\"\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER FREEZE THE PARAMETERS OR NOT (WILBERT ARISTO) \"\"\"\n # If we do not need to finetune whole model, freeze parameters that we don't need to re-train\n if not finetune_whole:\n for param in model.parameters():\n param.requires_grad = False\n \"\"\" ===================================================================================== \"\"\"\"\n\n n_out = len(labelsdict)\n\n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Make classifier\n n_in = next(model.fc.modules()).in_features\n model.fc = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.fc.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n else:\n # Make classifier\n n_in = next(model.classifier.modules()).in_features\n model.classifier = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.classifier.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n \"\"\" 
============================================================================================================================ \"\"\"\"\n\n # Define criterion\n criterion = nn.NLLLoss() \n\n model.to(device)\n start = time.time()\n\n epochs = n_epoch\n steps = 0 \n running_loss = 0\n print_every = 40\n for e in range(epochs):\n model.train()\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Eval mode for predictions\n model.eval()\n\n # Turn off gradients for validation\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion, device)\n\n print(\"Epoch: {}/{} - \".format(e+1, epochs),\n \"Training Loss: {:.3f} - \".format(running_loss/print_every),\n \"Validation Loss: {:.3f} - \".format(test_loss/len(validloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure training is back on\n model.train()\n \n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Add model info \n model.fc.n_in = n_in\n model.fc.n_hidden = n_hidden\n model.fc.n_out = n_out\n model.fc.labelsdict = labelsdict\n model.fc.lr = lr\n model.fc.optimizer_state_dict = optimizer.state_dict\n model.fc.model_name = model_name\n model.fc.class_to_idx = train_data.class_to_idx\n else:\n # Add model info \n model.classifier.n_in = n_in\n model.classifier.n_hidden = n_hidden\n model.classifier.n_out = n_out\n model.classifier.labelsdict = labelsdict\n model.classifier.lr = lr\n model.classifier.optimizer_state_dict = optimizer.state_dict\n model.classifier.model_name = model_name\n model.classifier.class_to_idx = train_data.class_to_idx\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n print('model:', model_name, '- hidden layers:', n_hidden, '- epochs:', n_epoch, '- lr:', lr)\n print(f\"Run time: {(time.time() - start)/60:.3f} min\")\n return model\n\n# Define function to save checkpoint\ndef save_checkpoint(model, path):\n checkpoint = {'c_input': model.classifier.n_in,\n 'c_hidden': model.classifier.n_hidden,\n 'c_out': model.classifier.n_out,\n 'labelsdict': model.classifier.labelsdict,\n 'c_lr': model.classifier.lr,\n 'state_dict': model.state_dict(),\n 'c_state_dict': model.classifier.state_dict(),\n 'opti_state_dict': model.classifier.optimizer_state_dict,\n 'model_name': model.classifier.model_name,\n 'class_to_idx': model.classifier.class_to_idx\n }\n torch.save(checkpoint, path)\n \n# Define function to load model\ndef load_model(path):\n cp = torch.load(path)\n \n # Import pre-trained NN model \n model = getattr(models, cp['model_name'])(pretrained=True)\n \n # Freeze parameters that we don't need to re-train \n for param in model.parameters():\n param.requires_grad = False\n \n # Make classifier\n model.classifier = NN_Classifier(input_size=cp['c_input'], output_size=cp['c_out'], \\\n hidden_layers=cp['c_hidden'])\n \n # Add model info \n model.classifier.n_in = cp['c_input']\n model.classifier.n_hidden = cp['c_hidden']\n model.classifier.n_out = cp['c_out']\n model.classifier.labelsdict = cp['labelsdict']\n model.classifier.lr = cp['c_lr']\n model.classifier.optimizer_state_dict = cp['opti_state_dict']\n 
model.classifier.model_name = cp['model_name']\n model.classifier.class_to_idx = cp['class_to_idx']\n model.load_state_dict(cp['state_dict'])\n \n return model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'Feat', 'VGG', 'SSIM', 'PSNR']\n self.visual_names = ['fake_B', 'real_B']\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n self.netG = generator.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids,\n not opt.no_transp_conv,\n opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,\n opt.n_blocks_local)\n\n if self.isTrain:\n self.netD = discriminator.define_D(opt.input_nc + opt.output_nc, opt.ndf, 'pix2pixHD_multiscale',\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n not (opt.gan_mode == 'lsgan'), opt.num_D)\n\n self.criterionGAN = loss.GANLoss(opt.gan_mode, multiscale_D=opt.netD == 'pix2pixHD_multiscale').to(\n self.device)\n self.criterionVGG = loss.VGGLoss().to(self.device)\n self.criterionFeat = loss.FeatureMatchingLoss(opt.n_layers_D, opt.num_D)\n\n self.criterionSSIM = loss.SkimageLoss(partial(ssim, multichannel=True))\n self.criterionPSNR = loss.SkimageLoss(psnr)\n\n if opt.netG.startswith('pix2pixHD') and (opt.n_epochs_fix_global > 0):\n params_dict = dict(self.netG.named_parameters())\n netG_params = []\n for key, value in params_dict.items():\n if key.startswith('model' + str(opt.n_local_enhancers)):\n netG_params += [value]\n else:\n netG_params = self.netG.parameters()\n\n self.optimizer_G = torch.optim.Adam(netG_params, lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n if opt.load_pretrain:\n pretrained_path = '' if not self.isTrain else opt.load_pretrain\n self.load_network(self.netG, 'G', opt.epoch, pretrained_path)\n if self.isTrain:\n self.load_network(self.netD, 'D', opt.epoch, pretrained_path)\n\n self.real_A = None\n self.real_B = None\n self.fake_A = None\n self.fake_B = None\n self.loss_D_real = None\n self.loss_D_fake = None\n self.loss_D = None\n self.loss_G_GAN = None\n self.loss_Feat = None\n self.loss_VGG = None\n self.loss_G = None\n self.loss_SSIM = None\n self.loss_PSNR = None", "def _tpu_build(self):\n def _define_model(features, labels, mode, params):\n data_source = (features, labels)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses, eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(\n mode=mode, predictions=outputs\n )\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error('Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses[0], train_op=self.optimize_ops[0]\n )\n\n tpu_name = ['node-1'] # TODO Bring outside\n tpu_iterations = 500 # TODO Bring outside\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n\n run_config = tf.contrib.tpu.RunConfig(\n model_dir=self.output_path,\n 
cluster=tpu_cluster_resolver,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations),\n )\n\n self.estimator = tpu.TPUEstimator(\n model_fn=_define_model,\n use_tpu=True,\n train_batch_size=32*4, #self.dataset['train'].batch_size,\n eval_batch_size=32*4, #self.dataset['validation'].batch_size,\n config=run_config,\n params={\"data_dir\": self.data_dir}\n )", "def UNet(input_size=(256, 256, 1)):\n inputs = Input(input_size)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(inputs)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c1)\n p1 = MaxPooling2D((2, 2))(c1)\n\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p1)\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c2)\n p2 = MaxPooling2D((2, 2))(c2)\n\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p2)\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c3)\n p3 = MaxPooling2D((2, 2))(c3)\n\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p3)\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c4)\n p4 = MaxPooling2D(pool_size=(2, 2))(c4)\n\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p4)\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c5)\n\n u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u6)\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c6)\n\n u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u7)\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c7)\n\n u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u8)\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c8)\n\n u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u9)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c9)\n\n outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)\n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=['accuracy', dice_coef])\n return model", "def localizer_vggnet(pretrained=False, **kwargs):\n model = LocalizerVGG(make_layers(cfg['E']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def 
resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def get_unet():\n inputs = Input((img_rows, img_cols, 1))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),\n padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2),\n padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2),\n padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2),\n padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss,\n metrics=[dice_coef])\n\n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif arch == 'wide_resnet50_2':\n model = torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad 
= False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def build_model():\n pretrained_model = VGG16(input_shape=(fixed_size[0], fixed_size[1], 3), weights='imagenet', include_top=False)\n # We will not train the layers imported.\n for layer in pretrained_model.layers:\n layer.trainable = False\n transfer_learning_model = Sequential()\n transfer_learning_model.add(pretrained_model)\n transfer_learning_model.add(Flatten())\n transfer_learning_model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n transfer_learning_model.add(Dropout(0.5))\n transfer_learning_model.add(Dense(3, activation='softmax'))\n transfer_learning_model.summary()\n opt = Adam(learning_rate=.0003)\n transfer_learning_model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n return transfer_learning_model", "def build(model_name):\n return pretrain.factory.create(model_name)", "def model(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D1'], dilation=dilation['D1']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model", "def get_model(training = True):\n # download the model\n model = models.alexnet(pretrained = True)\n # define the FC layers\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(9216, 2024)),\n ('relu', nn.ReLU()),\n ('dropout', nn.Dropout(p = .7)),\n ('fc2', nn.Linear(2024, 516)),\n ('relu', nn.ReLU()),\n ('dropput', nn.Dropout(p = .7)),\n ('fc3', nn.Linear(516, 5)),\n ('output', nn.LogSoftmax(dim=1))]))\n # configure the last layer\n model.classifier = classifier\n if training:\n # set model training parameters\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(), lr = 1e-4, amsgrad = True)\n return model, criterion, optimizer\n else:\n return model", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model", "def make_model(self, options, generator):\n model_type = options.model_type\n input_shape = (options.target_size[0], options.target_size[1],\n len(options.active_input_inds))\n nb_labels = generator.dataset.nb_labels\n\n if model_type == CONV_LOGISTIC:\n model = make_conv_logistic(input_shape, nb_labels,\n options.kernel_size)\n elif model_type == FCN_RESNET:\n model = make_fcn_resnet(\n input_shape, nb_labels, options.use_pretraining,\n options.freeze_base)\n elif model_type == DUAL_FCN_RESNET:\n model = make_dual_fcn_resnet(\n input_shape, options.dual_active_input_inds,\n nb_labels, options.use_pretraining, options.freeze_base)\n elif model_type == UNET:\n model = make_unet(input_shape, nb_labels)\n elif model_type == FC_DENSENET:\n model = make_fc_densenet(\n input_shape, nb_labels, drop_prob=options.drop_prob,\n weight_decay=options.weight_decay,\n down_blocks=options.down_blocks,\n up_blocks=options.up_blocks)\n elif model_type in [CONCAT_ENSEMBLE, AVG_ENSEMBLE]:\n models, active_input_inds_list = self.load_ensemble_models(options)\n if model_type == CONCAT_ENSEMBLE:\n model = ConcatEnsemble(\n models, active_input_inds_list, input_shape, nb_labels)\n elif model_type == AVG_ENSEMBLE:\n model = AvgEnsemble(models, active_input_inds_list)\n else:\n raise ValueError('{} is not a valid model_type'.format(model_type))\n\n return model", "def 
load_fine_tuned_model(architecture, img_size, n_classes, n_untrained_layers, top_weights_path, fine_weights_path):\r\n #TODO: Use top model weights from fine tuning weights for model initialization\r\n classifier = TransferModel()\r\n classifier.build_base_model(architecture, [img_size, img_size], 3)\r\n classifier.add_top_model(n_classes)\r\n classifier.load_top_weights(fine_weights_path)\r\n logger.debug(\"Loaded \" + architecture +\" model.\")\r\n return classifier", "def getModel(config: configuration.Configuration) -> torch.nn.Module:\n if config.modelName == ModelName.DENSE:\n return DenseGenerator(1, 1, n_blocks=config.blockCount)\n elif config.modelName == ModelName.SHALLOW:\n return Shallow(1, 1, )\n elif config.modelName == ModelName.TIRAMISU:\n model = Tiramisu(1, 1, structure=(\n config.down, # Down blocks\n config.bottleneck, # bottleneck layers\n config.up, # Up blocks\n ), checkpoint=False)\n\n model.initialize_kernels(torch.nn.init.kaiming_uniform_, conv=True)\n return model\n else:\n return SimpleCNN()", "def set_vanilla_model(self):\n logging.debug(\"Setting vanilla model\")\n # Build model\n\n ## Embedding Layer\n word_embedding_layer = self.embed_word()\n pos_embedding_layer = self.embed_pos()\n\n ## Deep layers\n latent_layers = self.stack_latent_layers(self.num_of_latent_layers)\n\n ## Dropout\n dropout = Dropout(self.pred_dropout)\n\n ## Prediction\n predict_layer = self.predict_classes()\n\n ## Prepare input features, and indicate how to embed them\n inputs_and_embeddings = [(Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"word_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"predicate_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"postags_inputs\"),\n pos_embedding_layer),\n ]\n\n ## Concat all inputs and run on deep network\n output = predict_layer(dropout(latent_layers(merge([embed(inp)\n for inp, embed in inputs_and_embeddings],\n mode = \"concat\",\n concat_axis = -1))))\n\n # Build model\n self.model = Model(input = map(itemgetter(0), inputs_and_embeddings),\n output = [output])\n\n # Loss\n self.model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n self.model.summary()\n\n # Save model json to file\n self.save_model_to_file(os.path.join(self.model_dir, \"model.json\"))", "def createProtoSpecification(model, filename):\n net = network_pb2.Network()\n num_layers = 0\n\n # Iterate through the layers\n for layer in model.children():\n for child in layer.modules():\n if isinstance(child, nn.Conv2d):\n # Make the conv message\n convLayer = net.layers.add()\n makeConv2DMessage(child, convLayer)\n num_layers += 1\n\n elif isinstance(child, nn.MaxPool2d):\n # Make the pool message\n poolLayer = net.layers.add()\n makePool2DMessage(child, poolLayer)\n num_layers += 1\n \n elif isinstance(child, nn.AvgPool2d):\n # Make the pool message\n poolLayer = net.layers.add()\n makePool2DMessage(child, poolLayer, avg=True)\n num_layers += 1\n\n elif isinstance(child, nn.AdaptiveAvgPool2d):\n # Make the adaptive pool message\n apoolLayer = net.layers.add()\n makePool2DMessage(child, apoolLayer, avg=True, adaptive=True)\n num_layers += 1\n\n elif isinstance(child, nn.ReLU):\n # Make the activation message\n reluact = net.layers.add()\n makeReLUMessage(reluact)\n num_layers += 1\n\n elif isinstance(child, nn.Sigmoid):\n # Make the activation message\n sigact = net.layers.add()\n 
makeSigmoidMessage(sigact)\n num_layers += 1\n\n elif isinstance(child, nn.Linear):\n # Make the linear layer message\n linearLayer = net.layers.add()\n makeFCMessage(child, linearLayer)\n num_layers += 1\n\n elif isinstance(child, nn.Dropout):\n # Make the DropOut layer message\n dropLayer = net.layers.add()\n makeDropoutMessage(child, dropLayer)\n num_layers += 1\n\n net.num_layers = num_layers\n\n # Store in Pre-trained Models\n filename = PRE_TRAINED_DIR + filename\n f = open(filename, \"wb\")\n f.write(net.SerializeToString())\n f.close()", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")", "def _build_model(self, architecture):\n estimator = NN_estimator(architecture)\n weight_file = architecture[\"WEIGHT_FILE\"]\n if weight_file is None:\n pass\n else:\n estimator.load_weights(weight_file)\n return estimator", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def setup_model(self):\r\n\r\n logging.info(\"Setup the models.\")\r\n\r\n logging.info(\"{} model\".format(self.base_network_name))\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model, classifier = getattr(setops_models, self.base_network_name)(\r\n num_classes=80,\r\n avgpool_kernel=self.avgpool_kernel\r\n )\r\n else:\r\n base_model = getattr(setops_models, self.base_network_name)()\r\n classifier = getattr(setops_models, self.classifier_name)(num_classes=80)\r\n\r\n if self.init_inception:\r\n logging.info(\"Initialize inception model using Amit's networks.\")\r\n\r\n checkpoint = torch.load(self.resume_path)\r\n\r\n base_model = Inception3(aux_logits=False, transform_input=True)\r\n base_model.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in base_model.state_dict()}\r\n )\r\n classifier.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in classifier.state_dict()}\r\n )\r\n\r\n setops_model_cls = getattr(setops_models, self.sets_network_name)\r\n setops_model = setops_model_cls(\r\n input_dim=2048,\r\n S_latent_dim=self.ops_latent_dim, S_layers_num=self.ops_layer_num,\r\n I_latent_dim=self.ops_latent_dim, I_layers_num=self.ops_layer_num,\r\n U_latent_dim=self.ops_latent_dim, U_layers_num=self.ops_layer_num,\r\n block_cls_name=self.sets_block_name, 
basic_block_cls_name=self.sets_basic_block_name,\r\n dropout_ratio=self.setops_dropout,\r\n )\r\n\r\n if self.resume_path:\r\n logging.info(\"Resuming the models.\")\r\n models_path = Path(self.resume_path)\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_base_model_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n classifier.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_classifier_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n\r\n setops_models_paths = sorted(models_path.glob(\"networks_setops_model_{}*.pth\".format(self.resume_epoch)))\r\n if len(setops_models_paths) > 0:\r\n setops_model.load_state_dict(\r\n torch.load(setops_models_paths[-1]).state_dict()\r\n )\r\n\r\n return base_model, classifier, setops_model", "def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def resnet50(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def build_model():\n # noise for soise sampling in NCE\n noise = build_unigram_noise(\n torch.FloatTensor(corpus.vocab.idx2count)\n )\n\n norm_term = 'auto' if args.norm_term == -1 else args.norm_term\n # setting up NCELoss modules\n if args.index_module == 'linear':\n criterion = IndexLinear(\n args.emsize,\n ntoken,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n loss_type=args.loss,\n reduction='none',\n )\n model = RNNModel(\n ntoken, args.emsize, args.nhid, args.nlayers,\n criterion=criterion, dropout=args.dropout,\n )\n elif args.index_module == 'gru':\n if args.nlayers != 1:\n logger.warning('Falling into one layer GRU due to Index_GRU supporting')\n nce_criterion = IndexGRU(\n ntoken, args.emsize, args.nhid,\n args.dropout,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n )\n model = GenModel(\n criterion=nce_criterion,\n )\n else:\n logger.error('The index 
module [%s] is not supported yet' % args.index_module)\n raise(NotImplementedError('index module not supported'))\n\n if args.cuda:\n model.cuda()\n\n logger.info('model definition:\\n %s', model)\n return model", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def load_phon_model(self, opt):\n if not opt.phonetic_model:\n return None\n print('\\nloading phonetic model...\\n')\n opt = copy.deepcopy(opt)\n opt.input = 'phonetic'\n if not opt.load_complete_model:\n opt.traindata = 'phonetic_data/' + opt.traindata\n opt.testdata = 'phonetic_data/' + opt.testdata\n print('\\n creating data for phonetic model. If no phonetic data is provided, this can take a while...\\n')\n _, _, _, vocab, _ = lib.data.create_datasets(opt)\n phon_model, phon_optim = lib.model.create_model((vocab['src'], vocab['tgt']), opt, is_phon_model = True)\n print(phon_model.opt)\n print('Loading test data for phonetic model from \"%s\"' % opt.testdata)\n print('Loading training data for phonetic model from \"%s\"' % opt.traindata)\n print(' * Phonetic model vocabulary size. source = %d; target = %d' % (len(vocab['src']), len(vocab['tgt'])))\n print(' * Phonetic model maximum batch size. %d' % opt.batch_size)\n else:\n phon_model = lib.create_model_from_complete(opt, is_phon_model=True)\n phon_optim = None\n phon_model.transliterator = PhonTransliterator()\n print(phon_model)\n return phon_model, phon_optim" ]
[ "0.68969065", "0.665536", "0.62934476", "0.61874026", "0.6184248", "0.6179625", "0.6171186", "0.6165023", "0.61580503", "0.6156004", "0.61520237", "0.6149518", "0.6119684", "0.61149585", "0.60775506", "0.6073373", "0.60634506", "0.605574", "0.60486454", "0.6038523", "0.6014028", "0.5996178", "0.5992404", "0.5970532", "0.5951206", "0.59359825", "0.5924887", "0.5924472", "0.5921671", "0.5876192", "0.5871864", "0.5867495", "0.58624387", "0.5847745", "0.58438903", "0.5825455", "0.5815568", "0.58049476", "0.5799477", "0.5798537", "0.579418", "0.57940656", "0.57764816", "0.57648754", "0.5759461", "0.5751022", "0.57405144", "0.57379097", "0.57241255", "0.5723882", "0.5712384", "0.5712278", "0.5710626", "0.5706551", "0.5700079", "0.5697253", "0.5696447", "0.56917894", "0.5683438", "0.5679071", "0.5677227", "0.5677105", "0.56663257", "0.56574816", "0.5655912", "0.5655842", "0.5653758", "0.56526446", "0.5648112", "0.56358004", "0.5624507", "0.56240904", "0.5623119", "0.56164193", "0.5608221", "0.56067884", "0.5603752", "0.5594207", "0.559289", "0.55908257", "0.55857825", "0.55811435", "0.55805916", "0.55801326", "0.5579653", "0.5572512", "0.5565537", "0.5548846", "0.554609", "0.55458844", "0.554242", "0.55351985", "0.55338347", "0.5532355", "0.55264586", "0.55263674", "0.5521538", "0.55211383", "0.55194914", "0.5519427" ]
0.6460649
2
Download text from the given url to fname.
def download_target_url(target, fname):
    r = requests.get(target)
    if r.ok:
        with open(fname, 'w') as f:
            f.write(r.text)
        print(f"Wrote {len(r.text)} chars to {fname}.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_file(url, fname):\n urllib.request.urlretrieve(url, fname)", "def download (url):\n path, url = url\n r = requests.get (url, stream = True)\n content = r.text\n #print (content)\n with open (path + '.txt', 'w') as f:\n f.write (content)", "def download_link(url,save_dir):\n global downloaded_links\n global urlcnt\n if url in downloaded_links.keys(): return None\n m = re.search('\\?id=([a-zA-Z0-9.]+)', url)\n unique_name = m.group(1)\n unique_name = unique_name.replace(\".\",\"_\")\n text_name = unique_name + \".txt\"\n html = urlopen(url).read()\n text_file = open(save_dir + \"/{0}\".format(text_name),\"w\")\n urlcnt += 1\n text_version = get_gp_text_description(html)\n text_file.write(text_version)\n text_file.close()\n downloaded_links[url] = True\n print(\"Downloaded {0} and saved it in '{1}' as {2}\".format(url, save_dir, unique_name))\n return html", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def main(url, localfile):\n ph.download_file(url, localfile)", "def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def download_file(url: str) -> str:\n\n assert len(url) > 0\n\n filename = url.split('/')[-1]\n\n with open(filename, 'wb') as output_file:\n response = requests.get(url, stream=True)\n total = response.headers.get('content-length')\n\n if total is None:\n output_file.write(response.content)\n else:\n downloaded = 0\n total = int(total)\n for data in response.iter_content(chunk_size=max(int(total / 1000), 1024 * 1024)):\n downloaded += len(data)\n output_file.write(data)\n done = int(50 * downloaded / total)\n sys.stdout.write('\\r[{}{}]'.format('█' * done, '.' 
* (50 - done)))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n return filename", "def download(filename):\n print \"Downloading\", filename\n file_content = urlopen(\n urljoin(URL_PATH, filename)\n )\n write_data_to_file(\n file_content.read(),\n os.path.join(\n '/tmp',\n filename\n )\n )", "def download_file(self, url, path):\n print('\\tDownloading: ', path)\n with open(path, 'w') as outfile:\n try:\n response = self._http_client.get(url)\n outfile.write(response.text)\n finally:\n response.close()\n outfile.close()\n gc.collect()", "def download_file(url,file_name):\n #http://stackabuse.com/download-files-with-python/\n filedata = urllib2.urlopen(url)\n datatowrite = filedata.read()\n with open(file_name, 'wb') as f:\n f.write(datatowrite)", "def download_file_nowget(url, fn, cookiejar):\n\tprint \"Downloading %s -> %s\" % (url, fn)\n\turlfile = get_opener(cookiejar).open(url)\n\tchunk_sz = 1048576\n\tbytesread = 0\n\tf = open(fn, \"wb\")\n\n\twhile True:\n\t\tdata = urlfile.read(chunk_sz)\n\t\tif not data:\n\t\t\tprint \".\"\n\t\t\tbreak\n\n\t\tf.write(data)\n\t\tbytesread += len(data)\n\t\tprint \"\\r%d bytes read\" % bytesread,\n\t\tsys.stdout.flush()", "def download(url, save_as):\n\topen(save_as, 'w').write(urllib2.urlopen(url).read())", "def get_text(URL_string, name):\n\n tale = URL(URL_string).download()\n save_file = open(name + '.pickle', 'w')\n pickle.dump(tale, save_file)\n save_file.close()", "def download_from_url(url, output_path):\n\n print('Pulling data from {} to {}'.format(url, output_path))\n wget.download(url, output_path)\n print('done')", "def downloadString(url):\n filein = urllib2.urlopen(url)\n data = filein.read()\n filein.close()\n return data", "def get_text(URL_string, name):\n\n\ttext = URL(URL_string).download()\n\tsave_file = open(name + '.pickle', 'w')\n\tpickle.dump(text, save_file)\n\tsave_file.close()", "def download_file(self, url, filename):\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()", "def download_file(download_url, save_path):\n url = \"https://www.encodeproject.org/\" + download_url\n urllib.request.urlretrieve(url, save_path)", "def fetch(self, url: furl) -> str:\n try:\n contents = self._download(url)\n except requests.ConnectionError as err:\n logger.exception(f\"Request failed with {err}\")\n click.secho(\n f\"The URL {url} could not be downloaded. 
Either your network is unreachable or the URL is broken.\"\n f\" Check the URL, fix your connection, or use \"\n f\" {OptionEnum.OFFLINE.as_flake8_flag()} / {OptionEnum.OFFLINE.as_envvar()}=1\",\n fg=\"red\",\n err=True,\n )\n return \"\"\n return contents", "def _download(url):\n \n filename = url.split('/')[-1]\n if os.path.isfile(filename):\n info('Using pre-existed file {} from local system.'.format(filename))\n else:\n info('Downloading {} from OMA Database.'.format(url.split('/')[-1]))\n filename, _ = urlretrieve(url, filename)\n return filename", "def download_file (url):\n\n '''\n Try and download the file given in the url,\n throw up an error if not possible.\n '''\n try:\n ret = urllib2.urlopen (url)\n except urllib2.HTTPError:\n return None\n except urllib2.URLError:\n return None\n\n print \"Downloaded \" + url\n\n return ret", "def download_to_file(url, filename):\n with browser_spoof_open(url) as download_conn:\n with open(filename, \"wb\") as out_file:\n shutil.copyfileobj(download_conn, out_file, 1024 * 8)", "def download(url, filename=None):\n\t# requirements os, shutil, urllib.parse, urllib.request\n\tif not filename:\n\t\turl_parts = urllib.parse.urlparse(url)\n\t\tfilename = os.path.basename(url_parts.path)\n\turl_h = urllib.request.urlopen(url)\n\twith open(filename, 'wb') as file_h:\n\t\tshutil.copyfileobj(url_h, file_h)\n\turl_h.close()\n\treturn", "def download_file(url: str, fdst):\n split = urlsplit(url)\n filename = os.path.basename(split.path)\n\n print('Downloading {}'.format(filename))\n\n with urllib.request.urlopen(url) as response:\n length = response.getheader('content-length')\n if length:\n total = int(length)\n copyfileobj_with_progress(response, fdst, total=total)", "def download_simple(url): # url(str)\n html = urlopen(url).read().decode()\n return html", "def single_file_download(url: str, encoding: str = \"utf-8\") -> str:\n\n recipient = BytesIO() # the stream we will write into\n\n # print(\"Opening %r . . 
.\" % url)\n curl = pycurl.Curl()\n curl.setopt(curl.URL, url)\n curl.setopt(curl.WRITEDATA, recipient)\n curl.perform()\n curl.close()\n # print(\"Closed %r.\" % url)\n\n return recipient.getvalue().decode(encoding)", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def download_file(url, filename):\n\n with DownloadProgressBar(unit=\"B\",\n unit_scale=True,\n miniters=1,\n desc=url.split(\"/\")[-1]\n ) as t:\n urllib.request.urlretrieve(url, filename=filename, reporthook=t.update_to)", "def download_song(url, filename):\n page = requests.get(url, headers=HEADERS)\n if page.status_code == 200: # OK\n with open(filename, 'w') as outf:\n outf.write(page.text)\n else:\n print(f'download failed with status code {page.status_code}!')", "def download(self, url):\n try:\n logging.info(self.log_format((\"downloading \" + url)))\n webFile = urllib.urlopen(url)\n localFile = open(self.paths['workspace'] + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n logging.error(self.log_format((\"could not get url \" + url)))", "def download(url, filename):\n response = requests.get(url, stream=True)\n with open(filename, \"wb\") as handle:\n for data in response.iter_content():\n handle.write(data)", "def torrent_download(download_url, torrent):\n webFile = urllib.urlopen(download_url)\n localFile = open(torrent, 'wb')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()", "def download_file(url, output_filename):\n print(\"Downloading\", url, \"to\", output_filename)\n r = requests.get(url)\n r.raise_for_status()\n with open(output_filename, 'wb') as f:\n f.write(r.content)", "def download_file(filename, url):\n print(\"downloading {0}\".format(url))\n with open(filename, \"wb\") as fout:\n response = requests.get(url, stream=True, verify=False)\n response.raise_for_status()\n # Write response data to file\n iblock = 0\n for block in response.iter_content(4096):\n if iblock % 10000 == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n iblock += 1\n fout.write(block)", "def download(self, url, destination):\n fileDownloader = utils.HttpFileDownloader(url, destination)\n fileDownloader.download()", "def download_file(filename, url):\n with open(filename, 'wb') as fout:\n response = requests.get(url, stream=True)\n response.raise_for_status()\n # Write response data to file\n for block in response.iter_content(4096):\n fout.write(block)", "def downloadFile(self, base_url, file_name):\n url = os.path.join(base_url, file_name)\n req = urllib2.Request(url)\n try:\n f = urllib2.urlopen(req, timeout=self.timeout)\n local_file = open(os.path.join(self.config.get('PATHS', 'pdfdir'), file_name), \"w\")\n local_file.write(f.read())\n local_file.close()\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR in downloadFile: %s\" % err\n sys.exit(0)", "def download_from_url(path, url):\n filename = url.split(\"/\")[-1]\n found_file = find_file(path, filename, max_depth=0)\n if found_file is None:\n filename = os.path.join(path, filename)\n logging.info(\"Downloading from %s to %s.\" % (url, filename))\n inprogress_filepath = filename + \".incomplete\"\n inprogress_filepath, _ = urllib.request.urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress.\n 
print()\n tf.gfile.Rename(inprogress_filepath, filename)\n return filename\n else:\n logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n return found_file", "def download_addon(self, url, target_path):\n try:\n filename = url.split('?')[0].rstrip('/').rsplit('/', 1)[-1]\n target_path = os.path.join(target_path, filename)\n\n print \"Downloading %s to %s\" % (url, target_path)\n urllib.urlretrieve(url, target_path)\n\n return target_path\n except Exception, e:\n print e", "def download_url(filename, url):\n latest_package_url = request.urlopen(url).read().decode(\"utf-8\")\n print(\"Downloading latest package:\\n{}\".format(latest_package_url))\n request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback)", "def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def url_retrieve(url, output_file):\n r = requests.get(url, allow_redirects=True)\n if r.status_code != 200:\n raise ConnectionError(f\"Could not download {url}\\nError code: {r.status_code}\")\n\n output_file.write_bytes(r.content)", "def download_link(self): # pragma: no cover\n\n if PyFunceble.Check(self.file).is_url():\n # We get the destination.\n destination = self.file.split(\"/\")[-1]\n\n if self.file and self.autocontinue.is_empty():\n # The given file is an URL.\n\n if (\n not PyFunceble.path.isfile(destination)\n or PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] == 0\n ):\n # The filename does not exist in the current directory\n # or the currently number of tested is equal to 0.\n\n # We download the content of the link.\n Download(self.file, destination).text()\n\n # We update the global file with the destination.\n self.file = destination", "def download(url, target):\n # Add progress bar via:\n # http://stackoverflow.com/a/22776/317916\n if not url:\n return None\n urlretrieve(url, target)\n return target", "def download_file_from_url(url, PATH, file_name):\n with requests.get(url) as r:\n with open(PATH+'/'+file_name, 'wb') as f:\n f.write(r.content)", "def download_file(url, filename):\n with requests.get(url, stream=True) as res:\n if res.status_code == 200:\n with open(filename, 'wb') as f:\n for chunk in res.iter_content(chunk_size=8192): \n f.write(chunk)\n else:\n raise ValueError(\"{} {}\".format(res.status_code, url))\n return filename", "def download(self, url: str, dest: PathLike, force: bool = False):", "def download_pdf( url, filename = None ):\n r = urlopen( Request( url ) )\n try:\n if filename is None:\n filename = give_filename( url )\n with open( filename, 'wb' ) as f:\n shutil.copyfileobj( r, f )\n finally:\n r.close()", "def download_with_callback(self, url, path=None, filename=None, headers=None, force=False, func=None):", "def downloadAndReplaceFile(file_path, download_url):\r\n file = urllib.request.urlopen(download_url)\r\n with open(file_path, 'wb') as output:\r\n output.write(file.read())", "def download_url(url):\n # use url_checker to verify URL is using the full address\n url_name = url_checker(url)\n if url_name:\n print(f'Requesting page {url_name}')\n tstamp = 
get_tstamp()\n # set the headers like we are a browser\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/72.0.3626.109 Safari/537.36'}\n # download the page\n response = requests.get(url, headers=headers)\n\n # create directory for saving file\n URL_DIR_NAME = os.path.join(OUTPUT_DIR, str(url_name))\n URL_TM_DIR_NAME = os.path.join(URL_DIR_NAME, str(tstamp))\n # create directory using url name and timestamp for directories\n ensure_dir(URL_TM_DIR_NAME)\n # save downloaded page as a .txt file\n with open(f'{URL_TM_DIR_NAME}{slash}response.html', 'w') as f:\n print(response.text, file=f)\n # use beautiful soup to extract links\n links = []\n soup = BeautifulSoup(response.text, 'html.parser')\n tags = soup.find_all('a')\n # append links to links list\n for tag in tags:\n links.append(tag.get('href'))\n # get only unique values and sort\n my_set = set(links)\n u_links = list(my_set)\n u_links.sort()\n # save links as a .txt file\n with open(f'{URL_TM_DIR_NAME}{slash}links.txt', 'w') as f:\n for list_item in u_links:\n f.write(f'{list_item}\\n')", "def download(url, path):\n response = requests.get(url)\n\n if response.ok:\n print(\"response is ok file is downloading ... \")\n # start to download file from url.\n with open(path, \"wb\") as f:\n f.write(response.content)\n else:\n print(\"Error!\", response.status_code)\n return False\n\n print(\"File downloaded succusfully.\")\n return True", "def download_file(url, fname_out=None) -> None:\n\n import ssl\n\n try:\n with urllib.request.urlopen(url) as f:\n if not fname_out:\n return f.read().decode(\"utf-8\")\n else:\n fdir = os.path.dirname(fname_out)\n if not os.path.exists(fdir):\n os.makedirs(fdir)\n\n with open(fname_out, \"wb\") as outfile:\n outfile.write(f.read())\n return fname_out\n\n except ssl.SSLError:\n print(\"WHAT!\")\n sys.exit(1)", "def download(self, url):\n if url is None:\n return\n user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n headers = {'User-Agent': user_agent}\n r = requests.get(url, headers=headers, verify=False)\n if r.status_code == 200:\n r.encoding = 'utf-8'\n return r.text\n return None", "def download_file(url, dl_filename):\r\n print( url )\r\n url_object=urlopen(url)\r\n dl_file_object=open(dl_filename,'wb')\r\n meta = url_object.info()\r\n file_size = 0\r\n if int(meta.get(\"Content-Length\", -1)) > 0:\r\n file_size = int(meta.get(\"Content-Length\", -1))\r\n if file_size == 0:\r\n print( \"Downloading: %s\" % (dl_filename.split('/')[-1]) )\r\n else:\r\n print( \"Downloading: %s Bytes: %s\" % (dl_filename.split('/')[-1], file_size) )\r\n\r\n current_file_size = 0\r\n block_size = 8192\r\n pbar = tqdm(\r\n total=file_size, initial=0, \r\n unit='B', unit_scale=True, desc=dl_filename.split('/')[-1] \r\n )\r\n while True:\r\n buffer = url_object.read(block_size)\r\n if not buffer:\r\n break\r\n current_file_size += len(buffer)\r\n dl_file_object.write(buffer)\r\n pbar.update(block_size)\r\n pbar.close()\r\n dl_file_object.close()", "def download_file(path, filename, destination):\n import os\n command = \"wget -q -O \"+destination+\"/\"+filename+\" ftp://nomads.ncdc.noaa.gov/\"+path+\"/\"+filename\n os.system(command)", "def download_url(url, fd, handle=None):\n return _librepo.download_url(handle, url, fd)", "def to_file(self, filename):\n resp = urlopen(self.url)\n self.file_size = self._get_content_length(resp.headers)\n block_size = 8192\n self.bytes_read = 0\n with open(filename, 'wb') as f:\n while True:\n buf = 
resp.read(block_size)\n if not buf:\n break\n self.bytes_read += len(buf)\n f.write(buf)\n self._dl_progress_bar()\n if self.show_progress:\n print(' ✓')", "def _DownloadFile(self, url, local_filename = None, modifiers = \"\",\n force = False):\n try:\n if local_filename == None:\n local_filename = url.split('/')[-1]\n if os.path.isfile(local_filename) and not force:\n if self.verbose:\n print \"File at %s already exists.\" % local_filename\n return local_filename\n if self.dont_download:\n return local_filename\n webFile = urllib2.urlopen(url)\n localFile = open(local_filename, (\"w%s\" % modifiers))\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n os.chmod(local_filename, 0777)\n except urllib2.HTTPError:\n return None\n except urllib2.URLError:\n print \"The url %s is malformed.\" % url\n return None\n return localFile.name", "def _download_epw_file(url):\n r = requests.get(url)\n if r.ok:\n # py2 and 3 compatible: binary write, encode text first\n log.debug(\" ... OK!\")\n return io.StringIO(r.text)\n else:\n log.error(\" connection error status code: %s\" % r.status_code)\n r.raise_for_status()", "def process(self, url: str) -> None:\n\n text = single_file_download(url, encoding=\"utf-8\")\n self.output_queue.put(text)", "def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def download_file(url, file_name):\n conn = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n with conn.request('GET', url, preload_content=False) as resp, open(file_name, 'wb') as out:\n shutil.copyfileobj(resp, out)", "def download_page(link, f, cnt):\n try:\n page = ur.urlopen(link).read().decode()\n fh = open(ALL_PAGES + f + str(cnt) + '.htm', 'w')\n\n fh.write(page)\n fh.close()\n except Exception:\n print('Something wrong with link ' + link)", "def download_file(url, target_pkg_dir, filename):\n abs_file_path = \"/\".join([target_pkg_dir, filename])\n try:\n urllib.request.urlretrieve(url, abs_file_path)\n except Exception as ex:\n raise Exception(\"HTTP error for url: {url}\\nError message: {msg}\\nHTTP code: {code}\".format(\n url=ex.url, msg=ex.msg, code=ex.code))", "def _download(self, url, output_dir, dataset, chunk_size=1024):\n r = self.session.get(url, stream=True, allow_redirects=True)\n if not r.ok:\n r = self.session.get(r.url, stream=True, allow_redirects=True, auth=(self._username, self._password))\n file_size = int(r.headers['Content-Length'])\n\n with tqdm(total=file_size, unit_scale=True, unit='B', unit_divisor=1024) as pbar:\n ### GET FILE NAME ###\n if \"Content-Disposition\" in r.headers.keys():\n local_filename = re.findall(\"filename=(.+)\", r.headers[\"Content-Disposition\"])[0]\n else:\n local_filename = url.split(\"/\")[-3]\n local_filename = self.api.lookup(dataset, local_filename)[0]\n local_filename = local_filename + util.convert_to_extension(r.headers['content-type'])\n print(\"*** FNAME\", local_filename)\n\n local_filename = os.path.join(output_dir, local_filename)\n\n ### WRITE FILE ###\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk:\n f.write(chunk)\n pbar.update(chunk_size)\n return local_filename", "def download_file(url, dest=None, force=False, trusted=False):\n url, filename = 
get_save_path(url, dest, force)\n keep_going = True\n success = False\n if url is None:\n return 'Aborted!'\n\n if url:\n success = download_wget(url, filename, trusted) # Try wget\n if not success:\n success = download_urllib(url, filename) # Try urllib\n if not success:\n success = download_pip(url, filename, force, trusted) # Try urllib\n if not success:\n split_url = url.split('/')\n msg = '\\n'.join([\n \"\\n\\nERROR in Web Access! - You may be behind a firewall!\",\n \"-\" * 52,\n \"You should be able to bybass this by using a browser to download:\",\n \"\\t%s\\nfrom:\\t%s\\nthen copying the download file to:\\n\\t%s\" % (\n split_url[-1], '/'.join(split_url[:-1]), filename),\n ])\n print(msg, '\\n')\n wx.MessageBox(msg, caption='WDOWNLOAD ERROR!',\n style=wx.OK|wx.CENTRE|wx.ICON_ERROR)\n return \"FAILURE or Abort!\"\n\n return filename", "def download_file(url, local_filename):\n response = requests.get(url, stream=True)\n with open(local_filename, \"wb\") as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n outfile.write(chunk)", "def external_download(song, filename, url):\n cmd = Config.DOWNLOAD_COMMAND.get\n ddir, basename = Config.DDIR.get, os.path.basename(filename)\n cmd_list = shlex.split(cmd)\n\n def list_string_sub(orig, repl, lst):\n \"\"\" Replace substrings for items in a list. \"\"\"\n return [x if orig not in x else x.replace(orig, repl) for x in lst]\n\n cmd_list = list_string_sub(\"%F\", filename, cmd_list)\n cmd_list = list_string_sub(\"%d\", ddir, cmd_list)\n cmd_list = list_string_sub(\"%f\", basename, cmd_list)\n cmd_list = list_string_sub(\"%u\", url, cmd_list)\n cmd_list = list_string_sub(\"%i\", song.ytid, cmd_list)\n dbg(\"Downloading using: %s\", \" \".join(cmd_list))\n subprocess.call(cmd_list)", "def wget_content(url):\n\n try:\n\n for i in range(len(url)):\n url[i].replace(' ', \"%20\") if i > url.find('?') else url[i]\n\n with TemporaryDirectory() as dirname:\n retval = ''\n retcode = subprocess.Popen([\"wget\", \"--tries=5\", '--timeout=10', url, \"-O\", os.path.join(dirname, \"1.txt\")])\n retcode.wait()\n file_name = os.path.join(dirname, \"1.txt\")\n handle = open(file_name)\n if handle:\n retval = handle.read()\n\n\n except Exception as ex:\n if url.startswith(\"https://\") and \"handshake failure\" in retval:\n return wget_content(url.replace(\"https://\", \"http://\"))\n else:\n wxpush(\"Crawler module failure\", traceback.extract_stack(), True)\n\n return retval or \"\"", "def download(FILE,URL):\n CMD = ['curl','-o',FILE,URL]\n call(CMD)", "def download(url, server_fname, local_fname=None, progress_update_percentage=5):\n try:\n import urllib\n urllib.urlretrieve('http://google.com')\n except AttributeError:\n import urllib.request as urllib\n u = urllib.urlopen(url)\n if local_fname is None:\n local_fname = server_fname\n full_path = local_fname\n meta = u.info()\n with open(full_path, 'wb') as f:\n try:\n file_size = int(meta.get(\"Content-Length\"))\n except TypeError:\n print(\"WARNING: Cannot get file size, displaying bytes instead!\")\n file_size = 100\n print(\"Downloading: %s Bytes: %s\" % (server_fname, file_size))\n file_size_dl = 0\n block_sz = int(1E7)\n p = 0\n while True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n file_size_dl += len(buffer)\n f.write(buffer)\n if (file_size_dl * 100. / file_size) > p:\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl *\n 100. 
/ file_size)\n print(status)\n p += progress_update_percentage", "def download(self, url, directory):\n while True:\n try:\n urlretrieve(url, directory) # this fails if no internet\n break\n except IOError:\n if not ask(\"Notes\", \"Error: No internet connection\", self):\n raise", "def download_file_wget(wget_bin, url, fn, cookies_file):\n\tcmd = [wget_bin, url, \"-O\", fn, \"--load-cookies\", cookies_file, \"--no-check-certificate\"]\n\tprint \"Executing wget:\", cmd \n\tretcode = subprocess.call(cmd)", "def DownloadFile(url, theFile, quietMode):\n # open in binary mode\n with open(theFile, \"wb\") as file:\n if not quietMode:\n print(\"[-] - Downloading -> [{0}] ...\".format(url))\n response = requests.get(url)\n if not quietMode:\n print(\"[-] - Saving -> [{0}] ...\".format(theFile))\n file.write(response.content)", "def url_to_file(url):\n \n try:\n r = get(url)\n print(r.status_code)\n if r.status_code == 200:\n try:\n with open(f'print-{date}.html', 'w') as f:\n f.write(r.text)\n except UnicodeEncodeError as e:\n print(\"Unicode error :using encodeing utf-8\")\n with open(f'print-{date}.html', 'w', encoding=\"utf-8\") as f:\n f.write(r.text)\n else:\n print(\"passing headers\")\n headers = {\"user-agent\":\"Edg/87.0.664.66\"}\n r = get(url, headers=headers)\n print(r.status_code)\n if r.status_code == 200:\n try:\n with open(f'print-{date}.html', 'w') as f:\n f.write(r.text)\n except UnicodeEncodeError as e:\n print(\"Unicode error: using encodeing utf-8\")\n with open(f'print-{date}.html', 'w', encoding=\"utf-8\") as f:\n f.write(r.text)\n else:\n print(f\"Unable to send requests {r.status_code}\")\n return r\n except Exception as e:\n print(\"Error occured\",e)", "def downloadData(url):\n response = urllib2.urlopen(url)\n html = response.read()\n localfile = open('hitdata.csv', 'wb')\n localfile.write(html)\n localfile.close()", "def dl_url(url):\n g.browse_mode = \"normal\"\n yt_url(url)\n\n if len(g.model.songs) == 1:\n download(\"download\", \"1\")\n\n if g.command_line:\n sys.exit()", "def download_file(url, local_path):\n try:\n local_filename = normalizeFilenameToCommonDateFormat(url.split('/')[-1])\n \n destination_dir = local_path #os.path.join(local_path, os.path.splitext(os.path.basename(local_filename))[0])\n \n #if not os.path.exists(destination_dir):\n # os.makedirs(destination_dir)\n \n destination_file = os.path.join(destination_dir, local_filename)\n \n if not os.path.exists(destination_file):\n # NOTE the stream=True parameter \n r = requests.get(url, stream=True)\n with open(destination_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n #f.flush() commented by recommendation from J.F.Sebastian\n # Sleep so that we aren't rude\n sleep(1)\n else:\n return destination_file + ' already '\n \n return destination_file\n except ValueError as err:\n return \"Skipping %s, not \" % (url.split('/')[-1])", "def urlretrieve(url, filename, reporthook=None, data=None):\n\n def chunk_read(response, chunk_size=8192, reporthook=None):\n content_type = response.info().get(\"Content-Length\")\n total_size = -1\n if content_type is not None:\n total_size = int(content_type.strip())\n count = 0\n while True:\n chunk = response.read(chunk_size)\n count += 1\n if reporthook is not None:\n reporthook(count, chunk_size, total_size)\n if chunk:\n yield chunk\n else:\n break\n\n response = urlopen(url, data)\n with open(filename, \"wb\") as fd:\n for chunk in chunk_read(response, reporthook=reporthook):\n 
fd.write(chunk)", "def urlretrieve(url, filename, reporthook=None, data=None):\n\n def chunk_read(response, chunk_size=8192, reporthook=None):\n content_type = response.info().get('Content-Length')\n total_size = -1\n if content_type is not None:\n total_size = int(content_type.strip())\n count = 0\n while True:\n chunk = response.read(chunk_size)\n count += 1\n if reporthook is not None:\n reporthook(count, chunk_size, total_size)\n if chunk:\n yield chunk\n else:\n break\n\n response = urlopen(url, data)\n with open(filename, 'wb') as fd:\n for chunk in chunk_read(response, reporthook=reporthook):\n fd.write(chunk)", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def anon_download(url: str):\n if verify(url):\n location = download(url)\n return location\n return 6", "def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename", "def download_from_url(url, path):\n\n with open(path, \"wb\") as f:\n response = requests.get(url, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\"\\r[%s%s] %s%%\" % ('=' * done, ' ' * (50 - done), done * 2))\n sys.stdout.flush()", "def _Download(url):\n response = urllib2.urlopen(url)\n if response.code != 200:\n raise RuntimeError('Failed to download \"%s\".' 
% url)\n return response.read()", "def fetch_file(url, filename):\n from clinica.utils.exceptions import ClinicaException\n from urllib.request import Request, urlopen\n from urllib.error import URLError\n import shutil\n import ssl\n import os.path\n from clinica.utils.stream import cprint\n\n head_tail = os.path.split(filename)\n if not os.path.exists(head_tail[0]):\n cprint('Path to the file does not exist')\n cprint('Stop Clinica and handle this error')\n\n # Download the file from `url` and save it locally under `file_name`:\n cert = ssl.get_server_certificate((\"aramislab.paris.inria.fr\", 443))\n gcontext = ssl.SSLContext()\n req = Request(url)\n try:\n response = urlopen(req, context=gcontext)\n except URLError as e:\n if hasattr(e, 'reason'):\n cprint('We failed to reach a server.')\n cprint(['Reason: ' + e.reason])\n elif hasattr(e, 'code'):\n cprint('The server could not fulfill the request.')\n cprint(['Error code: ' + e.code])\n else:\n try:\n with open(filename, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except OSError as err:\n cprint(\"OS error: {0}\".format(err))", "def http_download(url, target_path):\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError, e:\n if not hasattr(e, 'code'):\n raise\n resp = e\n if resp.code != 200:\n raise IOError(\"Request url(%s) expect 200 but got %d\" %(url, resp.code))\n\n with open(target_path, 'wb') as f:\n shutil.copyfileobj(resp, f)\n return target_path", "def download_file(url, outfile=None):\n if not outfile:\n outfile = url.split(\"/\")[-1]\n info(\"Downloading %s to %s\" % (url, outfile))\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(outfile, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=8192):\n f.write(chunk)\n return outfile", "def downloadFile(url, filePath):\n log.finer(\" Opening URL: %s to %s\" % (url, filePath))\n MozURLopener().retrieve(url, filePath)", "def _download(item):\n\n filename = item.filename()\n filename = os.path.join(item.vdir(), filename)\n logger.info(\"Downloading '%s' to %s\" % (item.show, filename))\n\n f = open(filename, \"wb\")\n\n buf = net.tivoget(item.show.url)\n for chunk in buf:\n f.write(chunk)\n\n f.close()\n\n item.downloaded = True\n item.save()", "def get_text(self, url, *, timeout, headers):", "def download_url(self, fname):\n if not fname in self.data:\n return ''\n url = '/'.join([\n self.context.absolute_url(),\n '@@download-file',\n self.id + ':' + fname\n ])\n return url", "def urlretrieve(url, filename, reporthook=None, data=None):\n\n def chunk_read(response, chunk_size=8192, reporthook=None):\n content_type = response.info().get('Content-Length')\n total_size = -1\n if content_type is not None:\n total_size = int(content_type.strip())\n count = 0\n while True:\n chunk = response.read(chunk_size)\n count += 1\n if reporthook is not None:\n reporthook(count, chunk_size, total_size)\n if chunk:\n yield chunk\n else:\n break\n\n response = urlopen(url, data)\n with open(filename, 'wb') as fd:\n for chunk in chunk_read(response, reporthook=reporthook):\n fd.write(chunk)", "def single_download(self, url, meta_mode=False):\n self.println(DL_HEAD)\n try:\n if self.djs_core is None or self.analyzer is None:\n print(\"Download failed, enter `help` for help.\")\n else:\n if meta_mode:\n self._meta_download([url, ])\n else:\n self._download([url, ])\n os.chdir(self.home)\n except Exception as e:\n self.println(\"Download failed and stopped.\")\n print(str(e))\n self.println(DL_TAIL)", "def _download(url, file_name):\n # File 
length can only be approximated from the resulting GET, unfortunately\n r = requests.get(url, stream=True)\n if 'Content-Length' in r.headers:\n file_len = int(r.headers['Content-Length'])\n elif 'X-Original-Content-Length' in r.headers:\n file_len = int(r.headers['X-Original-Content-Length'])\n else:\n file_len = 0\n r.raw.decode_content = True\n with open(file_name, 'wb') as f:\n _copyfileobj(r.raw, f, chunks=(file_len / (64. * 1024)))\n r.close()\n\n return file_name", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname" ]
[ "0.7638134", "0.755848", "0.73113656", "0.72749597", "0.7246588", "0.7025229", "0.70069635", "0.6974052", "0.6928931", "0.6926085", "0.69255304", "0.6919365", "0.6906989", "0.6897153", "0.68755436", "0.68460965", "0.683638", "0.68292296", "0.6827624", "0.6824476", "0.6773361", "0.67723304", "0.6765092", "0.6734787", "0.6734706", "0.6726219", "0.6717798", "0.6708025", "0.6706707", "0.66887057", "0.66861993", "0.6677546", "0.66767395", "0.6662527", "0.66525346", "0.66473997", "0.66359854", "0.661345", "0.6595911", "0.65547884", "0.65518945", "0.65237695", "0.6520274", "0.65202254", "0.6520191", "0.6518746", "0.64985526", "0.6498093", "0.6495964", "0.64842385", "0.648042", "0.6464513", "0.6463848", "0.6456556", "0.64395165", "0.64287895", "0.6424985", "0.6414527", "0.6411154", "0.6404309", "0.64040154", "0.6403117", "0.64030397", "0.63998187", "0.6384025", "0.63642657", "0.6355472", "0.63457304", "0.6334747", "0.6327694", "0.6325369", "0.63120365", "0.6307722", "0.6307425", "0.63072187", "0.630602", "0.6302846", "0.6301179", "0.6296166", "0.6290024", "0.62777996", "0.6264986", "0.62615174", "0.6255935", "0.6253604", "0.62439007", "0.62416565", "0.6230702", "0.6229615", "0.62289876", "0.62139845", "0.6213044", "0.6208884", "0.62062836", "0.62047833", "0.62017214", "0.6191643", "0.61907816", "0.6177316", "0.61720634" ]
0.705656
5
Load parsed beautifulsoup object holding the full html
def load_parsed(self):
    with open(self.fname) as f:
        self.parsed = BeautifulSoup(f.read(), features="html.parser")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")", "def load_page(self) -> bs4.BeautifulSoup:\n\n res = requests.get(self.url)\n\n res.raise_for_status()\n return bs4.BeautifulSoup(res.text, 'html.parser')", "def parse_source(html, encoding='utf-8'):\n return BeautifulSoup(html, from_encoding=encoding)", "def update_html(self):\n self.html = self.driver.page_source\n self.soup = BeautifulSoup(self.html, features=\"lxml\")", "def make_soup(self):\n req = urllib.request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n f = urllib.request.urlopen(self.html)\n soupdata = BeautifulSoup(f, \"html.parser\")\n return soupdata", "def parsed_html():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width\">\n <title>Page title</title>\n <link rel=\"stylesheet\" href=\"/static/styles.css\" />\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n <img src=\"/static/img.jpg\" width=\"500\" height=\"300\" />\n <img src=\"/static/img.gif\" layout=\"nodisplay\" />\n <img src=\"/static/img.png\" />\n <script type=\"text/javascript\" src=\"/static/scripts.js\" />\n <script type=\"application/json\" src=\"/static/data.json\" />\n </body>\n </html>\n \"\"\"\n )", "def load_data(url: str):\n\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def make_file_soup(self):\n soup = BeautifulSoup(self.html, 'html.parser')\n return soup", "def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def get_soup(self, html):\n if html is not None:\n soup = BeautifulSoup(html, \"html.parser\")\n return soup\n else:\n return", "def getHTML(self):\n html = requests.get(self.URL).text\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)", "def _get_soup_object(url: str) -> bs4.BeautifulSoup:\n request_result=requests.get(url)\n soup = bs4.BeautifulSoup(request_result.text, \"html.parser\")\n return soup", "def get_html_parser(url):\n response = requests.get(url)\n return BeautifulSoup(response.content, 'html.parser')", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def soup(self):\n if not self._soup:\n resp = requests.get(self.url)\n if not resp.ok:\n logging.warning('Status of request is not ok.')\n self._soup = BeautifulSoup(resp.content, 'html.parser')\n\n return self._soup", "def parse_html_with_bs4(html_src):\n try:\n BeautifulSoup(html_src, 'html.parser')\n except Exception as exc:\n print exc, traceback.format_exc()\n pass", "def load_data(self):\n with open(self.FILE, 'r') as html_file:\n document = html_file.read()\n self.HTML = document", "def get_html_content(self, url):\n\n req = urllib2.Request(url, 
headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def bs4(self):\n\n if self._soup is None:\n self._soup = bs4.BeautifulSoup(self.raw_html, 'html.parser')\n return self._soup", "def from_html(self, content):\r\n pass", "def get_soup(url):\n\tr = requests.get(url)\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"lxml\")\n\treturn soup", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def get_soup(url):\n\tresponse = urlopen(url)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup", "def getSoup(url):\n return BeautifulSoup(getHtml(url), 'lxml')", "def make_soup(url):\r\n htmlFile = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(htmlFile)\r\n return soup", "def soup(self) -> Soup:\n return Soup(self.html)", "def getHTML(url):\n\n time.sleep(2.00)\n html = urllib2.urlopen(url,timeout=10).read()\n urllib2.urlopen(url).close()\n\n soup = BeautifulSoup(html)\n\n return soup", "def get_soup(url):\n return BeautifulSoup(requests.get(url).content, 'lxml')", "def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html", "def return_beautiful_soup_object(url: str) -> bs4.BeautifulSoup:\n html_filename, headers = urllib.request.urlretrieve(url)\n with open(html_filename) as file:\n soup = BeautifulSoup(file, 'html.parser')\n file.close()\n return soup", "def request(self, url):\r\n\r\n req = self.get(url)\r\n soup = BeautifulSoup(req.content, \"lxml\")\r\n return soup", "def parse_html(filename: str) -> BeautifulSoup:\n with open(filename) as file_handle:\n tree = BeautifulSoup(file_handle, \"html.parser\")\n return tree", "def htmlParsePage(page):\n if 'parsedHtml' not in page:\n logging.debug('Parsing HTML')\n html = page['data']\n html = html.replace(' xmlns=\"http://www.w3.org/1999/xhtml\"', '')\n html = removeThreeByteUtf(html)\n page['parsedHtml'] = BeautifulSoup(html)", "def get_soup(self):\n page = get(self.url)\n if page.status_code == 200:\n soup = BeautifulSoup(page.text, 'lxml')\n return soup\n else:\n raise ConnectionError('The page is not disponible.')", "def soupify(html):\n return BeautifulSoup(html, \"html.parser\")", "def make_soup():\n # It's a pretty complex, auto-generated HTML file. 
Turns out\n # BeautifulSoup's html.parser, lxml, and xml don't completely parse.\n # Initial workarounds chopped up the file and parsed individually, but the\n # most lenient html5lib parse seems to do the trick.\n response = requests.get(\n 'https://genome.ucsc.edu/goldenPath/help/trackDb/trackDbHub.html')\n soup = bs4.BeautifulSoup(response.text, 'html5lib')\n return soup", "def get_soup(url):\r\n page=requests.get(url)\r\n soup = BeautifulSoup(page.text.encode(\"utf-8\"), 'html.parser')\r\n return soup", "def get_soup(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup", "def get_soup(url: str):\n response = requests.get(url)\n\n return BeautifulSoup(response.content, \"html.parser\")", "def get_soup(url: str) -> BeautifulSoup:\n html = get_html(url)\n soup = BeautifulSoup(html, 'lxml')\n return soup", "def parse_html(self):\n if self.file_extension == '.czm': # Caso de fichero comprimido czm.\n folder_path = extract_file(self.input_file) # Descomprime el archivo de entrada.\n self.html_path = find_extension(folder_path, '.html') # Busca el html en el directorio de extracción.\n else: # Caso de html proporcionado directamente.\n self.html_path.append(self.input_file)\n if not self.html_path: # En caso de que no exista ningún html.\n raise IOError('html file not found.')\n for path in self.html_path: # Almacena cada uno de los html parseados en un diccionario.\n html_file = open(path, encoding=\"utf8\") # Almacena los datos del html.\n parsed_html = BeautifulSoup(html_file, \"lxml\") # Hay que instalar lxml.\n self.parsed_html_dic.update({os.path.splitext(os.path.basename(path))[0]:parsed_html})", "def parse(self):\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n url = CostOfLiving.URL.format(self.city)\r\n req = Request(url, headers=hdr)\r\n page = urlopen(req)\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n self.table = soup.find(\"table\", attrs={\"class\": \"data_wide_table\"})", "def page_soup(url):\n html = requests.get(url).text\n return bs(html, 'html.parser')", "def __init__(self, html_contents):\n self.doc = html.document_fromstring(html_contents)", "def __init__(self, html_soup):\n # Drilling down to the internal wrapper <div> tag\n self.data = html_soup.find('div', class_='sbkBrv_SingleResultDesc')", "def read_soup_from_local_html(filename, debug=False):\n\n print('Reading file %s' % (filename))\n try:\n with open(filename, encoding='utf-8', errors='ignore') as f:\n soup = BeautifulSoup(f, \"html.parser\")\n f.close()\n except:\n soup = None\n print(traceback.format_exc())\n return soup", "def get_soup():\n global soup\n html = urlopen(\"http://www.jrenshaw.com/works-in-progress/\")\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def parse(self, response: BeautifulSoup):\n raise NotImplementedError", "def load_to_scraper(self, scraper):\n scraper.url = self.url\n scraper.response = self.response\n scraper.load_soup()\n return scraper", "def parse(url, parser='html5lib', **kwargs):\n return bs4.BeautifulSoup(SESSION.get(url).content, features=parser, **kwargs)", "def _get_soup(self, page=''):\n content = requests.get('%s/%s' % (BASE_URL, page)).text\n return BeautifulSoup(content)", "def get_soup_for_page(url: str) -> BeautifulSoup:\n return BeautifulSoup(get_html(url), 'html.parser')", "def test_parseHtml(self):\n dom = lunchr.parseHtml(self.html)\n self.assertTrue(isinstance(dom, xml.dom.minidom.Document))", "async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:\n if url != self.driver.current_url:\n 
self.driver.get(url)\n return BeautifulSoup(self.driver.page_source, 'lxml')", "def _get_soup(self, url):\n\n # generate a random header \n headers = {'User-Agent': self._random_user_agent()}\n # send a request and get the soup\n response = requests.get(url, headers=headers)\n results = response.content\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n return soup", "def parsed_html_lean():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <title>Page title</title>\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n </body>\n </html>\n \"\"\"\n )", "def get_page(self, url):\n page = self.__open_page(url)\n soup = BeautifulSoup(page, 'html.parser')\n return soup", "def read_html(url: str) -> BeautifulSoup:\n try:\n response = requests.get(url, stream=True)\n status_code = response.status_code\n content_type = response.headers[\"Content-Type\"].lower()\n except requests.RequestException as e:\n raise RuntimeError(f\"Error during requests to {url} : {str(e)}\")\n else:\n if (\n status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n ):\n return BeautifulSoup(response.content, \"html.parser\")", "def source_to_soup(page_source):\n\tpage_source = re.sub('<br>', '', page_source)\n\tpage_source = re.sub('<br/', '', page_source)\n\tpage_source = re.sub('<br />', '', page_source)\n\treturn BeautifulSoup(page_source, 'html.parser', parse_only=SoupStrainer('div'))", "def _grab_tags(self, url):\n a = self._api_request(url)\n return bs4.BeautifulSoup(a,features=\"html.parser\")", "async def load(cls, document: str) -> Dict[str, Any]:\n return await cls.from_html(document)", "def __setSoup(self, url=None, data=None, headers={}):\n if url:\n self.currenturi = url\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('HTML Content cannot be fetched for the url: \\\n %s'%self.currenturi))\n return False\n self._setCurrentPage()\n return True", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = 
Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def page_soup(page):\n return bs4.BeautifulSoup(page, 'html.parser')", "def make_soup(self, base_url):\n response = requests.post(base_url, params=self.league_data)\n self.url = response.url\n soup = BeautifulSoup(response.content, 'lxml')\n return soup", "def request(url):\n response=requests.get(url)\n soup=BeautifulSoup(response.content,\"lxml\")\n return soup", "def create_soup(u):\n req = requests.get(u)\n html = req.text\n s = BeautifulSoup(html, \"html.parser\")\n return s", "def get_soup(url):\r\n res = requests.get(url=url)\r\n soup = BeautifulSoup(res.text, \"html.parser\")\r\n infor_form = soup.find(\"div\", {\"id\": \"content\"})\r\n return infor_form", "def _get_soup(pagerequest):\n html = _get_page(pagerequest)\n return BeautifulSoup(html, 'html.parser')", "def get_html(url):\n # type: (str) -> BeautifulSoup\n headers = {\n \"Accept\": \"text/html\",\n \"Accept-encoding\": \"gzip\"\n }\n with Cache(CACHE_URI) as c:\n cached = c.get(url)\n if cached:\n add_cache_headers(headers, cached)\n # always return cached info regardless\n if cached[\"fresh\"] or url.startswith(JAFC_INFO_URI):\n return BeautifulSoup(cached[\"blob\"], \"html.parser\")\n r = requests.get(url, headers=headers, timeout=SEARCH_TIMEOUT)\n if 200 == r.status_code:\n soup = BeautifulSoup(r.content, \"html.parser\")\n # pre-cache clean-up\n for x in soup([\"script\", \"style\"]):\n x.extract()\n c.set(url, str(soup), r.headers)\n return soup\n if 304 == r.status_code:\n c.touch(url, r.headers)\n return BeautifulSoup(cached[\"blob\"], \"html.parser\")", "def parseSearchHtml(self):\n pass", "def parseSearchHtml(self):\n pass", "def __download_web(self):\n page = requests.get(self.url)\n\n if page.status_code == 200:\n return BeautifulSoup(page.content, \"html.parser\")", "def scrape(self):\n\n self.url = self.headline.url\n\n # Should raise exception...\n if not self.parsing_template:\n return None, None, None, None, None\n\n try:\n response = self.download()\n self.source = response.text\n except:\n return None, None, None, None, None\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n if soup:\n return self.parse(soup)\n else:\n return None, None, None, None, None", "def get_soup(self, page):\n if page in self.soups:\n return self.soups[page]\n else:\n response = self.client.get(page)\n soup = bs4.BeautifulSoup(response.content, \"html5lib\")\n self.soups[page] = soup\n return soup", "def get_soup_obj(url):\n try:\n html = session.get(url, headers=headers).text\n return BeautifulSoup(html, \"html.parser\")\n except HTTPError:\n print(\"{} not reachable\".format(url))\n return None", "def __setStoneSoupForCurrentUri(self, data=None, headers={}):\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('Page Content cannot be fetched for the url: \\\n %s'%self.currenturi))\n raise Exception('Page content not fetched for th url %s'%self.currenturi)\n #self._setCurrentPage()\n self.soup = BeautifulStoneSoup(self.rawpage)", "def get_soup(url):\n url_hash = get_url_hash(url)\n www_cache_file = os.path.join(www_cache_dir, url_hash)\n if os.path.exists(www_cache_file):\n with open(www_cache_file) as file:\n charset = 'utf8'\n data = file.read().encode(charset)\n else:\n print('Downloading %s...' 
% url, file=sys.stderr)\n with urlopen(url) as stream:\n charset = stream.info().get_param('charset')\n data = stream.read()\n with open(www_cache_file, 'w') as file:\n file.write(data.decode(charset))\n return bs4.BeautifulSoup(data, 'lxml', from_encoding=charset)", "def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)", "def _get_soup_by_path(self, path):\n return BeautifulSoup(requests.get('%s%s' % (self.URL, path), headers=self.HEADERS).content)", "def get_content_from_link(link):\n\n page = requests.get(link)\n soup = BeautifulSoup(page.text, \"lxml\")\n return soup", "def make_request(url):\r\n req = requests.get(url, headers)\r\n soup = BeautifulSoup (req.content, \"html5lib\")\r\n return soup", "def get_body_soup(self):\n if self.body_type != 'HTML':\n return None\n else:\n return bs(self.body, 'html.parser')", "def load_html(self, *, scope: Scope, doc_key: str, stream_name: str, requested_output_size: int) -> HtmlResult:", "def parse(html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n table = soup.find('div', class_=\"container-fluid cols_table show_visited\")\n# print table.prettify().encode('UTF-8')\n jobstats = []\n\n for row in table:\n jobstats.append({\n \"title\":row.find('div', class_=\"col-sm-7\").a.text,\n \"category\":row.find('div', class_=\"text-muted\").a.text,\n \"price\":row.find('div', class_=\"col-sm-2 amount title\").\n text.strip(),\n \"applications\":row.find(\n 'div', class_=\"col-sm-3 text-right text-nowrap hidden-xs\"\n ).text.strip()\n })\n return jobstats", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue", "def get_soup(url):\n opener = urllib2.build_opener()\n request = urllib2.Request(url);\n request.add_header('User-Agent','Mozilla/6.0 (Windows NT 6.2; WOW64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1');\n data = opener.open(request).read(); \n return BeautifulSoup(data);", "def parse_announcement_data(self) -> 'Scraper':\n logger.info('Parsing extracted html partial')\n for tag in self.html_partial: # there are 63 tags\n if tag.name == 'h4':\n announcement_data = self.get_data_from_tag(tag)\n self.announcement_data_list.append(announcement_data)\n logger.info('Compiled announcement data list from html 
web page partial')\n return self", "def add_soup(response, soup_config):\r\n if \"text/html\" in response.headers.get(\"Content-Type\", \"\"):\r\n response.soup = BeautifulSoup(\r\n response.content, \"html.parser\", **soup_config)", "def soupify(html):\n try:\n return BeautifulSoup(html, \"html.parser\")\n except Exception as e: # pragma: no cover\n raise SoupError(str(e))", "def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces", "def get_soup(self, url):\n if self.session is None:\n return BeautifulSoup(requests.get(url).content, features=\"xml\")\n else:\n return BeautifulSoup(self.session.get(url).content, features=\"xml\")", "def soup_explore(url_or_file, session=None):\n soup = ph.get_soup(url_or_file, session)\n if not soup:\n ph.logger.error('No soup found for {}'.format(url_or_file))\n else:\n print('\\nExplore the \"soup\" object\\n\\n')\n embed()\n return soup", "def gather_current(url=URL):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def soup(url):\n handle = ''\n max_tries = 10\n for i in range(max_tries):\n try:\n handle = urlopen(url)\n handle = handle.read()\n break\n except:\n logging.exception('urlopen failed (attempt %d)', i + 1)\n if i == max_tries - 1:\n logging.error('the maximum urlopen attempts have been reached')\n raise\n time.sleep(1)\n\n s = BeautifulSoup(handle)\n return s", "def get_soup(url, using_TOR = False):\n try:\n request = get_request(url, using_TOR = using_TOR)\n if request == None:\n logger.debug(\"Request is empty, don't create soup.\")\n return None\n soup = BeautifulSoup(request, 'html.parser')\n return soup\n except Exception as error:\n #logger.warn(traceback.format_exc())\n raise\n return None", "def hot_soup(url, payload={}):\r\n response = query(url, payload)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n return soup", "def parse(afile, builder=None, encoding=None):\n bob = builder\n\n def emit(this_soup):\n \"\"\"\n emit cleaned up html\n :type this_soup:\n \"\"\"\n if isinstance(this_soup, BS.element.NavigableString):\n for ignorable in ignorable_soup:\n if isinstance(this_soup, ignorable):\n return\n bob.data(unescape(this_soup))\n else:\n attrib = dict([(k, unescape(v)) for k, v in this_soup.attrs])\n bob.start(this_soup.name, attrib)\n for s in this_soup:\n emit(s)\n bob.end(this_soup.name)\n\n # determine encoding (the document charset is not reliable)\n if not hasattr(afile, \"read\"):\n infile = open(afile)\n text = infile.read()\n assert isinstance(encoding, object)\n if not encoding:\n try:\n encoding = \"utf-8\"\n unicode(text, encoding)\n except UnicodeError:\n encoding = \"iso-8859-1\"\n soup = BS.BeautifulSoup(\n text, convertEntities=\"html\", fromEncoding=encoding\n )\n # build the tree\n if not bob:\n bob = ET.TreeBuilder()\n emit(soup)\n root = bob.close()\n assert isinstance(root, object)\n # wrap the document in a html root element, if necessary\n if 1 == len(root) and \"html\" == root[0].tag:\n return root[0]\n root.tag = \"html\"\n return root" ]
[ "0.7414255", "0.7408412", "0.7246797", "0.7061659", "0.6892907", "0.68442833", "0.6834316", "0.6795834", "0.6783969", "0.67211264", "0.67171395", "0.6716718", "0.66845864", "0.6660223", "0.660617", "0.65939754", "0.6587271", "0.6559653", "0.65470797", "0.6519594", "0.65095526", "0.64791566", "0.64658093", "0.64374965", "0.6401383", "0.639497", "0.6391123", "0.6386812", "0.63863516", "0.6374355", "0.6363865", "0.635929", "0.63497424", "0.6296715", "0.629073", "0.62831235", "0.62788147", "0.6258093", "0.62579185", "0.6249987", "0.62200886", "0.621765", "0.6187469", "0.61767787", "0.6175006", "0.6174137", "0.61656684", "0.61370873", "0.61323446", "0.6121251", "0.6113201", "0.61075383", "0.60990816", "0.60864466", "0.6084862", "0.60845137", "0.60720533", "0.6059257", "0.6058032", "0.60549504", "0.60115117", "0.6003881", "0.60020125", "0.5975208", "0.5966616", "0.59610826", "0.5948593", "0.5946547", "0.59366786", "0.59185106", "0.59041923", "0.5901859", "0.5892662", "0.5892662", "0.58847326", "0.5882598", "0.5871296", "0.58307356", "0.5819052", "0.5818889", "0.5810524", "0.5798732", "0.5795346", "0.5794095", "0.5783314", "0.57786727", "0.5772929", "0.5768234", "0.5762697", "0.57551336", "0.574012", "0.57381845", "0.57311606", "0.5726617", "0.5698799", "0.56980306", "0.5691414", "0.56905735", "0.5689316", "0.56841147" ]
0.7917264
0
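The candidate snippets in the record above share a single fetch-and-parse idiom: request a URL, then hand the response body to BeautifulSoup. A minimal, self-contained sketch of that idiom follows; it is illustrative only — the URL, timeout, and parser choice are assumptions, not values taken from any record in this dump.

import requests
from bs4 import BeautifulSoup

def get_soup(url: str) -> BeautifulSoup:
    # Fetch the page and parse the response body into a soup object.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return BeautifulSoup(response.content, "html.parser")

if __name__ == "__main__":
    soup = get_soup("https://example.com")
    print(soup.title.string if soup.title else "no <title> element found")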
Iterator over maintext paragraph elements; this includes footnotes.
def _paragraphs_raw(self):
    for par in self.parsed.find_all("p")[self.PAR_START:]:
        yield par
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
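The record above pairs the docstring query with a three-line generator from its source class. A minimal sketch of how that generator might sit in context is given below; it assumes only what the record shows (self.parsed is a parsed HTML tree supporting find_all, PAR_START is an integer offset past any front matter), while the surrounding class name, constructor, and example input are illustrative scaffolding, not part of the original code.

from bs4 import BeautifulSoup

class MainTextDocument:
    # Assumed: index of the first <p> that belongs to the main text.
    PAR_START = 0

    def __init__(self, html: str):
        # Assumed constructor; the record only shows that self.parsed
        # supports find_all("p").
        self.parsed = BeautifulSoup(html, "html.parser")

    def _paragraphs_raw(self):
        # Iterator over maintext paragraph elements; this includes footnotes.
        for par in self.parsed.find_all("p")[self.PAR_START:]:
            yield par

if __name__ == "__main__":
    doc = MainTextDocument('<p>Main text.</p><p class="foot">A footnote.</p>')
    print([p.get_text() for p in doc._paragraphs_raw()])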
[ "def linked_text_paragraphs(self):\n for par in self._main_paragraphs_raw():\n par_links = par.find_all('a')\n if len(par_links) == 0:\n self.main_count += len(par.text)\n yield par.text\n else:\n for el in par.contents:\n if el.name is None:\n #this is plain text\n self.main_count += len(str(el))\n yield str(el)\n elif el.name == \"a\" and \"href\" in el.attrs:\n id = el[\"href\"].lstrip('#')\n try:\n foot_par = self._get_footnote_par(id)\n except NoFootnoteError:\n self.log(f\"Could not find footnote for {id}, skipping.\")\n self.footnote_count += len(foot_par.text)\n yield foot_par.text", "def iter_main_text(self, element):\n if element.tag == 'note':\n return\n if element.text:\n yield element.text\n for e in element:\n for se in self.iter_main_text(e):\n yield se\n if e.tail:\n yield e.tail", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def paragraph(self, text):\n return [text]", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def extract_paragraph_test(file_name, url_text = None, show_property = False, database = None, 
extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc(parser = 'cde_parser')\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_test(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def get_paragraphs(self, batch=None):\n\t\t\n\t\t# loop through the document stream for this document database\n\t\tfor document in self.get_documents(batch):\n\t\t\tfor paragraph in document[\"paragraphs\"]:\n\t\t\t\t# yield the paragraphs one by one\n\t\t\t\tyield paragraph", "def paragraphs(self, data=True):\n return self.nodes(self.max_depth, data)", "def end_paragraph(self):\n raise NotImplementedError", "def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def __iter__(self):\r\n for text in self.get_texts():\r\n yield self.dictionary.doc2bow(text, allow_update=False)", "def paragraphs(iterable, splitter):\n assert isinstance(splitter, (tuple, list))\n splitter = tuple(splitter)\n paragraph = []\n for line in iterable:\n if line.startswith(splitter):\n if paragraph:\n yield paragraph\n paragraph = [line]\n else:\n paragraph.append(line)\n if paragraph:\n yield paragraph", "def generate_new_book(text):\n\n for paragraph in text:\n for sentence in paragraph:\n for word in sentence:\n print(word, end=' ')\n print()\n print()", "def paragraph(self, on, **kw):\n if self._terse:\n return ''\n FormatterBase.paragraph(self, on)\n tag = 'p'\n if on:\n tagstr = self._open(tag, **kw)\n else:\n tagstr = self._close(tag)\n return tagstr", "def 
generate_paragraphs(self):\n def dig(hr_tag, end_index):\n paragraphs = []\n for tag in hr_tag.children:\n if tag.name == 'hr':\n return paragraphs + dig(tag, end_index)\n text = (str(tag)\n if isinstance(tag, NavigableString)\n else tag.get_text())\n if '$' in text and not tag.find('table'):\n start_index = document_txt.index(text[:search_chars])\n end_index = start_index + len(text)\n paragraphs.append({\n 'text': text,\n 'start': start_index,\n 'end': end_index\n })\n return paragraphs\n\n with open('document.txt', 'rb') as f1:\n document_txt = f1.read().decode()\n search_chars = 20\n paragraphs = dig(self.soup.find('body'), 0)\n paragraphs = sorted(paragraphs, key=lambda x: x['start'])\n with open('paragraphs.txt', 'wb') as f2:\n f2.write(json.dumps(paragraphs, indent=2, sort_keys=True).encode())", "def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()", "def paragraphs(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}')+1)]\n number_of_paragraphs = len(list(root.iter(root_tag + 'p')))\n return number_of_paragraphs", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue", "def _process_layout(self, layout):\r\n # Here we just group text into paragraphs\r\n elements = []\r\n for lt_obj in layout:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n elements.append(Paragraph(lt_obj.get_text().strip()))\r\n elif isinstance(lt_obj, LTFigure):\r\n # Recursive...\r\n elements.extend(self._process_layout(lt_obj))\r\n return elements", "def _visit_paragraph(self,elem):\n # only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def _process_layout(self, layout):\n # Here we just group text into paragraphs\n elements = []\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n elements.append(Paragraph(lt_obj.get_text().strip()))\n elif isinstance(lt_obj, LTFigure):\n # Recursive...\n elements.extend(self._process_layout(lt_obj))\n 
return elements", "def read(self, paragraph_idx=None):\n if paragraph_idx:\n self.paragraphs[paragraph_idx].read()\n else:\n for paragraph in self.paragraphs:\n paragraph.read()", "def find_text_in_p(self, el):\n\n all = []\n for el in el.findall(\".//p\"):\n t = el.text_content().strip()\n if len(t)<40:\n continue\n all.append(t)\n\n return \" \".join(all)", "def getRtf(self):\n self.pieces = []\n for node in self.root.findall(\"MiscellaneousDocumentText\"):\n for child in node:\n if child.tag == \"Para\":\n self.__addPara(child)\n elif child.tag in (\"ItemizedList\", \"OrderedList\"):\n self.__addList(child, child.tag)\n return \"\".join(self.pieces)", "def split_paragraphs(block):\n # Break block contents into paragraphs by blank lines.\n def gen(block):\n par = []\n for obj in block:\n if isinstance(obj, Text) and obj.empty:\n # New paragraph.\n yield par\n par = []\n else:\n par.append(obj)\n yield par\n\n # Combine paragraphs. \n def finish(pars):\n for par in pars:\n if len(par) == 0:\n continue\n elif any( isinstance(o, Text) for o in par ):\n # Paragraph contains text. Use a P element.\n yield Block(par, tag='P')\n else:\n # Doesn't contain text; don't wrap it.\n yield from par\n\n block[:] = finish(gen(block))", "def _convert(self):\n root = cElementTree.fromstring(self.html)\n for el in root.getiterator():\n if el in self.visited:\n continue\n self.visited.update([el])\n if el.tag == 'p':\n parser = ParagraphParser(el)\n self.document_state.append(parser.tag)\n self.visited.update(el.getiterator())", "def generate_paragraphs(self, count=3):\n\n with self.open_text_data() as f:\n result = self.read_paragraphs(f, count=count)\n return result", "def __yahoo_parse_text(self, content):\n text = ''\n # Process all paragraphs.\n paragraphs = content.find_all('p')\n for par in paragraphs:\n text += '<p>' + par.getText(separator=' ') + '</p>'\n # Remove all extra whitespace (single space remains).\n text = ' '.join(text.strip().split())\n # Result\n return text", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def extract_sentences(paper_path, para_yes):\n\n f = open(paper_path, 'rb')\n doc = Document.from_file(f, readers=[HtmlReader()])\n\n sen_yes_arr = list()\n sen_no_arr = list()\n\n elem_all = np.arange(0,len(doc))\n para_no = np.delete(elem_all, para_yes)\n\n for i in para_no:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_no_arr.append(sentence)\n\n for i in para_yes:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_yes_arr.append(sentence)\n\n\n return sen_yes_arr, sen_no_arr", "def paragraphs(str):\n return [mark_safe(x) for x in para_list(str)]", "def parse_poet_poem(self, response):\n poemitem = response.meta['poemitem']\n sresponse = scrapy.Selector(response)\n poemitem['poem_text'] = sresponse.xpath('//div[@property = \"content:encoded\"]//text()').extract()\n poemitem['poem_copyright'] = sresponse.xpath('//div[@class = \"poem-credit\"]//p//text()').extract()\n\n yield poemitem", "def paragraph(lines) -> List[Tuple[str, Any]]:\n p = Paragraph.parse_lines(lines)\n acc = []\n for c in p.children:\n if type(c).__name__ == \"Directive\":\n if c.role == \"math\":\n acc.append(Math(c.value))\n else:\n acc.append(c)\n else:\n acc.append(c)\n p.children = acc\n return p", "def segment_paragraphs(root_el, cites=[]):\n from capdb.models import Citation\n\n last_el_ends_mid_sentence = False\n 
join_with_last_el = False\n html_to_prepend_to_next_el = ''\n\n # build a lookup like {\"935 F.3d\": 1, \"123 Mass.\": 2}\n reporter_indexes = {}\n for i, cite in enumerate(Citation.sorted_by_type(cites)):\n eyecite_cite = next(extract_citations_from_text(cite.cite), None)\n if eyecite_cite:\n volume = eyecite_cite.groups['volume']\n reporter = eyecite_cite.groups['reporter']\n reporter_indexes[f\"{volume} {reporter}\"] = i+1\n\n # special case -- \"[134 Hawai'i 89]\" is a page number for \"134 Haw. 86\"\n if reporter == 'Haw.':\n reporter_indexes[f\"{volume} Hawai'i\"] = i + 1\n\n # process each paragraph\n for el_pq in PyQuery(root_el)('root').children().items():\n el = el_pq[0]\n if el.tag == 'header-end':\n continue\n\n html = inner_html(el)\n page_label = None\n exact_match = False\n index = 1\n\n # clean el whitespace\n clean_html = re.sub(r'\\s+|^<br>|<br>$', ' ', html).strip()\n if not clean_html:\n el_pq.remove()\n continue\n\n # strip tags to handle examples like\n # \"<p><strong>[16 N.Y.3d 274] <strong> <p/></strong></strong> <p> <strong> [945 N.E.2d 484]</strong> </p> <p> <strong>OPINION OF THE COURT</strong> </p></p>\"\n # in NE2d/945/945ne2d484.xml\n html_no_tags = strip_tags(clean_html).strip()\n\n # check for 'Page 123'\n m = re.match(r'Page (\\d+)$', html_no_tags)\n if m:\n page_label = make_page_label(m[1])\n exact_match = True\n\n # check for '[123 Mass. 456]'\n else:\n m = re.search(r\"\\[(?P<volume>\\d+) (?P<reporter>[A-Z][A-Za-z0-9 .']+) (?P<page>\\d+)\\]\", html_no_tags)\n if m:\n vol_reporter = f\"{m['volume']} {m['reporter']}\"\n if vol_reporter in reporter_indexes:\n index = reporter_indexes[vol_reporter]\n is_valid_reporter = True\n else:\n is_valid_reporter = False\n exact_match = m[0] == html_no_tags\n if exact_match or is_valid_reporter:\n page_label = make_page_label(m['page'], index)\n\n # handle page label found\n if page_label:\n clean_html = clean_html.replace(escape(m[0]), page_label)\n\n if exact_match:\n if last_el_ends_mid_sentence:\n join_with_last_el = True\n html_to_prepend_to_next_el += clean_html\n el_pq.remove()\n continue\n\n if html_to_prepend_to_next_el:\n clean_html = html_to_prepend_to_next_el + clean_html\n html_to_prepend_to_next_el = ''\n\n if join_with_last_el:\n join_with_last_el = False\n prev_el = el_pq.prev()\n if prev_el[0].tag == el_pq[0].tag:\n prev_el.append(('' if prev_el.text().endswith('-') else ' ')+clean_html)\n el_pq.remove()\n continue\n\n last_el_ends_mid_sentence = bool(mid_sentence_re.search(html_no_tags))\n\n if clean_html != html:\n el_pq.html(clean_html)", "def _process_usx_file(self, usx):\n tree = ET.parse(usx)\n root = next(tree.iter())\n for marker in list(root):\n style = marker.get('style')\n if style in self.publishable and style.startswith(self.main_text):\n for text in self._get_text(marker):\n yield text\n usx.close()", "def paras(self, fileids=None, categories=None):\n for doc in self.docs(fileids):\n table_id = 0\n for block in self.iter_block_items_(doc):\n paragr = ''\n #print(block.text if isinstance(block, Paragraph) else '<table>')\n if isinstance(block, Paragraph):\n if len(re.sub(r'\\s+', '', block.text)) == 0: \n continue\n else:\n paragr = block.text\n elif isinstance(block, Table):\n paragr = f'table_{table_id}'\n table_id += 1\n yield paragr\n\n # print(\"\\t\".join(table_header))\n # for idx, para in enumerate(doc.paragraphs):\n # if idx < 3: continue\n # # text = re.sub(r'[\\d]+[\\/\\.]\\d+[\\/\\.]*\\d*', '', para.text)\n # # # text = re.sub(r'Ф\\.И\\.О\\.', 'ФИО', text)\n # # if 
DIAGNOS_PATTERN in text: continue\n # if len(re.sub(r'\\s+', '', para.text)) == 0: continue\n # # if THERAPY_PATTERN in text: break\n # # yield text\n # yield para.text", "def collect_content(parent_tag):\n content = ''\n for tag in parent_tag:\n p_tags = tag.find_all('p')\n for tag in p_tags:\n content += tag.text + '\\n'\n return content", "def iter_markdown_lines(markdown_html):\n nodes = get_markdown_element_tree(markdown_html)\n\n for node in nodes:\n if node.nodeType == node.ELEMENT_NODE:\n if (node.tagName == 'div' and\n node.attributes.get('class', 'codehilite')):\n # This is a code block, which will consist of a bunch of lines\n # for the source code. We want to split that up into\n # individual lines with their own <pre> tags.\n for line in node.toxml().splitlines():\n yield '<pre>%s</pre>' % line\n elif node.tagName in ('ul', 'ol'):\n # This is a list. We'll need to split all of its items\n # into individual lists, in order to retain bullet points\n # or the numbers.\n #\n # For the case of numbers, we can set each list to start\n # at the appropriate number so that they don't all say \"1.\"\n i = node.attributes.get('start', 1)\n\n for child_node in node.childNodes:\n if (child_node.nodeType == child_node.ELEMENT_NODE and\n child_node.tagName == 'li'):\n # This is a list item element. It may be multiple\n # lines, but we'll have to treat it as one line.\n yield '<%s start=\"%s\">%s</%s>' % (\n node.tagName, i, child_node.toxml(),\n node.tagName)\n\n i += 1\n elif node.tagName == 'p':\n # This is a paragraph, possibly containing multiple lines.\n for line in node.toxml().splitlines():\n yield line\n else:\n # Whatever this is, treat it as one block.\n yield node.toxml()\n elif node.nodeType == node.TEXT_NODE:\n # This may be several blank extraneous blank lines, due to\n # Markdown's generation from invisible markup like fences.\n # We want to condense this down to one blank line.\n yield '\\n'", "def test_extend_to_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. 
Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"8.0\", \"13.33\"),\n command_name=\"extend-to-paragraph\",\n )", "def __init__(self, number, title, paragraphs):\n self.number = number\n self.title = title\n self.paragraphs = []\n for paragraph_lines in paragraphs:\n new_pragraph = Paragraph.Paragraph(paragraph_lines)\n self.paragraphs.append(new_pragraph)", "def _get_all_paragraphs(self) -> List[Paragraph]:\n documents = self.document_store.get_all_documents()\n\n paragraphs = []\n p_id = 0\n for doc in documents:\n for p in doc.text.split(\"\\n\\n\"): # TODO: this assumes paragraphs are separated by \"\\n\\n\". Can be switched to paragraph tokenizer.\n if not p.strip(): # skip empty paragraphs\n continue\n paragraphs.append(\n Paragraph(document_id=doc.id, paragraph_id=p_id, text=(p,), meta=doc.meta)\n )\n p_id += 1\n logger.info(f\"Found {len(paragraphs)} candidate paragraphs from {len(documents)} docs in DB\")\n return paragraphs", "def build(self, verbose: bool = False):\n self.content = \"\"\n for par in self.paragraphs:\n for observ in par.observations:\n\n # save the meta_data\n self.observs_id.append(observ.observ_id)\n self.sit_relev.append(observ.relevance2)\n\n # append the sentence to the article\n self.content += f\"{observ.observation_new} \"\n if verbose:\n print(observ.year, observ.week_number, observ.day_number, observ.pattern, observ.observation_new)\n\n # end of paragraph (add a paragraph divider)\n self.content += self.par_divider", "def process(self, doc, is_):\n for block in doc.getTextBlocks():\n if block.isContent():\n bs = block.getContainedTextElements()\n if bs:\n self.contentBitSet = self.contentBitSet.union(bs)\n\n self.feed(is_)", "def __parse_docs(self, docs, analyses=True):\n # iter over docs\n for i, doc in enumerate(docs):\n _meta = doc.attrib['title']\n # iter over examples in *doc*\n for snip in doc.getchildren()[1:]:\n _text = str()\n _idx = 0\n _target_idxs = list()\n _ana = list()\n # iter over words in cur example\n for word in snip.getchildren():\n if word.tag == 'text':\n _text += word.text\n _idx += len(word.text)\n \n if len(word.attrib) > 0:\n _text += word.attrib['text']\n # process target\n if word.attrib.get('target') is not None:\n _target_idxs.append((_idx, _idx + len(word.attrib['text'])))\n if analyses:\n _ana.append(self.__get_ana(word))\n \n _idx += len(word.attrib['text'])\n \n if _target_idxs:\n for i, ixs in enumerate(_target_idxs):\n if analyses:\n yield _text, ixs, _meta, _ana[i]\n else:\n yield _text, ixs, _meta, _ana\n else:\n continue", "def iter_block_items_(self, parent):\n if isinstance(parent, _Document):\n parent_elm = 
parent.element.body\n elif isinstance(parent, _Cell):\n parent_elm = parent._tc\n elif isinstance(parent, _Row):\n parent_elm = parent._tr\n else:\n raise ValueError(\"something's not right\")\n \n for child in parent_elm.iterchildren():\n if isinstance(child, CT_P):\n yield Paragraph(child, parent)\n elif isinstance(child, CT_Tbl):\n yield Table(child, parent)", "def test_get_description_markdown_paragraphs(self):\n description = get_description(\"Paragraph 1\\n\\nParagraph 2\")\n expected = \"<p>Paragraph 1</p>\\n<p>Paragraph 2</p>\"\n self.assertEqual(description, expected)", "def ExtractText(self, selector):\n xpaths = map(self.tree.xpath, selector)\n elements = list(chain.from_iterable(xpaths))\n paragraphs = [e.text_content() for e in elements]\n paragraphs = [s.strip() for s in paragraphs if s and not s == ' ']\n\n return paragraphs", "def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"", "def process_paragraph( paragraph ):\n\t# Lists of bounding boxes, text, and probabilities\n\tline_box_list = []\n\tline_text_list = []\n\tline_prob_list = []\n\n\t# Line under processing\n\tcurrent_line_text = []\n\tcurrent_line_prob = []\n\t# Bounding box temporary variables\n\tx1 = 100000\n\ty1 = 100000\n\tx2 = 0\n\ty2 = 0\n\n\tfor word in paragraph.words:\n\t\tfor symbol in word.symbols:\n\t\t\t# x1, y1 (Left upper corner)\n\t\t\tif symbol.bounding_box.vertices[0].x < x1:\n\t\t\t\tx1 = symbol.bounding_box.vertices[0].x\n\t\t\tif symbol.bounding_box.vertices[0].y < y1:\n\t\t\t\ty1 = symbol.bounding_box.vertices[0].y\n\t\t\tif symbol.bounding_box.vertices[1].y < y1: \n\t\t\t\ty1 = symbol.bounding_box.vertices[1].y\n\t\t\tif symbol.bounding_box.vertices[3].x < x1:\n\t\t\t\tx1 = symbol.bounding_box.vertices[3].x\n\t\t\t# x2, y2 (right lower corner)\n\t\t\tif symbol.bounding_box.vertices[2].x > x2:\n\t\t\t\tx2 = symbol.bounding_box.vertices[2].x\n\t\t\tif symbol.bounding_box.vertices[2].y > y2:\n\t\t\t\ty2 = symbol.bounding_box.vertices[2].y\n\t\t\tif symbol.bounding_box.vertices[1].x > x2:\n\t\t\t\tx2 = symbol.bounding_box.vertices[1].x\n\t\t\tif symbol.bounding_box.vertices[3].y > y2:\n\t\t\t\ty2 = symbol.bounding_box.vertices[3].y\n\n\t\t\tcurrent_line_text.append( symbol.text )\n\t\t\tcurrent_line_prob.append( symbol.confidence )\n\t\t\t# Check for blank spaces\n\t\t\tif symbol.property.detected_break.type in [ breaks.SPACE, breaks.SURE_SPACE ]:\n\t\t\t\tcurrent_line_text.append( ' ' )\n\t\t\t\tcurrent_line_prob.append( 0.95 )\n\t\t\t# Check for new lines\n\t\t\tif symbol.property.detected_break.type in [ breaks.EOL_SURE_SPACE, breaks.HYPHEN, breaks.LINE_BREAK ]:\n\t\t\t\tline_box_list.append( [x1, y1, x2, y2] )\n\t\t\t\tline_text_list.append( current_line_text )\n\t\t\t\tline_prob_list.append( current_line_prob )\n\t\t\t\t# Line under 
processing\n\t\t\t\tcurrent_line_text = []\n\t\t\t\tcurrent_line_prob = []\n\t\t\t\t# Bounding box temporary variables\n\t\t\t\tx1 = 100000\n\t\t\t\ty1 = 100000\n\t\t\t\tx2 = 0\n\t\t\t\ty2 = 0\n\n\treturn( line_box_list, line_text_list, line_prob_list )", "def split_description_into_paragraphs(unformatted_description):\n description = unformatted_description.strip()\n paragraphs = re.compile(r'[\\n\\r]{2,}').split(description)\n formatted_paragraphs = []\n\n # Sanitise paragraphs\n def external(attrs, new=False):\n url_parts = urlparse(attrs[(None, \"href\")])\n if url_parts.netloc and url_parts.netloc != 'snapcraft.io':\n if (None, \"class\") not in attrs:\n attrs[(None, \"class\")] = \"p-link--external\"\n elif \"p-link--external\" not in attrs[(None, \"class\")]:\n attrs[(None, \"class\")] += \" p-link--external\"\n return attrs\n\n for paragraph in paragraphs:\n callbacks = bleach.linkifier.DEFAULT_CALLBACKS\n callbacks.append(external)\n\n paragraph = bleach.clean(paragraph, tags=[])\n paragraph = bleach.linkify(paragraph, callbacks=callbacks)\n\n formatted_paragraphs.append(paragraph.replace('\\n', '<br />'))\n\n return formatted_paragraphs", "def iter_texts():\n dirs = 'comm_use_subset noncomm_use_subset pmc_custom_license biorxiv_medrxiv'.split()\n for dir in dirs:\n fnames = (DATA_PATH / dir / dir).glob('*')\n for fname in fnames:\n with fname.open() as f:\n content = json.load(f)\n \n for key in 'abstract body_text'.split():\n for row in content[key]:\n yield row['text']", "def footnotes(self, text):\n html = '<div class=\"footnotes\">\\n%s<ol>%s</ol>\\n</div>\\n'\n return html % (self.hrule(), text)", "def parse(self):\n\n text = self.text.li\n\n # helper function to parse both BeautifulSoup tags and NavigableStrings\n def extract_text(x):\n if type(x).__name__ == \"NavigableString\":\n return x\n elif x.name == 'br':\n return '\\n'\n else:\n return x.get_text()\n\n # helper function to get text from a bullet, ignoring potential\n # sub-bullets or images\n def get_bullet_parts(q):\n parts = []\n for c in q.children:\n if c.name == 'ul':\n break\n elif c.name == 'div' and 'thumb' in c['class']:\n pass\n elif c.name == 'a' and 'class' in c.attrs and 'autonumber' in c['class']:\n pass\n else:\n parts.append(c)\n return parts\n\n def is_english(quote, quote_parts=None):\n # reject quotes not in latin alphabet\n alpha = 'abcdefghijklmnopqrstuvwzyz'\n spaceless = quote.replace(' ', '')\n if not len(spaceless):\n print(quote)\n return False\n prop_latin = sum(map(lambda x: x in alpha, spaceless.lower())) / len(spaceless)\n if prop_latin < .6:\n print(quote)\n return False\n\n # figure out whether quote is in italics\n textlen = len(quote)\n try:\n italiclen = len(''.join([extract_text(x) for x in quote_parts if x.name=='i']))\n except:\n italiclen = 0\n if italiclen + 5 > textlen:\n is_italic = True\n else:\n is_italic = False\n\n is_en_list = [en_dict.check(s.strip('\\'\"(){}[].?!-—’,<>')) for s in quote.split() if len(s.strip('\\'\"(){}[].?!-—’,<>'))]\n en_proportion = (sum(is_en_list)+2)/len(is_en_list)\n if en_proportion > .6 and not is_italic:\n return True\n elif en_proportion > .8 and is_italic:\n return True\n else:\n print(quote)\n return False\n\n\n # get sub-bullets which might include source name\n meta_info = text.ul\n quote_parts = get_bullet_parts(text)\n try:\n quote = ''.join(map(extract_text, quote_parts)).strip()\n # quote in foreign language, try next subbullet\n if not is_english(quote, quote_parts):\n if meta_info:\n old_quote = quote\n bullets = 
meta_info.find_all('li')\n quote_parts = get_bullet_parts(bullets[0])\n quote = ''.join(map(extract_text, quote_parts)).strip()\n # check if subbullet seems to be in english\n if is_english(quote, quote_parts) and len(quote) > len(old_quote)*.6:\n badwords = ['pp.', 'p.', 'ch.', 'chapter', 'page', 'chap.', 'act', 'book']\n if sum([quote.lower().startswith(b) for b in badwords]) > 0:\n self.invalid = True\n else:\n self.quote = quote\n if len(bullets) > 1:\n source_parts = get_bullet_parts(bullets[1])\n self.potential_source = ''.join(map(extract_text, source_parts)).strip()\n else:\n self.invalid = True\n else:\n self.invalid = True\n print(\"foreign with no meta-info:\", quote)\n else:\n self.quote = quote\n if meta_info:\n source_parts = get_bullet_parts(meta_info.li)\n self.potential_source = ''.join(map(extract_text, source_parts)).strip()\n # try to catch things like chapter headings that get through from bad parses\n badwords = ['p.', 'pp.', 'ch.', 'chapter', 'page', 'chap.']\n if len(quote) < 25 and sum([(b in quote.lower().split()) for b in badwords]) > 0:\n self.invalid = True\n if ('\\\\displaystyle' in quote):\n self.invalid = True\n badwords = ['pp.', 'p.', 'ch.', 'chapter', 'page', 'chap.', 'act', 'book']\n if self.potential_source and sum([self.potential_source.lower().startswith(b) for b in badwords]) > 0:\n self.potential_source = None\n except Exception as e:\n print(e)\n print(quote_parts, meta_info)\n self.invalid = True", "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def get_paragraphs(text):\n return [LINE_START.sub(' ', p) for p in PARAGRAPH_SEP.split(text)]", "def text_by_paragraph(self,\r\n filename,\r\n splitchar=EOL,\r\n keys=True,\r\n key_definitions=False,\r\n query=True):\r\n\r\n\r\n analysetext = file_access.get_text_file(filename)\r\n #load the text to be analysed\r\n\r\n if keys:\r\n\r\n possible_keys = set()\r\n if len(self.keys())>50:\r\n nprint (\"TOO MANY KEYS\")\r\n for key in self.keys():\r\n #grab all keys, removing tags.\r\n #DESIDERATUM: Make it possible to\r\n #restrict the range of notes\r\n #from which the keys are grabbed\r\n\r\n if SLASH in key:\r\n if key.split(SLASH)[0] != EMPTYCHAR:\r\n possible_keys.add(key.split(SLASH)[0].lower())\r\n else:\r\n possible_keys.add(key.split(SLASH)[0].lower())\r\n\r\n\r\n possible_keys = list(possible_keys)\r\n\r\n possible_keys = show_list(possible_keys,\r\n from_here=0,\r\n to_here=len(possible_keys),\r\n label='KEYS',\r\n select=True,\r\n display=display)\r\n # show the keys through display\r\n #object and select which are to be kept\r\n possible_keys += input(queries.ADDITIONAL_KEYS).split(COMMA)\r\n display.noteprint((labels.KEYS,\r\n formkeys(possible_keys)))\r\n\r\n\r\n for paragraph in analysetext.split(splitchar):\r\n # iterate over segments of the text to be analysed\r\n found_words = set()\r\n keyset = set()\r\n\r\n if keys:\r\n found_words.update({a_temp for a_temp in get_words(paragraph)\r\n if len(a_temp) > 
3}.intersection(set(possible_keys)))\r\n # make a set of all the words that have been found\r\n keyset = found_words\r\n if key_definitions:\r\n found_words.update(self.default_dict['definitions']\r\n .return_keys(get_words(paragraph)))\r\n keyset = found_words\r\n\r\n display.noteprint((formkeys(keyset),\r\n nformat.encase(paragraph,\r\n found_words,\r\n surround=False)))\r\n # display the segment as a note\r\n #with found words encased\r\n #in arrow brackets\r\n\r\n if not query:\r\n if keyset == set():\r\n keyset = {VOIDTERM}\r\n if paragraph.strip() != EMPTYCHAR:\r\n self.enter(ek=keyset,\r\n et=paragraph)\r\n\r\n else:\r\n\r\n if input(queries.INCLUDE) in YESTERMS+[EMPTYCHAR]:\r\n # ask if the found words\r\n #should be included as keys\r\n\r\n newkeys = set(input(formkeys(keyset)\r\n +queries.KEYWORDS_TO_ADD).split(COMMA)).union(keyset)\r\n if paragraph.strip() != EMPTYCHAR:\r\n self.enter(ek=newkeys, et=paragraph)\r\n if input(queries.CONTINUE + BLANK) not in YESTERMS+[EMPTYCHAR]:\r\n break", "def iter_block_items(parent):\n if isinstance(parent, _Document):\n parent_elm = parent.element.body\n # print(parent_elm.xml)\n elif isinstance(parent, _Cell):\n parent_elm = parent._tc\n else:\n raise ValueError(\"something's not right\")\n\n for child in parent_elm.iterchildren():\n if isinstance(child, CT_P):\n yield Paragraph(child, parent)\n elif isinstance(child, CT_Tbl):\n yield Table(child, parent)", "def iter_block_items(parent):\n if isinstance(parent, _Document):\n parent_elm = parent.element.body\n # print(parent_elm.xml)\n elif isinstance(parent, _Cell):\n parent_elm = parent._tc\n else:\n raise ValueError(\"something's not right\")\n\n for child in parent_elm.iterchildren():\n if isinstance(child, CT_P):\n yield Paragraph(child, parent)\n elif isinstance(child, CT_Tbl):\n yield Table(child, parent)", "def generate_tokenlist(text):\r\n for paragraph in text.find_all('p'):\r\n paragraph_start = True\r\n for sentence in paragraph.find_all('s'):\r\n sentence_start = True\r\n for word in sentence.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n yield TextItem(None, word.string, word.name, word.string, paragraph_start, sentence_start, 0)\r\n paragraph_start = False\r\n sentence_start = False\r\n else:\r\n great_token = Token.from_tag(word)\r\n sense_key = great_token.sense_key if great_token.has_senses else None\r\n for token in great_token.get_components():\r\n yield TextItem('word', token.wordform, token.pos, token.lemma, paragraph_start, sentence_start, 1, sense_key)\r\n paragraph_start = False\r\n sentence_start = False", "def _get_next_textoutputsections(\n sections: List[\"Section\"], index: int\n) -> Iterator[\"Section\"]:\n for j in range(index, len(sections)):\n section = sections[j]\n if section.directive == SphinxDoctestDirectives.TESTOUTPUT:\n yield section\n else:\n break", "def count_paragraphs(all_articles):\n total_paragraphs = 0\n for title in all_articles:\n total_paragraphs += all_articles[title]['content'].count('\\n')\n print(f\"There are {total_paragraphs} paragraphs written.\")", "def tokenize(self, fileid):\n for paragraph in self.corpus.paras(fileids=fileid):\n sents = []\n for sent in sent_tokenize(paragraph, language='russian'):\n words = []\n for word in wordpunct_tokenize(sent):\n token = self.lemmatize(word)\n if not self.is_punct(token) and not self.is_stopword(token):\n\n words.append((token, str(self.morph.parse(word)[0].tag.POS)))\n\n sents.append(words)\n yield sents\n # yield [\n # (word, morph.parse(word)[0].tag.POS)\n # # 
pos_tag(wordpunct_tokenize(sent), lang='rus')\n # for sent in sent_tokenize(paragraph, language='russian')\n # for word in wordpunct_tokenize(sent)\n # ]\n # yield [\n # pos_tag(wordpunct_tokenize(sent), lang='rus')\n # for sent in sent_tokenize(paragraph, language='russian')\n # ]", "def process(self, doc):\n # don't try to process null notes\n if not doc[1]:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n # odd notes may throw an error. Just continue rather than stopping the entire process\n try:\n sentences = self.sentence_tokenizer.segToSentenceSpans(doc[1])\n except KeyError:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n\n #context_doc = pyConTextGraph.ConTextDocument() # ConTextDoc not needed for simple usage\n\n doc_annots = list()\n\n for sentence in sentences:\n # run sentence tokenizer on input text, return the spans\n sentence_text = doc[1][sentence.begin:sentence.end]\n # process every sentence by adding markup\n markup = pyConTextGraph.ConTextMarkup()\n markup.setRawText(sentence_text)\n markup.cleanText()\n # apply targets and modifiers\n markup.markItems(self.targets, mode=\"target\")\n markup.markItems(self.modifiers, mode=\"modifier\")\n # address scope of modifiers to targets, remove inactive modifiers and self-modifying relationships\n markup.pruneMarks()\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n markup.dropInactiveModifiers()\n\n marked_targets = markup.getMarkedTargets()\n for marked_target in marked_targets:\n modifiers = markup.getModifiers(marked_target)\n if not modifiers:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0]+'_unspecified', marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0], 'unspecified', marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n else:\n for modifier in modifiers:\n if marked_target.getSpan()[0] < modifier.getSpan()[0]:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+modifier.getSpan()[1])\n else:\n span = (sentence.begin+modifier.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0]+'_'+modifier.getCategory()[0], marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0], modifier.getCategory()[0], marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n\n #context_doc.addMarkup(markup)\n\n return doc_annots", "def __iter__(self):\n return iter(self.parses)", "def insert_paragraphs(self, str, ignore_pre=True):\n for block in splittag(str, 'pre'):\n if ignore_pre and '<pre>' in block:\n self.insert_text(block)\n if 'Holds down and then' in block:\n print block\n else:\n self.insert_text('\\n'.join('<p>%s</p>' % line\n for line in block.splitlines()))", "def paragraphs_to_lines(doc: List[List[str]]) -> List[str]:\n lines = []\n for pg in doc:\n lines.extend(pg)\n lines.append(\"\")\n\n return lines", "def __iter__(self):\n for keyword in self.meta.findall(CN('meta:keyword')):\n yield keyword.text", "def paragraph_p12(candidates_tup, return_html=False):\n\n elim_list = \"\"\n for i, c in candidates_tup:\n 
elim_list += f\"<dt><b>{i:>2}: {c}</b></dt>\"\n \n text = \"\"\"<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">\"\"\"\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f\"<dl>{elim_list}</dl></p></pre>\"\n \n if return_html:\n return text\n else:\n return Markdown(text)", "def processNotes(self, inMeasureElem):\n inNotes = inMeasureElem.findall(\"note\")\n # list to hold array of processed note elements\n lstOutNotes = []\n\n for inNote in inNotes:\n for childNode in inNote:\n outNote = ET.Element(\"note\")\n\n # rest\n inRest = inNote.find(\"rest\")\n if (inRest is not None):\n outRest = ET.Element(\"rest\")\n outNote.append(outRest)\n\n # chord\n inChord = inNote.find(\"chord\")\n if (inChord is not None):\n outChord = ET.Element(\"chord\")\n outNote.append(outChord)\n\n # pitch\n inPitch = inNote.find(\"pitch\")\n if (inPitch is not None):\n outPitch = ET.Element(\"pitch\")\n for childNode in inPitch:\n if (True == (childNode.tag in [\"step\", \"octave\"])):\n pitchSubElem = ET.Element(childNode.tag)\n pitchSubElem.text = childNode.text\n outPitch.append(pitchSubElem)\n # handle the \"alter\" child tag separately even if the alter value is 0 include it otherwise\n # include the original scores alter value\n inPitchAlter = inPitch.find(\"alter\")\n if (inPitchAlter is not None):\n # use sources alter elem\n outPitchAlter = ET.Element(\"alter\")\n outPitchAlter.text = inPitchAlter.text\n outPitch.append(outPitchAlter)\n else:\n # make a zero value alter element\n outPitchAlter = ET.Element(\"alter\")\n outPitchAlter.text = \"0\"\n outPitch.append(outPitchAlter)\n\n outNote.append(outPitch)\n\n # duration\n inDuration = inNote.find(\"duration\")\n if (inDuration is not None):\n outDuration = ET.Element(\"duration\")\n outDurationVal = int(inDuration.text) * self._multiplier\n outDuration.text = str(int(outDurationVal))\n outNote.append(outDuration)\n\n # append the mote to the list of notes\n lstOutNotes.append(outNote)\n\n # finally return the list of notes\n return lstOutNotes", "def iter_block_items(parent):\n\n if isinstance(parent, Document):\n parent_elm = parent.element.body\n elif isinstance(parent, _Cell):\n parent_elm = parent._tc\n else:\n raise ValueError(\"something's not right\")\n\n for child in parent_elm.iterchildren():\n if isinstance(child, CT_P):\n yield Paragraph(child, parent)\n elif isinstance(child, CT_Tbl):\n yield Table(child, parent)", "def tokenize(self, fileid):\n for paragraph in self.corpus.paras(fileids=fileid):\n yield [\n nltk.pos_tag(nltk.wordpunct_tokenize(sent))\n for sent in nltk.sent_tokenize(paragraph)\n ]", "def get_marked_paragraphs(doc):\n\n\tres = [[x] for x in doc.paragraphs if x.text != ''] # получаем все непустые параграфы\n\n\tfor i in range(len(res)):\n\t\tq = [] # подготавливаем список маркеров\n\t\tfor k in range(len(res[i][0].runs)):\n\t\t\tif \"<>\" in res[i][0].runs[k].text: # если в тексте каретки встречается маркер\n\t\t\t\tq.append(res[i][0].runs[k])\n\t\t\telif \"<\" in res[i][0].runs[k].text and \">\" in res[i][0].runs[k+1].text: # сли маркер разделен на две сосендние каретки\n\t\t\t\tres[i][0].runs[k+1].clear() # удаляем содержимое второй 
каретки\n\t\t\t\tq.append(res[i][0].runs[k]) # и сохраняем в итоговый список первую \n\t\tif q != []: # если найдены маркеры\n\t\t\tres[i].append(q)\n\n\treturn res", "def processed_doc(self, pipeline):\n return [pipeline(text) for text in EN_DOCS]", "def __iter__(self):\n txt = self.get_comp_text()\n # split by sentences\n sentences = txt.split('\\n')\n # breaks when parsing train2 (has EOF)\n if sentences[-1] == '':\n sentences = sentences[:-1]\n # slice data\n if self.slice is not None:\n start, end = self.slice\n sentences = sentences[start:end]\n for sent_id, sentence in enumerate(sentences):\n words = sentence.split(' ')\n stripped_sentence = []\n # X, y\n tuples, tags = [], []\n # helper\n word_tag_tuples = []\n if not self.comp:\n for word in words:\n word_stripped, tag_stripped = word.split('_')\n word_tag_tuples.append((word_stripped, tag_stripped))\n stripped_sentence.append(word_stripped)\n\n for i, word_tag_tuple in enumerate(word_tag_tuples):\n tag = word_tag_tuple[1]\n tags.append(tag)\n if i == 0:\n tuples.append(('*', '*', sent_id, i))\n elif i == 1:\n tuples.append(('*', word_tag_tuples[i - 1][1], sent_id, i))\n else:\n u = word_tag_tuples[i - 2][1] # pre pre tag\n v = word_tag_tuples[i - 1][1] # pre tag\n tuples.append((u, v, sent_id, i))\n\n yield tuples, tags, stripped_sentence\n else:\n yield [(None, None, sent_id)], [], words", "def __iter__(self):\r\n\r\n return iter(self._contents)", "def tokenize(self, fileid):\n for paragraph in self.corpus.paras(fileids=fileid):\n yield [\n pos_tag(wordpunct_tokenize(sent))\n for sent in sent_tokenize(paragraph)\n ]", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in 
PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' 
in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def getDataParagraph(startpattern,stoppattern,datararray):\n output = []\n inparagraph = 'FALSE'\n lines=datararray\n for i in range(len(lines)):\n search_start=re.search(r'{0}'.format(startpattern),lines[i])\n if search_start is not None or inparagraph == 'TRUE':\n inparagraph = 'TRUE'\n lines[i] = lines[i].split('\\n')[0]\n if lines[i].startswith('*'):\n pass\n else:\n output.append(lines[i])\n search_stop=re.search(r'{0}'.format(stoppattern),lines[i])\n if search_stop is not None:\n return output\n pass", "def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()", "def between(current, end):\n text_list = []\n\n while current and current != end:\n if isinstance(current, bs4.NavigableString):\n text = current.strip()\n if text:\n texts = text.split(\"\\n\")\n for text in texts:\n content_str = ''\n for c in text:\n if is_chinese(c):\n content_str += c\n text_list.append(content_str)\n\n current = current.next_element\n return text_list", "def sentence_parse(list_of_posts): \n for parsedPosts in nlp.pipe(line_review(list_of_posts)):\n for sent in parsedPosts.sents:\n yield str(sent)", "def generate_excerpt():", "def process_page_tag(self, root):\n pages = root.findall(self.tag_prefix + self.page_tag)\n articles = []\n for page in pages:\n if self.is_news_article(page):\n article = self.parse_text(page)\n if article:\n articles.append(article)\n return articles", "def __iter__(self):\r\n yield from self.phrase", "def text_preprocessing_pdf(self,p):\n #remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p", "def add_paragraph(self):\n # <a:p> elements are last in txBody, so can simply append new one\n p = _Element('a:p', _nsmap)\n self.__txBody.append(p)\n return Paragraph(p)", "def __iter__(self):\n return self.contents.__iter__()", "def __iter__(self):\n return self.contents.__iter__()", "def verbalisePunctuation(self):\n for i, strText in enumerate(self.sentencesList):\n #For all punctuation marks\n for regex, value in list(TextRepresentation.PUNCTUATION.items()):\n strText = re.sub(regex, value, strText)\n self.sentencesList[i] = strText", "def parse_document(self, response):\n document = response.meta['document']\n document['title'] = ' '.join(response.css('p.s32B251D').css(\n 'span.s7D2086B4 ::text').extract())\n paragraphs = []\n for paragraph in response.css('p'):\n spans = [span for span in paragraph.css('span ::text').extract()\n if span != u'\\xa0' and span != '']\n if len(spans):\n paragraphs.append(u' '.join(spans))\n document['sentences'] = paragraphs\n yield 
document", "def __iter__(self) -> Generator:\n\t\treturn (article for article in self._articles)", "def paragraph_with_marker(self, text, tagged_text):\n # To aid in determining collapsed paragraphs, replace any\n # keyterms present\n node_for_keyterms = Node(\n text, node_type=Node.APPENDIX, tagged_text=tagged_text,\n label=[initial_marker(text)[0]]\n )\n keyterm = KeyTerms.keyterm_in_node(node_for_keyterms)\n if keyterm:\n mtext = text.replace(keyterm, '.' * len(keyterm))\n else:\n mtext = text\n\n for mtext in split_paragraph_text(mtext):\n if keyterm: # still need the original text\n mtext = mtext.replace('.' * len(keyterm), keyterm)\n node = Node(mtext, node_type=Node.APPENDIX,\n label=[initial_marker(mtext)[0]])\n self.nodes.append(node)", "def __call__(self, filename, manuscript=\"\", journal=\"\", author=[], \n publisher=\"\"):\n\n # check the parameters\n while len(author) < 4:\n author.append(\"\")\n\n doc = getDocument(filename)\n\n Story = []\n\n # Section 1\n Story.append(\n Paragraph(\n \"\"\"<seqreset id=\"main\" /><seq id=\"main\">. THIS Amendment hereby \n modifies and supplements the attached Publication Agreement \n concerning the following Article:\"\"\", styles['outer_style'])\n )\n\n journal_info_table = Table([\n [fillInRow(manuscript, \"(manuscript title)\", width=inch*5)],\n [fillInRow(journal, \"(journal name)\", width=inch*5)],\n ],\n )\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Section 2\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\">. The parties to the Publication Agreement and\n to this Amendment are:\"\"\", styles['outer_style'])\n )\n\n journal_info_table = Table([\n [fillInRow(author[0], \"(corresponding author)\", width=inch*5)],\n [Paragraph(\"and\", styles['outer_style'])],\n [fillInRow(journal, \"(the Publisher)\", width=inch*5)],\n ],\n )\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Section 3\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\">. The parties agree that wherever there is any\n conflict between the Amendment and the Publication Agreement, \n the provisions of this Amendment are paramount and the \n Publication Agreement shall be construed accordingly.\"\"\",\n styles['outer_style'])\n )\n\n # Section 4\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\">. Notwithstanding any terms in the Publication\nAgreement to the contrary and in addition to the rights retained by Author \nor licensed by Published to Author in the Publication Agreement and any fair \nuse rights of Author, Author and Publisher agree that the Author shall also \nretain the following rights:\"\"\",\n styles['outer_style'])\n )\n\n # 4a\n Story.append(\n Paragraph(\n \"\"\"a. The Author shall, without limitation, have the non-exclusive right to use, reproduce, distribute, create derivative works including update, perform, and display publicly, the Article in electronic, digital or print form in connection with the Author's teaching, conference presentations, lectures, other scholarly works, and for all of Author's academic and professional activities. \"\"\",\n styles['inner_style'])\n )\n\n # 4b\n Story.append(\n Paragraph(\n \"\"\"b. 
Once the Article has been published by Publisher, the Author shall also have all the non-exclusive rights necessary to make, or to authorize others to make, the final published version of the Article available in digital form over the Internet, including but not limited to a website under the control of the Author or the Author's employer or through any digital repository, such as MIT's DSpace or the National Library of Medicine's PubMed Central database.\"\"\",\n styles['inner_style'])\n )\n\n #4c\n Story.append(\n Paragraph(\n \"\"\"c. The Author further retains all non-exclusive rights necessary to grant to the Author's employing institution the non-exclusive right to use, reproduce, distribute, display, publicly perform, and make copies of the work in electronic, digital or in print form in connection with teaching, digital repositories, conference presentations, lectures, other scholarly works, and all academic and professional activities conducted at the Author's employing institution. \"\"\",\n styles['inner_style'])\n )\n\n # Section 5\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\" />. <b>Final Agreement.</b> This Amendment and the Publication Agreement, taken together, constitute the final agreement between the Author and the Publisher with respect to the publication of the Article and allocation of rights under copyright in the Article. Any modification of or additions to the terms of this Amendment or to the Publication Agreement must be in writing and executed by both Publisher and Author in order to be effective.\"\"\",\n styles['outer_style'])\n )\n\n # Signature\n journal_info_table = Table([\n [\"AUTHOR\", \" \", \"PUBLISHER\"],\n [fillInRow(\"\", \"(corresponding author on behalf of all authors)\"),\n \"\", fillInRow(\"\", \"\")],\n [fillInRow(\"\", \"Date\"),\n \"\",\n fillInRow(\"\", \"Date\")]\n ],\n colWidths=[inch*3, inch*.25, inch*3],\n )\n\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # MIT Directive\n Story.append(\n Paragraph(\"<b>MIT Authors:</b>\", styles['outer_style'])\n )\n Story.append(\n Paragraph(\"Please fax a copy of the agreement to 617-253-8894. 
Direct any questions to [email protected]\",\n styles['inner_style'])\n )\n\n\n agreement = \"%s %s\" % (self.NAME, self.VERSION)\n doc.build(Story, \n onFirstPage=mit_pageInfo, onLaterPages=mit_pageInfo)", "def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]", "def analyze_text(self):\n\n # Read stylesheet.\n found_stylesheet = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found_stylesheet = True\n style = self.project.open(filename, 'r')\n self._read_stylesheet(style)\n if not found_stylesheet:\n raise IOError('stylesheet not found')\n\n # Process text data.\n for filename in self.project.namelist():\n if filename.endswith('.usx'):\n usx = self.project.open(filename, 'r')\n for text in self._process_usx_file(usx):\n yield text\n # self.exemplars.process(text)\n # self.corpus.write(text + '\\n')", "def __iter__(self):\n for p in self.positions():\n yield p.element()", "def extract_text(url, sem):\n with (yield from sem):\n page = yield from get(url)\n\n tree = etree.HTML(page)\n paragraphs = tree.findall('.//*/div[@class=\"entry-content\"]/p')[1:-1]\n return url, b'\\n'.join(map(etree.tostring, paragraphs))", "def chunk_generator(self, note_text):\n\n # section regular expression\n sec_re = r'\\[start section id=\\\"(.+)\"\\](.*?)\\[end section id=\\\"\\1\"\\]'\n\n # sentence regular expressions; use group 0 for entire match\n sent_re = r'(.+?\\.\\s\\s)|(.+?\\.\\n)|(.+?\\n)'\n\n # iterate over sections; using DOTALL to match newlines\n for sec_match in re.finditer(sec_re, note_text, re.DOTALL):\n\n section_id = sec_match.group(1)\n if section_id in sections_to_skip:\n continue\n\n section_text = sec_match.group(2)\n sec_start, sec_end = sec_match.start(2), sec_match.end(2)\n\n sent_offsets = []\n for sent_match in re.finditer(sent_re, section_text):\n sent_start, sent_end = sent_match.start(0), sent_match.end(0)\n sent_offsets.append((sent_start, sent_end))\n\n # form this many chunks (add an overflow chunk)\n section_length = len(self.tokenizer(section_text).input_ids)\n n_chunks = (section_length // self.chunk_size) + 1\n\n for sents in numpy.array_split(sent_offsets, n_chunks):\n\n # this happens if there are fewer paragraphs than chunks\n # e.g. 2 large paragraphs in section and n_chunks is 3\n if sents.size == 0:\n continue\n\n chunk_start, _ = sents[0].tolist()\n _, chunk_end = sents[-1].tolist()\n yield sec_start + chunk_start, sec_start + chunk_end", "def iter_documents(self):\n raise NotImplementedError" ]
[ "0.70568925", "0.6538393", "0.630119", "0.6105424", "0.59029144", "0.5857844", "0.56422466", "0.561061", "0.5597017", "0.5584193", "0.55805284", "0.55002755", "0.5490439", "0.5486235", "0.5449823", "0.5444187", "0.5435644", "0.5427534", "0.5414275", "0.54038227", "0.5385818", "0.5359002", "0.5344678", "0.53430986", "0.53422266", "0.5325232", "0.5321369", "0.53143275", "0.5266145", "0.5233419", "0.520934", "0.5184691", "0.5179486", "0.5177782", "0.5171536", "0.5152182", "0.5144838", "0.51370543", "0.5136715", "0.5127114", "0.51271033", "0.51178104", "0.511628", "0.51044273", "0.50959504", "0.50901777", "0.50897384", "0.50664455", "0.50506574", "0.5044168", "0.50388956", "0.5035157", "0.50351405", "0.5034977", "0.5033192", "0.5023665", "0.50187343", "0.5010931", "0.5010931", "0.499643", "0.49936146", "0.49916947", "0.4976435", "0.49754834", "0.49656337", "0.49652556", "0.49644256", "0.4964124", "0.4954928", "0.49445686", "0.49338263", "0.49283233", "0.49241742", "0.49207798", "0.4920429", "0.4918079", "0.49036896", "0.4897775", "0.4894539", "0.489422", "0.48942143", "0.48752195", "0.4861661", "0.48559976", "0.48474142", "0.48342946", "0.48308948", "0.4821887", "0.4821887", "0.4805937", "0.4802868", "0.47974455", "0.47966176", "0.47943068", "0.4785451", "0.47829163", "0.4777417", "0.477369", "0.47734755", "0.47720817" ]
0.6565161
1
Checks whether an element contains footnote text.
def is_footnote_text(self, par):
    return (par is not None) and ("foot" in par.attrs.get("class", []))
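A minimal, hedged sketch of how this check behaves on BeautifulSoup elements; the standalone wrapper and the sample HTML below are invented for illustration and only mirror the method's logic, they are not part of the original class.

# Hypothetical standalone version of the same check, assuming BeautifulSoup
# <p> tags whose class attribute carries the literal class "foot".
from bs4 import BeautifulSoup

def is_footnote_text(par):
    # True when the element exists and "foot" appears in its class list.
    return (par is not None) and ("foot" in par.attrs.get("class", []))

html = '<p class="foot">1. See prior ruling.</p><p class="body">Regular text.</p>'
soup = BeautifulSoup(html, "html.parser")
foot, body = soup.find_all("p")

print(is_footnote_text(foot))   # True  -- class list contains "foot"
print(is_footnote_text(body))   # False -- no footnote class
print(is_footnote_text(None))   # False -- a missing element is handled safely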
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def is_footnote(self):\n return self.style['float'] == 'footnote'", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def is_plugin_note(self, note):\n return bool(self.regex.match(note))", "def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text", "def is_plain_text(self):\n return self._tag == 'plain_text'", "def text_exists(self, text: str)-> bool:\n result = self.__content.find(text)\n if result == -1:\n return False\n else:\n return True", "def is_text( self ):\n return self.get_main_type() == 'text'", "def ends_paragraph(s: str) -> bool:\n return not s.strip()", "def has_textframe(self):\n return _child(self._element, 'p:txBody') is not None", "def is_ends_with_tag(text):\n\treturn re_tag.search(text) != None", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def has_text(self, text, match_option=None):\n selector_text = UiSelector().attributes(\"text\", text, match_option)\n selector_content_desc = UiSelector().attributes(\"content-desc\", text,\n match_option)\n\n return UiObject(\n selector_text, self.android_device_driver).verify_exist() or UiObject(\n selector_content_desc, self.android_device_driver).verify_exist()", "def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)", "def is_tagged_text(text):\n return len(text) > len(strip_tags(text))", "def assert_has_text(self, xml_root, xpath, text, exact=True):\r\n element_list = xml_root.xpath(xpath)\r\n self.assertTrue(len(element_list) > 0,\r\n \"Could not find element at '%s'\" % str(xpath))\r\n\r\n if exact:\r\n self.assertEqual(text, element_list[0].text)\r\n else:\r\n self.assertIn(text, element_list[0].text)", "def is_important_text(node):\n\n return not (0 < len(node.get_text()) < TEXT_MIN_SCORE\n and node.name not in HEADING_TAGS)", "def has_template(page_text: str) -> bool:\n\n\tpattern = 
'<noinclude>.*{{documentation}}.*</noinclude>'\n\tif re.search(pattern, page_text, re.DOTALL | re.IGNORECASE):\n\t\treturn True\n\telse:\n\t\treturn False", "def hasContents():", "def findFootnotesPlaceholder(self, root):\n def finder(element):\n for child in element:\n if child.text:\n if child.text.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, True\n if child.tail:\n if child.tail.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, False\n finder(child)\n return None\n \n res = finder(root)\n return res", "def is_plugin_note(self, note):\n return False", "def _is_jupytext_file(ntbk):\n jupytext_meta = ntbk.get('metadata', {}).get('jupytext')\n if jupytext_meta is None:\n return False\n else:\n return jupytext_meta.get('notebook_metadata_filter', '') != \"-all\"", "def is_text_exists(self, locator_type, locator, text):\n try:\n self.wait_for_text(locator_type, locator, text)\n return True\n except TimeoutException:\n return False", "def _is_text_tag(tag):\n return tag.name not in ['script', 'style']", "def is_resent(self):\n return self.unixtext.find(\"...RESENT\") > 0", "def is_tip(text):\n\n amount = 0\n if re.search(r'I sent you a \\$[0-9]*\\.00 tip ♥', text):\n amount = re.match(r'I sent you a \\$([0-9]*)\\.00 tip ♥', text).group(1)\n Settings.maybe_print(\"successfully found tip\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n elif re.search(r\"I\\'ve contributed \\$[0-9]*\\.00 to your Campaign\", text):\n amount = re.match(r'I\\'ve contributed \\$([0-9]*)\\.00 to your Campaign', text).group(1)\n Settings.maybe_print(\"successfully found campaign donation\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n return False, int(amount)", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def _has_notes(track):\n return len(\n list(filter(lambda m: m.type in ['note_on', 'note_off'], track))) > 0", "def is_word(mystem_element):\n word = mystem_element.get('text', '')\n if len(word.strip()) > 0:\n return True\n return False", "def has_contents(self):\n return len(self.byteruns())>0", "def has_tags_in_content(self):\n\t\treturn self.get_content() and re_tag.search(self.get_content())", "def isText(self):\n ret = libxml2mod.xmlNodeIsText(self._o)\n return ret", "def assert_html_contains(html, element, attributes=None, text=None):\n html_doc = lxml_html.parse(StringIO(html))\n\n try:\n tag = next(html_doc.iter(element))\n except StopIteration:\n raise Exception(\"<{0}> not in {1}\".format(element, html))\n\n if attributes:\n arg_attrs = set(attributes.items())\n tag_attrs = set(tag.items())\n assert arg_attrs.issubset(tag_attrs)\n\n if text:\n assert text in tag.text", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def is_list_of_text_data(parent_element, list_name):\n list_item_name = get_singular_from_plural(list_name)\n\n if parent_element.find(list_item_name) is None:\n return False\n\n return parent_element.find(list_item_name).text is not None", "def parse_footnote(self, footelem) -> FootNote:\n\n fn = FootNote()\n if footelem.text is None:\n fn.footnote = ''\n else:\n fn.footnote = footelem.text.strip()\n\n fn.footnoteid = footelem.attrib['{%s}label' % footelem.nsmap['xlink']]\n\n return fn", "def 
check_for_metatitle(self, interest_name: str):\n if interest_name.endswith(\"/streaming/p*\"):\n return True\n else:\n return False", "def is_empty(self):\n return not self._text", "def isValid(text):\n return bool(re.search(r'\\bnews\\b', text, re.IGNORECASE))", "def is_postal_code(elem):\n return 'post' in elem.attrib['k']", "def hasRawText(self, text):\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|dl|pre|h\\d)[^>]*?>.*</\\1>',\n re.S).sub('', text.strip()).strip()\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\n return '' != r", "def footnote_item(self, key, text):\n back = (\n '<a href=\"#fnref-%s\" class=\"footnote\">&#8617;</a>'\n ) % escape(key)\n text = text.rstrip()\n if text.endswith('</p>'):\n text = re.sub(r'<\\/p>$', r'%s</p>' % back, text)\n else:\n text = '%s<p>%s</p>' % (text, back)\n html = '<li id=\"fn-%s\">%s</li>\\n' % (escape(key), text)\n return html", "def footnotes(self, text):\n html = '<div class=\"footnotes\">\\n%s<ol>%s</ol>\\n</div>\\n'\n return html % (self.hrule(), text)", "def css_has_text(css_selector, text, index=0, strip=False):\r\n # If we're expecting a non-empty string, give the page\r\n # a chance to fill in text fields.\r\n if text:\r\n wait_for(lambda _: css_text(css_selector, index=index))\r\n\r\n actual_text = css_text(css_selector, index=index)\r\n\r\n if strip:\r\n actual_text = actual_text.strip()\r\n text = text.strip()\r\n\r\n return actual_text == text", "def test_p_tag_is_not_empty_element(self):\n soup = self.soup(\"<p />\")\n self.assertFalse(soup.p.is_empty_element)\n self.assertEqual(str(soup.p), \"<p></p>\")", "def should_be_compact_paragraph(self, node):\n\n if isinstance(node.parent, nodes.container):\n if 'non-paragraph' not in node.parent.attributes['classes']:\n return False\n\n # noinspection PyUnresolvedReferences\n return super().should_be_compact_paragraph(node)", "def has_child(self, locator):\n return self.find_element(locator) is not None", "def __contains__(self, item):\n if isinstance(item, Token):\n return item.text in self.text\n return item in self.text", "def isHereDoc(self, lineData, column):\n return self._getTextType(lineData, column) == 'h'", "def ISNONTEXT(value):\n return not ISTEXT(value)", "def IsValid(self):\n return len(self.Text) > 0", "def is_tagged_text(*a, **kw):\n return is_tagged_text(*a, **kw)", "def text_search(self, text, stuff_to_cop):\n if any(ext in text for ext in stuff_to_cop):\n return(True)\n else:\n return(False)", "def said(self, text):\n for message in self.messages:\n if text in message:\n return True\n return False", "def is_text(content):\n if b\"\\0\" in content:\n return False\n if not content: # Empty files are considered text\n return True\n # Try to decode as UTF-8\n try:\n content.decode(\"utf8\")\n except UnicodeDecodeError:\n return False\n else:\n return True", "def have_msg_form(msgf_element):\n msgf = msgf_element.find('a.js-message-biz')\n return ('Message the business' in [i.text for i in msgf],\n 'Request a reservation' in [i.text for i in msgf])", "def _has_phrase(self, box):\n lines = box.get_lines()\n pattern = self.field.settings.pattern_builder.list_pattern(self._phrases)\n for line in lines:\n if re.search(pattern, line.text) is not None:\n return True\n return False", "def footnote(self) -> str:\n return self._footnote", "def matches_output(self, text):\n if self.markers:\n for marker in self.markers:\n if marker in text:\n return True\n # -- OTHERWISE:\n return False", "def has_data(self):\n return self.has_credits() or self.has_content() or self.notes", 
"def has_data(self):\n return self.has_credits() or self.has_content() or self.notes", "def can_transform(self, html_element: ET.Element):\n return html_element.tag == \"mark\"", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def isHTML(content):\n\n return '<html' in content or 'html>' in content", "def hasIntroMarker(self):\n return any(marker.type == 'intro' for marker in self.markers)", "def is_sentence_end(mystem_element):\n word = mystem_element.get('text', '')\n return word == '\\\\s' or word == '\\n'", "def fix_footnotes(case_el, warnings):\n case_pq = PyQuery(case_el)\n # fix footnotes\n # footnotes look like this (since <small> is already stripped)\n # <p>--------</p>\n # <p>Notes:</p>\n # <p>\n # <sup>\n # <a href=\"#fn1\" name=\"fr1\">1</a>\n # </sup> text text text </p>\n # notes label can look like `<strong><br/> --------</strong>` -- NE2d/990/990ne2d139_12.xml\n notes_el = case_pq('p:contains(\"Notes:\")').filter(lambda i, el: strip_tags(PyQuery(el).text()).strip() == 'Notes:')\n refs = {}\n notes_section = None\n if notes_el:\n notes_section = notes_el.closest('article, section')\n footnote_index = 0\n opinion_index = 1\n footnote_el = None\n\n # before and after footnote sections there is a paragraph of either 8 or 15 hyphens\n footnote_breaks = ['-' * 8, '-' * 15]\n\n # remove footnote break before footnote section\n # can have tags in the footnote break -- A3d/50/50a3d607_29.xml\n prev_notes_el = notes_el.prev()\n if strip_tags(prev_notes_el.text()).strip() not in footnote_breaks:\n warnings.append(\"Unexpected element before notes el.\")\n else:\n prev_notes_el.remove()\n\n # remove \"Notes:\"\n old_footnote_el = notes_el.next()\n notes_el.remove()\n\n # step through each footnote element\n while old_footnote_el:\n # sometimes <a> tag gets out of <p> tag -- SE2d/590/590SE2d53.xml\n # put it inside a new <p>\n if old_footnote_el[0].tag == 'a':\n old_footnote_el = wrap_with(old_footnote_el, PyQuery(etree.Element('p')))\n\n link_el = old_footnote_el('a').eq(0)\n if not link_el:\n # this could be the end of footnotes, in which case stop\n if strip_tags(old_footnote_el.text()).strip() in footnote_breaks:\n old_footnote_el.remove()\n break\n # or could be a second paragraph of the previous footnote, in which case append\n if footnote_el:\n footnote_el.append(old_footnote_el)\n old_footnote_el = footnote_el.next()\n continue\n else:\n # if there's a non-footnote before the first footnote, we don't know what's going on,\n # so quit processing\n warnings.append(\"Unexpected non-footnote element.\")\n break\n label = link_el.text()\n footnote_index += 1\n footnote_id = f'footnote_{opinion_index}_{footnote_index}'\n footnote_el = PyQuery(renderer.make_footnote_el(id=footnote_id, label=label))\n refs[link_el.attr('href').lstrip('#')] = [footnote_id, footnote_el]\n while link_el.parent()[0].tag == 'sup':\n link_el = link_el.parent()\n link_el.remove()\n\n # remove space at beginning of footnote left over from removing footnote number\n if old_footnote_el[0].text:\n old_footnote_el[0].text = old_footnote_el[0].text.lstrip()\n\n wrap_with(old_footnote_el, footnote_el)\n old_footnote_el = footnote_el.next()\n\n # fix footnote references (<small> is already stripped)\n # ...<sup><a href=\"#fr1\" name=\"fn1\">1</a></sup>... 
typical\n # ...<sup id=\"co_fnRef_B00012045229866_ID0E4F\">1</sup> BR/590/590 B.R. 577.xml\n # ...<a href=\"#1\" name=\"fn1\" id=\"fn1\">1</a>... NW2d/781/781NW2d5512010WIApp33_29.xml\n for section in case_pq('.head-matter, .opinion').items():\n for old_ref_pq in section('a, sup[id]').items():\n label = old_ref_pq.text()\n if old_ref_pq[0].tag == 'a':\n ref_name = old_ref_pq.attr('name')\n if not (ref_name and ref_name.startswith('fn')):\n continue\n else:\n ref_name = \"fn\" + label\n ref, footnote_el = refs.get(ref_name, ['orphan', None])\n if footnote_el:\n # move footnotes from end of document to correct section -- see NW2d/906/906 N.W.2d 436_Replace.xml\n if section != notes_section:\n section.append(footnote_el)\n else:\n warnings.append(f\"Unmatched ref {repr(str(old_ref_pq))}\")\n ref_el = etree.Element('a', {'class': 'footnotemark', 'href': '#' + ref, 'id': 'ref_' + ref})\n ref_el.text = label\n while old_ref_pq.parent()[0].tag == 'sup':\n old_ref_pq = old_ref_pq.parent()\n PyQuery(ref_el).insert_before(old_ref_pq)\n old_ref_pq.remove()", "def test_textCondition(self):\n xp = XPathQuery(\"/foo[text() = 'somecontent']\")\n self.assertEqual(xp.matches(self.e), True)", "def isTextWrapper(self, w: Wrapper) -> bool:\n if w is None:\n return False\n if isinstance(w, (g.NullObject, g.TracingNullObject)):\n return True\n return bool(getattr(w, 'supportsHighLevelInterface', None))", "def verify_text_present(self, text, msg=None):\r\n try:\r\n self.assert_text_present(text, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def is_substring(text: str, elements: set) -> bool:\n for element in elements:\n if text in element:\n return True\n\n return False", "def IsMultiline(self):\r\n\r\n return \"\\n\" in self.caption", "def measureIsEmpty(self, inMeasureElem):\n emptyMeasureMsg = \"Measure found empty (invalid XML)\"\n inMeasureNotes = inMeasureElem.findall(\"note\")\n if (inMeasureNotes is None):\n self.debugPrint(emptyMeasureMsg)\n return True\n elif (len(inMeasureNotes) == 0):\n self.debugPrint(emptyMeasureMsg)\n return True\n else:\n return False", "def exists( identifier ):\n return note.exists(identifier)", "def isText(self):\n return _libsbml.XMLToken_isText(self)", "def hasTextElement(self, timeout=20.0, commandId=None, expectedText=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n valueLayer = TestTemplates.TemplateLayer(name=\"\")\n if expectedText is not None:\n valueLayer.addKey(name=\"value\", data=expectedText)\n return self.isActionAccepted(timeout=timeout, commandName=Command.GET_ELEMENT_TEXT, \n commandId=commandId, expectedValue=valueLayer)", "def verify_text(self, text):\n pass", "def assertText(self,content,expected_text,description=\"\"): \n self.assertTrue(expected_text in content,\n \"expected to find '{0}' but found '{1}' instead.\\\n Attemted action: {2}\".format(expected_text, \n content,\n description))", "def footnote(self, footnote: str):\n\n self._footnote = footnote", "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def citationContainsDOI(citation):\n if citation.startswith(\"doi:\"):\n return True\n elif citation.startswith(\"@doi:\"):\n return True\n elif citation.startswith(\"[@doi\"):\n return True\n else:\n return False", "def check_data_exist(data):\n if data is not None:\n if data.text is not None:\n return True\n return False", "def is_complete(self):\n return all([\n 
len(strip_tags(score.notes)) > 0 for score in self.scores.all()\n ])", "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def isValid(text):\n return bool(re.search(r'\\bneustart\\b', text, re.IGNORECASE))", "def get_text_in_element():\n nonlocal text_\n if text_ is None:\n text_ = element.text\n element_text = element.text\n if element_text == text:\n return element\n if text.lower() == element_text.lower():\n return element\n if \"\".join(text.split()) == \"\".join(element_text.split()):\n return element\n return False", "def file_content_has_bitcoin_address(file_content):\n bitcoin_address_regex = re.compile('[13][a-km-zA-HJ-NP-Z1-9]{25,34}')\n if bitcoin_address_regex.search(file_content):\n return True\n else:\n return False", "def has_simple_content(self) -> bool:\n raise NotImplementedError()", "def is_tag(t):\n return len(t) > 1 and t.startswith('#') and not t.startswith('##') and t", "def is_cuisine(elem):\n return elem.attrib['k'] == 'cuisine'", "def check_paragraph(line):\n if len(line) > 3 and line[:3] == '⋅⋅⋅':\n return '<p>' + line[3:] + '</p>'\n else:\n return line", "def _check_with_content(params):\r\n if 'with_content' in params and params['with_content'] != 'false':\r\n return True\r\n else:\r\n return False", "def isTextOfAcceleratorForContainer(elm):\n try:\n if elm.parentNode.tagName == \"accelerator\" and elm.parentNode.parentNode.tagName == \"container\":\n return True\n else:\n return False\n except Exception, e:\n return False;", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def single_line_paragraph(s: str) -> bool:\n return s.startswith('@') or s.strip() in ('\"\"\"', \"'''\")", "def is_content(cls, path_or_content):\n return any(path_or_content.lstrip().startswith(s) for s in cls.valid_content_start)", "def is_header_or_footer(self, tag):\n return ((tag.name == \"text\") and tag.has_attr(\"top\")\n and ((int(tag[\"top\"]) <= self.HEADER_END_OFFSET) \n or (int(tag[\"top\"]) > self.FOOTER_START_OFFSET)))" ]
[ "0.72341335", "0.688278", "0.6498541", "0.6382326", "0.61090255", "0.6085547", "0.59022486", "0.5810465", "0.5732255", "0.5724639", "0.56633186", "0.55054045", "0.54965115", "0.5481354", "0.54613525", "0.5442589", "0.5437752", "0.54305816", "0.53904307", "0.5370518", "0.5368163", "0.53493077", "0.5339378", "0.5335499", "0.529686", "0.52964383", "0.5295664", "0.5281439", "0.5256145", "0.52471274", "0.5239496", "0.52300334", "0.5216192", "0.5210294", "0.520925", "0.52058685", "0.5177493", "0.5170278", "0.5145914", "0.5140791", "0.5130127", "0.5120637", "0.5117305", "0.51084", "0.5099248", "0.5097301", "0.5083415", "0.50786984", "0.50754184", "0.5054255", "0.50462794", "0.5042948", "0.5042126", "0.503969", "0.5036264", "0.5011335", "0.50080705", "0.50077415", "0.500495", "0.49984995", "0.4994462", "0.49777576", "0.49341992", "0.49341992", "0.49325746", "0.49324787", "0.49317086", "0.4929924", "0.4927468", "0.49259263", "0.4921733", "0.49168995", "0.49164325", "0.49023518", "0.49020082", "0.4900908", "0.48990533", "0.48912102", "0.4875738", "0.48753673", "0.4870272", "0.48659423", "0.48616534", "0.4855925", "0.48485523", "0.48461097", "0.48437122", "0.48413157", "0.48376486", "0.48326203", "0.48182386", "0.4817823", "0.48167104", "0.48166427", "0.48161665", "0.47998098", "0.4792547", "0.478776", "0.47845867", "0.47786763" ]
0.80490977
0
Checks whether an element is a link adjacent to footnote text.
def is_footnote_link(self, par):
    return self.is_footnote_text(par.find_next_sibling('p'))
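As above, a hedged standalone sketch: the HTML snippet is invented, and the two helpers restate the pair of methods outside their class so the sibling lookup can be run directly.

# Hypothetical sketch pairing both checks, assuming markup where a link
# paragraph immediately precedes the footnote paragraph it points to.
from bs4 import BeautifulSoup

def is_footnote_text(par):
    return (par is not None) and ("foot" in par.attrs.get("class", []))

def is_footnote_link(par):
    # A paragraph counts as a footnote link when the next <p> sibling
    # is itself footnote text.
    return is_footnote_text(par.find_next_sibling("p"))

html = (
    '<p class="ref"><a href="#fn1">1</a></p>'
    '<p class="foot">1. Footnote body.</p>'
    '<p class="body">Ordinary paragraph.</p>'
)
soup = BeautifulSoup(html, "html.parser")
ref, foot, body = soup.find_all("p")

print(is_footnote_link(ref))    # True  -- next <p> carries the "foot" class
print(is_footnote_link(foot))   # False -- next <p> is an ordinary paragraph
print(is_footnote_link(body))   # False -- no following <p>; the None is handled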
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def is_link(s):\n return (len(s) == 2 and is_link(s[1])) or s == empty", "def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True", "def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))", "def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))", "def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def is_href_valid(self, link):\n url = str(link['href'])\n # if it doesn't lead to a wiki page\n if not url.startswith(\"/wiki/\"):\n return False\n\n wikipedia_classes = [\"external_text\", \"mw-disambig\", \"infobox-data\"]\n # if the href has a class\n if link.get(\"class\") is not None:\n link_class = \"_\".join(link.get(\"class\"))\n # if the class is an external text class, or a disambiguation link\n if any(wiki_class in link_class for wiki_class in wikipedia_classes):\n return False\n\n if 'wikimedia' in url or 'wiktionary' in url:\n return False\n wikipedia_keywords = [\"Help\", \"Category\", \"Wikipedia\", \"Template\", \"File\", \"Talk\", \"Special\", \"Portal\"]\n if any(keyword + ':' in url for keyword in wikipedia_keywords):\n return False\n if '#' in url:\n return False\n # if the page is a file\n if re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z]$\", url) or re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z]$\", url):\n return False\n\n # if the href is enclosed in brackets\n if WikiPage.is_substring_enclosed_in_brackets(link, link.parent.parent):\n return False\n\n wikipedia_not_needed_tags = ['small', 'sup', 'i']\n if link.parent.name in wikipedia_not_needed_tags:\n return False\n\n # if the href shows two different spellings. like in: https://en.wikipedia.org/wiki/Carbon_fibers\n # Carbon fibers ~or~ carbon fibres - here or is the href.\n\n if link.contents == [\"or\"]:\n return False\n\n parents_classes = [p.get(\"class\") for p in link.parents if p.get(\"class\") is not None]\n parents_classes = [str(\"_\".join(p)) for p in parents_classes]\n parents_ids = [p.get(\"id\") for p in link.parents if p.get(\"id\") is not None]\n\n # 'toc' - the Contents menu class\n # 'mw-editsection' - the Edit section\n # 'thumbcaption' - a Photo Caption\n # 'hlist' - a list like in: https://en.wikipedia.org/wiki/January\n wikipedia_classes_to_ignore = [\"thumbcaption\", \"infobox\", \"navigation-not-searchable\", \"sidebar\", \"box-text\",\n \"toc\", \"mw-editsection\", \"thumb\", \"hlist\", \"navbox\"]\n\n for p_class in parents_classes:\n\n if any(class_to_ignore in p_class for class_to_ignore in wikipedia_classes_to_ignore):\n return False\n\n # if it is a coordinates href\n if \"coordinates\" in parents_ids:\n return False\n\n '''\n Update 13.04.2021:\n ------------------\n Someone edited the \"Epistemology\" page. and changed the first link <a>branches<a/>.\n Instead of pointing to the page \"Branches of science\", it was changed to point to \"Outline of philosophy\".\n Which creates a loop. 
I chose to ignore it manually, and instead click on the next link.\n ( which happens to be Philosophy :) )\n This changed also caused some of the \"paths\" in the PDF files,\n generated before that date to be slightly outdated. But the concept stays the same :)\n \n Update 08.05.2021:\n ------------------\n they fixed it since :)\n \"Epistemology\" -> branches of philosophy : \"https://en.wikipedia.org/wiki/Outline_of_philosophy\" ->\n -> Philosophy.\n \n #if \"Outline_of_philosophy\" in url:\n # return False\n '''\n\n return True", "def islink(path):\n return get_instance(path).islink(path)", "def islink(self):\n return os.path.islink(self.path)", "def islink(self, path):\n return os.path.islink(path)", "def is_link(self, url):\n return not self.is_page(url)", "def is_footnote(self):\n return self.style['float'] == 'footnote'", "def test_link(self):\n\n markup = \"\"\"\n <div>\n <p>Some text <span id=\"1\" class=\"foo:bar:foobar\"> in a paragraph</span>.\n <a id=\"2\" class=\"bar\" href=\"http://google.com\">Link</a>\n <a id=\"3\">Placeholder text.</a>\n </p>\n </div>\n \"\"\"\n\n self.assert_selector(\n markup,\n \":link\",\n [\"2\"],\n flags=util.HTML5\n )\n\n self.assert_selector(\n markup,\n \"a:link\",\n [],\n flags=util.XML\n )", "def _IsLink(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(\n file_attribute_flags & pyfsntfs.file_attribute_flags.REPARSE_POINT)", "def isPostLink(self, rel, type = None): #$NON-NLS-1$\r\n return self._isInRelList(rel, ZAtomRelTypes.ATOM_POST_LINK_REL_LIST)", "def check_for_url_in_text(self, string):\r\n has_link = False\r\n\r\n # Find all links in the string.\r\n links = re.findall(r'(https?://\\S+)', string)\r\n if len(links)>0:\r\n has_link = True\r\n\r\n # Autolink by wrapping links in anchor tags.\r\n for link in links:\r\n string = re.sub(link, self.generate_file_link_html_from_url(link, link), string)\r\n\r\n return has_link, string", "def check_link(self, link):\n false_links = [\"wikipedia:\", \"w:\", \"wikitionary:\", \"wikt:\", \"wikinews:\",\n \"n:\", \"wikibooks:\", \"b:\", \"wikiquote:\", \"q:\", \"wikisource:\",\n \"s:\", \"wikispecies:\", \"species:\", \"wikiversity\", \"v:\", \n \"wikivoyage:\", \"voy:\", \"wikimedia:\", \"foundation:\", \"wmf:\", \n \"commonds:\", \"c:\", \"chapter:\", \"metawikipedia:\", \"meta:\", \n \"m:\", \"incubator:\", \"outreach:\", \"mw:\", \"mediazilla:\", \n \"bugzilla:\", \"testwiki:\", \"wikitech:\", \"wikidata:\", \"d:\",\n \"phabricator:\", \"phab:\", \"talk:\", \"user talk:\", \"file:\", \n \"user:\", \"template:\", \"category:\", \"file talk:\", \n \"category talk:\", \"image:\", \"media:\", \"special:\", \n \"help:\", \"portal:\", \"portal talk:\", \"\\#\"]\n is_bad = any(false_link in link.lower() for false_link in false_links)\n if is_bad or link[0] == \":\":\n return False\n else:\n return True", "def test_link(self):\n comment = \"[link](http://foo.com)\"\n comment_md = Markdown().render(comment)\n self.assertEqual(comment_md, '<p><a rel=\"nofollow\" href=\"http://foo.com\">link</a></p>')", "def is_link(token):\n\n pattern = r'ht{2}p(s|)\\:\\/\\/(w{3}.|)[\\w]+\\.[\\w]+\\/[\\w\\d]+'\n return re.match(pattern, token)", "def hasEntityLink(self, link):\r\n return self.feed_handler.hasEntityLink(link)", "def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in 
the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False", "def isFeedLink(self, rel, type = None): #$NON-NLS-1$\r\n return self._isInRelList(rel, ZAtomRelTypes.ATOM_FEED_LINK_REL_LIST)", "def is_valid_tag(self, tag):\n\n if tag.has_attr('href') and len(tag['href']) > 0:\n href = tag['href']\n complete_href = self.session.complete_url(href)\n\n is_relative = self.url in complete_href\n is_visited = complete_href in self.visited_paths\n is_style_sheet = tag.name == \"link\"\n is_jumpTo = \"#\" in href\n is_mailTo = \"mailto\" in href\n is_js = \"javascript:\" in href\n return is_relative and \\\n not (is_visited or is_style_sheet or is_jumpTo or is_mailTo or is_js)\n else:\n return False", "def isLinkName(word):\r\n return wikiLink.match(word)", "def check_link(feed):\n # see if this is youtube link\n if feed['link'].count('youtube.com') and 'embed' in feed and feed['embed']:\n y = re.findall('youtube\\.com/embed/(.+)', feed['embed'])\n if y:\n # format correct youtube link\n feed['link'] = 'http://youtu.be/{0}'.format(y[0])\n return True\n\n return False", "def test_linkify(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.link_text), self.link_atag)", "def isHighLinkDensity(self, e):\n links = Parser.getElementsByTag(e, tag='a')\n if links is None or len(links) == 0:\n return False\n \n text = Parser.getText(e)\n words = text.split(' ')\n numberOfWords = float(len(words))\n sb = []\n for link in links:\n sb.append(Parser.getText(link))\n \n linkText = ''.join(sb)\n linkWords = linkText.split(' ')\n numberOfLinkWords = float(len(linkWords))\n numberOfLinks = float(len(links))\n linkDivisor = float(numberOfLinkWords / numberOfWords)\n score = float(linkDivisor * numberOfLinks)\n if score >= 1.0:\n return True\n return False\n # return True if score > 1.0 else False", "def isAlternateLink(self, rel, type = None): #$NON-NLS-1$\r\n return u\"alternate\" == rel.strip().lower() #$NON-NLS-1$\r", "def assert_has_valid_link(self, response, expected_ending):\r\n assert link in response['link']\r\n self.assert_valid_url(link, expected_ending)", "def is_cross_onap_link(self, logical_link):\n for relationship in logical_link[\"relationship-list\"][\"relationship\"]:\n if relationship[\"related-to\"] == \"ext-aai-network\":\n return True\n return False", "def _islink(path):\n if not os.path.isdir(path):\n return False\n\n if not isinstance(path, str):\n path = str(path)\n\n attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n if attributes == INVALID_FILE_ATTRIBUTES:\n return False\n\n return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0", "def is_highlink_density(self, e):\n links = self.parser.getElementsByTag(e, tag='a')\n if links is None or len(links) == 0:\n return False\n\n text = self.parser.getText(e)\n words = text.split(' ')\n words_number = float(len(words))\n sb = []\n for link in links:\n sb.append(self.parser.getText(link))\n\n linkText = ''.join(sb)\n linkWords = linkText.split(' ')\n numberOfLinkWords = float(len(linkWords))\n numberOfLinks = float(len(links))\n linkDivisor = float(numberOfLinkWords / words_number)\n score = float(linkDivisor * numberOfLinks)\n if score >= 1.0:\n return True\n return False\n # return True if score > 1.0 else False", "def check_linked(self):\n\n count = 0\n for house in self.houses.values():\n if house.link:\n count += 1\n if count is 150:\n return True\n else:\n return False", "def test_tag_hyperlinks(self):\n for h in self.hyperlinks:\n if h['name'] in ['C++', 'Java', 'Python', 
'ROS', 'MATLAB']:\n self.assertTrue(\n '.md' in h['url'],\n msg='Hyperlink \"%s\" is wrongly detected as a tag in \"%s\".' % (h['md'], h['file'])\n )", "def _validate_item_link(self, item):\n if len(item.link) > 255:\n raise ValueError(\"item.link length too long.\")\n\n return True", "def test_link_without_no_follow(self):\n comment = \"[link](http://foo.com)\"\n comment_md = Markdown(no_follow=False).render(comment)\n self.assertEqual(comment_md, '<p><a href=\"http://foo.com\">link</a></p>')", "def is_highlink_density(self, e):\r\n links = self.parser.getElementsByTag(e, tag='a')\r\n if links is None or len(links) == 0:\r\n return False\r\n\r\n text = self.parser.getText(e)\r\n words = text.split(' ')\r\n words_number = float(len(words))\r\n sb = []\r\n for link in links:\r\n sb.append(self.parser.getText(link))\r\n\r\n linkText = ''.join(sb)\r\n linkWords = linkText.split(' ')\r\n numberOfLinkWords = float(len(linkWords))\r\n numberOfLinks = float(len(links))\r\n linkDivisor = float(numberOfLinkWords / words_number)\r\n score = float(linkDivisor * numberOfLinks)\r\n if score >= 1.0:\r\n return True\r\n return False\r\n # return True if score > 1.0 else False\r", "def is_linked(self, node_from, node_to): # pragma: no cover\n\t\traise NotImplementedError", "def test_external_link_no_optional():\n anchor = _gen_link(\n '{% external_link url=\"http://example.com/path\" text=\"Click\" %}')\n assert anchor.get('target') == '_blank'\n assert anchor.get('href') == 'http://example.com/path'\n assert 'title' not in anchor.attrib\n assert 'aria-label' in anchor.attrib\n assert 'Click' in anchor.text", "def verify(link: str\n ) -> bool:\n \n # Ignore any /live/ or /av/ articles as they aren't proper articles\n if any([path in link for path in (\"/live/\", \"/sport1/\", \"/av/\")]):\n return False\n \n # Ensure the link corresponds with a valid BBC News article.\n return any([link.startswith(prefix) for prefix in BBC_URLS])", "def isAnchor(node):\n # TODO What is considered an anchor needs to be subject to an option\n return bool((isinstance(node, nodes.target)\n or isinstance(node, nodes.Structural))\n and node[DuAttrIds]\n and not node.get(DuAttrRefuri, None))", "def is_relative_link(link):\n return not get_protocol(link) and re.search(r\"^\\.?/([a-z]|[A-Z]|[0-9]|\\.)+\", link)", "def __set_has_link(html_text=str):\n try:\n find = \"a\"\n bsoup = BeautifulSoup(html_text, \"html.parser\")\n for child in bsoup.find_all(find):\n child.string = constants.QUESTION_HAS_LINKS_KEY\n return bsoup.prettify()\n except TypeError as error:\n print(\"TypeError in text_processor.__set_has_link\", error)\n return None", "def is_valid_listings(link):\n if link.has_attr(\"href\") and link.attrs[\"href\"].startswith(LISTING_PREFIX):\n return True\n return False", "def hasSuffixLink(self):\n return False if self.suffix_link is None else True", "def is_attachment(self):\n from ..html import element_has_link_type\n\n if self.element is not None and self.element.tag == 'a':\n return element_has_link_type(self.element, 'attachment')\n return False", "def hasSiblings():", "def has_target_href(self) -> bool:\n return self._target_href is not None", "def find_link(html_content):\n soup = BeautifulSoup(html_content, \"html.parser\")\n paragraphs = soup.find_all('p')\n for p in paragraphs:\n string = ''\n for element in p:\n if type(element) == bs4.element.NavigableString:\n string += element\n elif type(element) == bs4.element.Tag and element.name == 'a':\n if balanced_parenths(string):\n return element\n else:\n string += 
element.get_text()\n return None", "def is_linked(self, other):\n for edge in self._edges_list:\n if other.index == edge.linked[1].index:\n return True\n return False", "def IsItemHyperText(self, item):\r\n\r\n return item.IsHyperText()", "def is_git_link():\n return islink('.git')", "def should_render_as_link(self):\n if self.active and not self.render_link_for_active:\n return False\n return bool(self.url)", "def isPresent(self, word):\n\t\treturn word in self.link_words", "def is_re_analysis_link_present(self):\n return self.is_element_present(self.re_analysis_locator)", "def test_autolink_without_no_follow(self):\n comment = \"http://foo.com\"\n comment_md = Markdown(no_follow=False).render(comment)\n self.assertEqual(comment_md, '<p><a href=\"http://foo.com\">http://foo.com</a></p>')", "def is_format_wrong(soup):\n a = soup.find('a')\n if a.has_attr('faire'):\n return True\n return False", "def check_link_in(self, url):\n url_hash = tools.url_hash(url)\n if url_hash not in self.__links:\n self.__link_lock.acquire()\n self.__links.add(url_hash)\n self.__link_lock.release()\n return False\n else:\n return True", "def check_exists_links(self):\n\n # get all non-own articles\n articles_from_external_resourse = self.articles_from_external_resourse()\n\n # return true if it not\n if not articles_from_external_resourse.count():\n return True\n\n # if found broken link\n # keep all the articles with broken links and return false,\n # otherwise return true\n article_with_broken_links = list()\n for article in articles_from_external_resourse:\n try:\n urllib.request.urlopen(article.source)\n except:\n article_with_broken_links.append(article)\n if article_with_broken_links:\n return (False, article_with_broken_links)\n return True", "def archive_check_if_end_of_posts(page_html):\n # <div id=\"no_posts_yet\">No posts yet.</div>\n # <div\\s+id=[\"']no_posts_yet[\"']>\\s*No posts yet.\\s*</div>\n next_page_link_regex = \"\"\"<div\\s+id=[\"']no_posts_yet[\"']>\\s*No posts yet.\\s*</div>\"\"\"\n next_page_link_search = re.search(next_page_link_regex, page_html, re.IGNORECASE|re.DOTALL)\n if next_page_link_search:\n return True\n else:\n return False", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def have_anchor_symbol(l):\r\n if \"<\" in str(l) or \">\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def is_zipinfo_symlink(zip_info):\n return zip_info.external_attr == ZIP_SOFTLINK_ATTRIBUTE_MAGIC", "def _do_links(self, text):\r\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\r\n\r\n # `anchor_allowed_pos` is used to support img links inside\r\n # anchors, but not anchors inside anchors. 
An anchor's start\r\n # pos must be `>= anchor_allowed_pos`.\r\n anchor_allowed_pos = 0\r\n\r\n curr_pos = 0\r\n while True: # Handle the next link.\r\n # The next '[' is the start of:\r\n # - an inline anchor: [text](url \"title\")\r\n # - a reference anchor: [text][id]\r\n # - an inline img: ![text](url \"title\")\r\n # - a reference img: ![text][id]\r\n # - a footnote ref: [^id]\r\n # (Only if 'footnotes' extra enabled)\r\n # - a footnote defn: [^id]: ...\r\n # (Only if 'footnotes' extra enabled) These have already\r\n # been stripped in _strip_footnote_definitions() so no\r\n # need to watch for them.\r\n # - a link definition: [id]: url \"title\"\r\n # These have already been stripped in\r\n # _strip_link_definitions() so no need to watch for them.\r\n # - not markup: [...anything else...\r\n try:\r\n start_idx = text.index('[', curr_pos)\r\n except ValueError:\r\n break\r\n text_length = len(text)\r\n\r\n # Find the matching closing ']'.\r\n # Markdown.pl allows *matching* brackets in link text so we\r\n # will here too. Markdown.pl *doesn't* currently allow\r\n # matching brackets in img alt text -- we'll differ in that\r\n # regard.\r\n bracket_depth = 0\r\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\r\n text_length)):\r\n ch = text[p]\r\n if ch == ']':\r\n bracket_depth -= 1\r\n if bracket_depth < 0:\r\n break\r\n elif ch == '[':\r\n bracket_depth += 1\r\n else:\r\n # Closing bracket not found within sentinel length.\r\n # This isn't markup.\r\n curr_pos = start_idx + 1\r\n continue\r\n link_text = text[start_idx+1:p]\r\n\r\n # Possibly a footnote ref?\r\n if \"footnotes\" in self.extras and link_text.startswith(\"^\"):\r\n normed_id = re.sub(r'\\W', '-', link_text[1:])\r\n if normed_id in self.footnotes:\r\n self.footnote_ids.append(normed_id)\r\n result = '<sup class=\"footnote-ref\" id=\"fnref-%s\">' \\\r\n '<a href=\"#fn-%s\">%s</a></sup>' \\\r\n % (normed_id, normed_id, len(self.footnote_ids))\r\n text = text[:start_idx] + result + text[p+1:]\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = p+1\r\n continue\r\n\r\n # Now determine what this is by the remainder.\r\n p += 1\r\n if p == text_length:\r\n return text\r\n\r\n # Inline anchor or img?\r\n if text[p] == '(': # attempt at perf improvement\r\n match = self._tail_of_inline_link_re.match(text, p)\r\n if match:\r\n # Handle an inline anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n\r\n url, title = match.group(\"url\"), match.group(\"title\")\r\n if url and url[0] == '<':\r\n url = url[1:-1] # '<url>' -> 'url'\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n if title:\r\n title_str = ' title=\"%s\"' % (\r\n _xml_escape_attr(title)\r\n .replace('*', self._escape_table['*'])\r\n .replace('_', self._escape_table['_']))\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n _xml_escape_attr(link_text),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in 
self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n continue\r\n\r\n # Reference anchor or img?\r\n else:\r\n match = self._tail_of_reference_link_re.match(text, p)\r\n if match:\r\n # Handle a reference-style anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n link_id = match.group(\"id\").lower()\r\n if not link_id:\r\n link_id = link_text.lower() # for links like [this][]\r\n if link_id in self.urls:\r\n url = self.urls[link_id]\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title = self.titles.get(link_id)\r\n if title:\r\n before = title\r\n title = _xml_escape_attr(title) \\\r\n .replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title_str = ' title=\"%s\"' % title\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n link_text.replace('\"', '&quot;'),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result = '<a href=\"%s\"%s>%s</a>' \\\r\n % (url, title_str, link_text)\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = match.end()\r\n continue\r\n\r\n # Otherwise, it isn't markup.\r\n curr_pos = start_idx + 1\r\n\r\n return text", "def seq_in_link(link, sub_link):\r\n # first_link = []\r\n # while link != Link.empty:\r\n # first_link.append(link.first)\r\n # link = link.rest\r\n #\r\n # while sub_link != Link.empty:\r\n # if sub_link.first in first_link:\r\n # index = first_link.index(sub_link.first)\r\n # first_link = first_link[index:]\r\n # sub_link = sub_link.rest\r\n # else:\r\n # return False\r\n # return True\r\n# this method is too complicated!\r\n while link != Link.empty and sub_link != Link.empty:\r\n if sub_link.first == link.first:\r\n sub_link = sub_link.rest\r\n link = link.rest\r\n\r\n if sub_link == Link.empty:\r\n return True\r\n else:\r\n return False", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def handle_a(self, tag, attrs):\n ad = dict(attrs)\n if 'href' in ad.keys() \\\n and ad['href'].startswith('http:') \\\n and 'target' not in ad.keys():\n self.errmsg(\"External link with no target attribute\")", "def test_local_link(self):\n\n self.assert_selector(\n self.MARKUP,\n \"a:local-link\",\n [],\n 
flags=util.HTML\n )", "def header_field_should_have_link(self, label):\n locator = lex_locators[\"record\"][\"header\"][\"field_value_link\"].format(label)\n self.selenium.page_should_contain_element(locator)", "def is_broken_link(path):\r\n path = os.readlink(path)\r\n return not os.path.exists(path)", "def GoogleCode_IsExternalLink(wikifier, link):\n\n if GoogleCode_Exists(wikifier, link):\n return False;\n\n if URL.match(link):\n return True\n\n if '.' in link or '\\\\' in link or '/' in link or '#' in link:\n return True\n\n return False", "def test_links_in_message_are_shortened(self):\n sender = self.create_user()\n group = self.create_group()\n sender.add_to_group(group.pk)\n\n thread = mommy.make(Thread, group=group)\n message = Message(\n text='This is a <a href=\"http://www.razzmatazz.local\">link</a>',\n thread=thread,\n sender=sender\n )\n message.save()\n self.assertEqual(message.links.count(), 1)\n self.assertTrue(message.links.get().short_code in message.text)", "def exists(self):\n return self.islink() or exists(self._path)", "def update_link(self, link):\n if self not in (link.endpoint_a, link.endpoint_b):\n return False\n\n if self.link is None or self.link != link:\n self.link = link\n\n if link.endpoint_a == self:\n endpoint = link.endpoint_b\n else:\n endpoint = link.endpoint_a\n\n if endpoint.link is None or endpoint.link != link:\n endpoint.link = link\n\n return True", "def test_bleach_with_href():\n eq_(u'<a href=\"http://xx.com\" rel=\"nofollow\" title=\"xx\">xx</a> '\n u'<a href=\"http://yy.com\" rel=\"nofollow\">http://yy.com</a>',\n bl.bleach('<a title=\"xx\" href=\"http://xx.com\">xx</a> http://yy.com'))\n eq_('<a href=\"http://xx.com\" rel=\"nofollow\">http://xx.com</a>',\n bl.bleach('<a href=\"http://xx.com\">http://xx.com</a>'))", "def is_google_doc_link(link: str) -> bool:\n\n valid_google_docs_url_strings = ('https://docs.google.com/', 'https://drive.google.com/',\n 'https://forms.gle/', 'https://goo.gl/forms')\n\n if len(link) < 15 or not link.startswith(valid_google_docs_url_strings):\n return False\n else:\n return True", "def __ne__(self, other: 'NextHref') -> bool:\n return not self == other", "def test_link_is_tracked_true(self):\n self.assertTrue(link_is_tracked(\"https://test.com/testurl\"))", "def add_link():\n return True", "def _is_bad_link(info, base):\r\n # Links are interpreted relative to the directory containing the link\r\n tip = resolved(joinpath(base, dirname(info.name)))\r\n return _is_bad_path(info.linkname, base=tip)", "def has_cycle(link):\r\n # collect_list = [link]\r\n # while not link == Link.empty:\r\n # collect_list.append(link.first)\r\n # link = link.rest\r\n # if link.rest in collect_list:\r\n # return True\r\n # return False\r\n s = link\r\n while not link == Link.empty:\r\n if link.rest == s:\r\n return True\r\n else:\r\n link = link.rest\r\n return False", "def is_linked(self): \n return self.ichair_id is not None", "def has_end_effector_link(self):\n return len(self._g.get_end_effector_link()) > 0", "def is_hom_ref(self) -> bool:\n return self.is_hom() and (self.allele1 == 0 or self.allele2 == 0)", "def has_member(self, pointer):\n start = self.head\n while start:\n if start==pointer:\n return True\n start = start.getLink()\n return False", "def should_link(self, item):\r\n return item.__class__ in self.class_map.keys()", "def test_comments_button_link_test(self):\n\n topic_footer = self.event_page.topic_footer\n old_url = self.driver.current_url\n topic_footer.go_to_commments()\n new_url = self.driver.current_url\n 
self.assertEqual(new_url, old_url + '#comments', 'Go to comments wrong url')", "def is_symlink(self):\n return self._security_class == \"lnk_file\"", "def is_fs_link(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.islink(pathname)", "def test_autolink(self):\n comment = \"http://foo.com\\n\" \\\n \"http://foo.com?foo=1&bar=2\\n\" \\\n \"http://foo.com/<bad>\"\n comment_md = Markdown().render(comment)\n self.assertEqual(\n comment_md.splitlines(),\n [\n '<p><a rel=\"nofollow\" href=\"http://foo.com\">http://foo.com</a></p>',\n '<p><a rel=\"nofollow\" href=\"http://foo.com?foo=1&amp;bar=2\">http://foo.com?foo=1&amp;bar=2</a></p>',\n '<p><a rel=\"nofollow\" href=\"http://foo.com/&lt;bad&gt;\">http://foo.com/&lt;bad&gt;</a></p>'\n ])", "def __verify(self, href):\n # change main url to avoid mistakes with http ou https\n main = self.main_url.replace('https://', '').replace('http://', '')\n forbiden = {\"#\", 'None'} # forbidden possible urls\n if (href is None) or (href in forbiden):\n return False\n for item in ['tel:', 'mailto:', 'javascript:']:\n if item in href: # verify if is a link to telephone, e-mail or javascript\n return False\n if main in href and (\"/checkout/cart/add\" in href or \"/checkout/#/cart\" in href):\n return False # prevents a purchase from being made\n elif main in href or (main not in href and href[:4] != \"http\"):\n return True # possible case of a valid link\n else:\n return False # any other link is not valid", "def linkify(state: StateInline, silent: bool) -> bool:\n if not state.md.options.linkify:\n return False\n if state.linkLevel > 0:\n return False\n if not state.md.linkify:\n raise ModuleNotFoundError(\"Linkify enabled but not installed.\")\n\n pos = state.pos\n maximum = state.posMax\n\n if (\n (pos + 3) > maximum\n or state.src[pos] != \":\"\n or state.src[pos + 1] != \"/\"\n or state.src[pos + 2] != \"/\"\n ):\n return False\n\n if not (match := SCHEME_RE.match(state.pending)):\n return False\n\n proto = match.group(1)\n if not (link := state.md.linkify.match_at_start(state.src[pos - len(proto) :])):\n return False\n url: str = link.url\n\n # disallow '*' at the end of the link (conflicts with emphasis)\n url = url.rstrip(\"*\")\n\n full_url = state.md.normalizeLink(url)\n if not state.md.validateLink(full_url):\n return False\n\n if not silent:\n state.pending = state.pending[: -len(proto)]\n\n token = state.push(\"link_open\", \"a\", 1)\n token.attrs = {\"href\": full_url}\n token.markup = \"linkify\"\n token.info = \"auto\"\n\n token = state.push(\"text\", \"\", 0)\n token.content = state.md.normalizeLinkText(url)\n\n token = state.push(\"link_close\", \"a\", -1)\n token.markup = \"linkify\"\n token.info = \"auto\"\n\n state.pos += len(url) - len(proto)\n return True", "def is_people_with_link_can_view_and_comment(self):\n return self._tag == 'people_with_link_can_view_and_comment'", "def test_link_is_tracked_false(self):\n self.assertFalse(link_is_tracked(\"https://www.foo.com/\"))", "def is_ode_noad_link(self):\n if self.project_name in IDENTIFIERS:\n return True\n else:\n return False", "def test_get_object_link_hyperlink(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n ret = plugin.get_object_link('HyperLink', self.hyperlink.sodar_uuid)\n self.assertEqual(ret['url'], self.hyperlink.url)\n self.assertEqual(ret['label'], self.hyperlink.name)\n self.assertEqual(ret['blank'], True)", "def IsSymlink(info):\n return (info.external_attr >> 16) == 0120777", "def check_href(href):\n if bool(pattern.match(href)):\n if 
os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False", "def isReference(node):\n return bool(isinstance(node, nodes.Referential)\n and node.get(DuAttrRefid, None))" ]
[ "0.69997984", "0.6757967", "0.6695195", "0.66264176", "0.66264176", "0.66264176", "0.63451207", "0.6296661", "0.6101461", "0.60751194", "0.604954", "0.6045755", "0.60173523", "0.5861832", "0.5820362", "0.5799617", "0.5760772", "0.57050264", "0.5704481", "0.5701879", "0.5698439", "0.56340456", "0.56298524", "0.5579598", "0.55548924", "0.5543574", "0.5533561", "0.55235624", "0.55162615", "0.5506118", "0.54905206", "0.546339", "0.5451245", "0.544554", "0.5438265", "0.5433175", "0.5429096", "0.5419738", "0.5414154", "0.5403942", "0.5400776", "0.53732365", "0.5367499", "0.5364436", "0.5344208", "0.5315885", "0.5292272", "0.5281003", "0.52648836", "0.524979", "0.5239134", "0.52369046", "0.52185845", "0.5209682", "0.52079964", "0.52070296", "0.51994026", "0.5187265", "0.51827455", "0.51753235", "0.51651925", "0.5160124", "0.51483244", "0.5142532", "0.5140128", "0.5138945", "0.5135238", "0.5134243", "0.5131163", "0.5125556", "0.51217216", "0.511658", "0.5114275", "0.50885504", "0.50642675", "0.50534534", "0.50490874", "0.50457835", "0.5032906", "0.50313413", "0.5029144", "0.50195545", "0.5018644", "0.5013959", "0.50071424", "0.4998131", "0.49911255", "0.49906808", "0.49724367", "0.49696746", "0.49628112", "0.49613184", "0.49571374", "0.49414453", "0.49355528", "0.49325868", "0.4904041", "0.49034023", "0.49031463", "0.49000117" ]
0.8036169
0
Checks whether a paragraph element is part of a footnote.
def is_footnote(self, par):
    if par.find_next_sibling('p') is None:
        return False
    return self.is_footnote_text(par) or self.is_footnote_link(par)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def is_footnote(self):\n return self.style['float'] == 'footnote'", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def ends_paragraph(s: str) -> bool:\n return not s.strip()", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def should_be_compact_paragraph(self, node):\n\n if isinstance(node.parent, nodes.container):\n if 'non-paragraph' not in node.parent.attributes['classes']:\n return False\n\n # noinspection PyUnresolvedReferences\n return super().should_be_compact_paragraph(node)", "def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False", "def check_paragraph(line):\n if len(line) > 3 and line[:3] == '⋅⋅⋅':\n return '<p>' + line[3:] + '</p>'\n else:\n return line", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)", "def _get_footnote_par(self, id):\n start = self._current_body_par\n if start is None:\n start = self.parsed\n link = start.find_next(id=id)\n if link is None:\n raise NoFootnoteError(f\"Could not find id {id}\")\n foot_par = link.parent.find_next_sibling('p')\n if not self.is_footnote_text(foot_par):\n raise NoFootnoteError(f\"Failed to find adjacent link paragraph for footnote {id}.\")\n return foot_par", "def is_plugin_note(self, note):\n return bool(self.regex.match(note))", "def is_valid_paragraphs(args, skip=False):\n if is_valid_file_and_directory(args) or skip:\n if args.paragraphs is not None:\n return True\n return False", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def is_foot_vertically_inside(self, item_or_group):\n if 
isinstance(item_or_group, ItemGroup):\n return not self.is_bottom_edge_below(item_or_group) and self.is_bottom_edge_below_top_foot(item_or_group)\n else:\n raise TypeError(\"item_or_group must be instance of ItemGroup.\")", "def parse_footnote(self, footelem) -> FootNote:\n\n fn = FootNote()\n if footelem.text is None:\n fn.footnote = ''\n else:\n fn.footnote = footelem.text.strip()\n\n fn.footnoteid = footelem.attrib['{%s}label' % footelem.nsmap['xlink']]\n\n return fn", "def end_paragraph(self):\n raise NotImplementedError", "def fix_footnotes(case_el, warnings):\n case_pq = PyQuery(case_el)\n # fix footnotes\n # footnotes look like this (since <small> is already stripped)\n # <p>--------</p>\n # <p>Notes:</p>\n # <p>\n # <sup>\n # <a href=\"#fn1\" name=\"fr1\">1</a>\n # </sup> text text text </p>\n # notes label can look like `<strong><br/> --------</strong>` -- NE2d/990/990ne2d139_12.xml\n notes_el = case_pq('p:contains(\"Notes:\")').filter(lambda i, el: strip_tags(PyQuery(el).text()).strip() == 'Notes:')\n refs = {}\n notes_section = None\n if notes_el:\n notes_section = notes_el.closest('article, section')\n footnote_index = 0\n opinion_index = 1\n footnote_el = None\n\n # before and after footnote sections there is a paragraph of either 8 or 15 hyphens\n footnote_breaks = ['-' * 8, '-' * 15]\n\n # remove footnote break before footnote section\n # can have tags in the footnote break -- A3d/50/50a3d607_29.xml\n prev_notes_el = notes_el.prev()\n if strip_tags(prev_notes_el.text()).strip() not in footnote_breaks:\n warnings.append(\"Unexpected element before notes el.\")\n else:\n prev_notes_el.remove()\n\n # remove \"Notes:\"\n old_footnote_el = notes_el.next()\n notes_el.remove()\n\n # step through each footnote element\n while old_footnote_el:\n # sometimes <a> tag gets out of <p> tag -- SE2d/590/590SE2d53.xml\n # put it inside a new <p>\n if old_footnote_el[0].tag == 'a':\n old_footnote_el = wrap_with(old_footnote_el, PyQuery(etree.Element('p')))\n\n link_el = old_footnote_el('a').eq(0)\n if not link_el:\n # this could be the end of footnotes, in which case stop\n if strip_tags(old_footnote_el.text()).strip() in footnote_breaks:\n old_footnote_el.remove()\n break\n # or could be a second paragraph of the previous footnote, in which case append\n if footnote_el:\n footnote_el.append(old_footnote_el)\n old_footnote_el = footnote_el.next()\n continue\n else:\n # if there's a non-footnote before the first footnote, we don't know what's going on,\n # so quit processing\n warnings.append(\"Unexpected non-footnote element.\")\n break\n label = link_el.text()\n footnote_index += 1\n footnote_id = f'footnote_{opinion_index}_{footnote_index}'\n footnote_el = PyQuery(renderer.make_footnote_el(id=footnote_id, label=label))\n refs[link_el.attr('href').lstrip('#')] = [footnote_id, footnote_el]\n while link_el.parent()[0].tag == 'sup':\n link_el = link_el.parent()\n link_el.remove()\n\n # remove space at beginning of footnote left over from removing footnote number\n if old_footnote_el[0].text:\n old_footnote_el[0].text = old_footnote_el[0].text.lstrip()\n\n wrap_with(old_footnote_el, footnote_el)\n old_footnote_el = footnote_el.next()\n\n # fix footnote references (<small> is already stripped)\n # ...<sup><a href=\"#fr1\" name=\"fn1\">1</a></sup>... typical\n # ...<sup id=\"co_fnRef_B00012045229866_ID0E4F\">1</sup> BR/590/590 B.R. 577.xml\n # ...<a href=\"#1\" name=\"fn1\" id=\"fn1\">1</a>... 
NW2d/781/781NW2d5512010WIApp33_29.xml\n for section in case_pq('.head-matter, .opinion').items():\n for old_ref_pq in section('a, sup[id]').items():\n label = old_ref_pq.text()\n if old_ref_pq[0].tag == 'a':\n ref_name = old_ref_pq.attr('name')\n if not (ref_name and ref_name.startswith('fn')):\n continue\n else:\n ref_name = \"fn\" + label\n ref, footnote_el = refs.get(ref_name, ['orphan', None])\n if footnote_el:\n # move footnotes from end of document to correct section -- see NW2d/906/906 N.W.2d 436_Replace.xml\n if section != notes_section:\n section.append(footnote_el)\n else:\n warnings.append(f\"Unmatched ref {repr(str(old_ref_pq))}\")\n ref_el = etree.Element('a', {'class': 'footnotemark', 'href': '#' + ref, 'id': 'ref_' + ref})\n ref_el.text = label\n while old_ref_pq.parent()[0].tag == 'sup':\n old_ref_pq = old_ref_pq.parent()\n PyQuery(ref_el).insert_before(old_ref_pq)\n old_ref_pq.remove()", "def is_plugin_note(self, note):\n return False", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def test_p_tag_is_not_empty_element(self):\n soup = self.soup(\"<p />\")\n self.assertFalse(soup.p.is_empty_element)\n self.assertEqual(str(soup.p), \"<p></p>\")", "def footnotes(self, text):\n html = '<div class=\"footnotes\">\\n%s<ol>%s</ol>\\n</div>\\n'\n return html % (self.hrule(), text)", "def footnote(self) -> str:\n return self._footnote", "def findFootnotesPlaceholder(self, root):\n def finder(element):\n for child in element:\n if child.text:\n if child.text.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, True\n if child.tail:\n if child.tail.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, False\n finder(child)\n return None\n \n res = finder(root)\n return res", "def footnote(self, footnote: str):\n\n self._footnote = footnote", "def contains_pronoun(cluster):\n for mention in cluster:\n if any([w.tag_.startswith(\"PRP\") for w in mention]):\n # Found a mention with a pronoun\n return True\n return False", "def is_postal_code(elem):\n return 'post' in elem.attrib['k']", "def is_break_tag(self, el):\n\n should_break = False\n if self.type == 'odp':\n if el.name == 'page' and el.namespace and el.namespace == self.namespaces['draw']:\n should_break = True\n return should_break", "def is_punct(self, word, language):", "def is_complete_multipartite(self):\n if self._.d != 2:\n return False\n if not self._has(\"p\"):\n self.pTable()\n return any(self._.p[0, i, i] == self._.p[j, i, i]\n for i, j in [(1, 2), (2, 1)])", "def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def is_toc(self, par):\n return \"toc\" in par.attrs.get(\"class\", [])", "def is_overlappedFootprint(self, footprint):\n if footprint.width == 0 or footprint.height == 0 or footprint.popularity <= 1:\n return False\n for corner in footprint.corners:\n if self.is_point_in(corner):\n return True\n for corner in self.corners:\n if footprint.is_point_in(corner):\n return True\n return False", "def is_tip(text):\n\n amount = 0\n if re.search(r'I sent you a \\$[0-9]*\\.00 tip ♥', text):\n amount = re.match(r'I sent you a \\$([0-9]*)\\.00 tip ♥', text).group(1)\n Settings.maybe_print(\"successfully found tip\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n elif re.search(r\"I\\'ve contributed \\$[0-9]*\\.00 to your Campaign\", text):\n amount = re.match(r'I\\'ve contributed \\$([0-9]*)\\.00 to your Campaign', 
text).group(1)\n Settings.maybe_print(\"successfully found campaign donation\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n return False, int(amount)", "def delete_paragraph(paragraph):\n p = paragraph._element\n p.getparent().remove(p)\n p._p = p._element = None", "def _visit_paragraph(self,elem):\n # only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def single_line_paragraph(s: str) -> bool:\n return s.startswith('@') or s.strip() in ('\"\"\"', \"'''\")", "def is_ends_with_tag(text):\n\treturn re_tag.search(text) != None", "def footnote_item(self, key, text):\n back = (\n '<a href=\"#fnref-%s\" class=\"footnote\">&#8617;</a>'\n ) % escape(key)\n text = text.rstrip()\n if text.endswith('</p>'):\n text = re.sub(r'<\\/p>$', r'%s</p>' % back, text)\n else:\n text = '%s<p>%s</p>' % (text, back)\n html = '<li id=\"fn-%s\">%s</li>\\n' % (escape(key), text)\n return html", "def is_conference_paper(self):\n if self.root.xpath(\"./conference-info\"):\n return True\n journal_issue = self.root.xpath(\n \"string(./RDF/Description/issueName[1])\"\n ).extract_first()\n if journal_issue:\n is_conference = re.findall(r\"proceedings|proc.\", journal_issue.lower())\n return bool(is_conference)\n return False", "def contains_point(self, p):\n return self.begin <= p < self.end", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def has_template(page_text: str) -> bool:\n\n\tpattern = '<noinclude>.*{{documentation}}.*</noinclude>'\n\tif re.search(pattern, page_text, re.DOTALL | re.IGNORECASE):\n\t\treturn True\n\telse:\n\t\treturn False", "def isPdf(page):\n return page['data'][:4] == '%PDF'", "def is_valid_para(self, para_type, type_table):\n # The values of the table contain all known destination types\n if para_type in type_table.values():\n return True\n return True", "def is_P(self):\n return isinstance(self,P)", "def test_as_p(self, output=None, form=None):\n setup = {'start_tag': '<p>', 'end_tag': '</p>', 'label_end': ' ', 'input_end': ' '}\n setup['as_type'] = as_type = 'as_p'\n setup['form'] = form or self.form\n output = output or setup['form'].as_p().strip()\n expected = self.get_expected_format(setup)\n errors = []\n if output != expected:\n errors = self.log_html_diff(expected, output, as_type=as_type, full=False)\n message = \"Suite {}, had {} lines of HTML errors for {} \".format(self.__class__.__name__, len(errors), as_type)\n self.assertNotEqual('', output)\n self.assertEqual(expected, output, message)", "def is_pentomino(pent, pents):\n pidx = get_pent_idx(pent)\n if pidx == -1:\n return False\n true_pent = pents[pidx]\n \n for flipnum in range(3):\n p = np.copy(pent)\n if flipnum > 0:\n p = np.flip(pent, flipnum-1)\n for rot_num in range(4):\n if np.array_equal(true_pent, p):\n return True\n p = np.rot90(p)\n return False", "def test_complex_multple_emphasis_type_variant2(self):\n\n self.check_markdown(\n 'on the **1-4 row** of the AP Combat Table ***and*** receive',\n '<p>on 
the <strong>1-4 row</strong> of the AP Combat Table <strong><em>and</em></strong> receive</p>'\n )", "def is leaf(self, p):\n return self.num children(p) == 0", "def is_placeholder(self):\n return _child(self.__nvXxPr.nvPr, 'p:ph') is not None", "def is_ppt(filename):\n have_current_user = False\n have_user_edit = False\n have_persist_dir = False\n have_document_container = False\n ppt_file = None\n try:\n ppt_file = PptFile(filename)\n for stream in ppt_file.iter_streams():\n if stream.name == 'Current User':\n for record in stream.iter_records():\n if isinstance(record, PptRecordCurrentUser):\n have_current_user = True\n if have_current_user and have_user_edit and \\\n have_persist_dir and have_document_container:\n return True\n elif stream.name == 'PowerPoint Document':\n for record in stream.iter_records():\n if record.type == 0x0ff5: # UserEditAtom\n have_user_edit = True\n elif record.type == 0x1772: # PersistDirectoryAtom\n have_persist_dir = True\n elif record.type == 0x03e8: # DocumentContainer\n have_document_container = True\n else:\n continue\n if have_current_user and have_user_edit and \\\n have_persist_dir and have_document_container:\n return True\n else: # ignore other streams/storages since they are optional\n continue\n except Exception as exc:\n logging.debug('Ignoring exception in is_ppt, assume is not ppt',\n exc_info=True)\n finally:\n if ppt_file is not None:\n ppt_file.close()\n return False", "def is_header_or_footer(self, tag):\n return ((tag.name == \"text\") and tag.has_attr(\"top\")\n and ((int(tag[\"top\"]) <= self.HEADER_END_OFFSET) \n or (int(tag[\"top\"]) > self.FOOTER_START_OFFSET)))", "def is_punctuation(ch):\n if (ch == '.'): return False\n if (ch >= '!' and ch <= '/'): return True\n if (ch >= ':' and ch <= '@'): return True\n if (ch >= '\\u2010' and ch <= '\\u2014'): return True # various dashes\n if (is_quote_mark(ch)): return True\n return False", "def has_textframe(self):\n return _child(self._element, 'p:txBody') is not None", "def test_complex_multple_emphasis_type(self):\n\n self.check_markdown(\n 'traced ***along*** bla **blocked** if other ***or***',\n '<p>traced <strong><em>along</em></strong> bla <strong>blocked</strong> if other <strong><em>or</em></strong></p>' # noqa: E501\n )", "def pob(self):\n if self.index >= self.length:\n return False\n \n self._pob, n = self.parse_pob()\n if self._pob is not None:\n self.idx_pob = self.index\n self.index += n\n self.isaddr = True\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"POB\", self._pob, self.idx_pob)\n return True\n return False", "def needsProcessing(self):\n return self.isMarkdown() or self.hasMetadata()", "def it_can_add_a_p_to_itself(self):\n cases = (\n (a_body().with_nsdecls(),\n a_body().with_nsdecls().with_child(a_p())),\n (a_body().with_nsdecls().with_child(a_sectPr()),\n a_body().with_nsdecls().with_child(a_p()).with_child(a_sectPr())),\n )\n for before_body_bldr, after_body_bldr in cases:\n body = before_body_bldr.element\n # exercise -----------------\n p = body.add_p()\n # verify -------------------\n assert body.xml == after_body_bldr.xml()\n assert isinstance(p, CT_P)", "def paragraph(self, on, **kw):\n if self._terse:\n return ''\n FormatterBase.paragraph(self, on)\n tag = 'p'\n if on:\n tagstr = self._open(tag, **kw)\n else:\n tagstr = self._close(tag)\n return tagstr", "def is_singleton_beam(element):\r\n if element.getName() == 'beam':\r\n notes_inside = element.getDescendantsByName('note')\r\n 
rests_inside = element.getDescendantsByName('rest')\r\n return len(notes_inside) + len(rests_inside) < 2\r\n else:\r\n return False", "def paragraph_article_text(link_to_check, point_no):\n\tdetails = ''\n\t\n\tprint(link_to_check)\n\tsoup = buzzfeedbot.soup_session(link_to_check)\n\tstart = soup.find_all('span', attrs={'class': 'subbuzz__number'})[point_no].parent.parent\n\t\n\ttry:\n\t\tsubpoint = start.find('div', class_=\"subbuzz__description\")\n\t\tif subpoint == None:\t\n\t\t\treturn \"No extra information available\"\n\texcept IndexError:\n\t\treturn \"No extra information available\"\n\t\n\tfor description in subpoint.find_all('p'):\n\t\tdetails += description.text + \"\\n\\n\"\n\t\t\n\tif details == '':\n\t\treturn \"No extra information available\"\n\telse:\n\t\treturn details", "def add_paragraph(self):\n # <a:p> elements are last in txBody, so can simply append new one\n p = _Element('a:p', _nsmap)\n self.__txBody.append(p)\n return Paragraph(p)", "def containsPoint(self, p):\n return self.frameGeometry().contains(p)", "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def isPostscript(fmt):\n if fmt == 'POST' or fmt == 'PSCL' or fmt == 'PDF':\n return 1\n return 0", "def check_if_plist(in_path):\n is_plist = False\n with open(in_path) as fp:\n try:\n for i, line in enumerate(fp):\n if i == 1:\n # print line\n if line.find(\"PLIST 1.0\") != -1:\n is_plist = True\n elif i > 2:\n break\n except UnicodeDecodeError:\n pass\n return is_plist", "def contains(self, p):\n return self.distance(p=p) < self.tolerance", "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((33 <= cp <= 47) or (58 <= cp <= 64) or\n (91 <= cp <= 96) or (123 <= cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False", "def test_get_description_markdown_paragraphs(self):\n description = get_description(\"Paragraph 1\\n\\nParagraph 2\")\n expected = \"<p>Paragraph 1</p>\\n<p>Paragraph 2</p>\"\n self.assertEqual(description, expected)", "def contains(self, p):\n p = base.getvector(p)\n if len(p) == 2:\n p = np.r_[p, 1]\n return base.iszero(self.line * p)", "def is_sentence_end(mystem_element):\n word = mystem_element.get('text', '')\n return word == '\\\\s' or word == '\\n'", "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if (\n (cp >= 33 and cp <= 47)\n or (cp >= 58 and cp <= 64)\n or (cp >= 91 and cp <= 96)\n or (cp >= 123 and cp <= 126)\n ):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True # pragma: no cover\n return False", "def is_plain_text(self):\n return self._tag == 'plain_text'", "def can_transform(self, html_element: ET.Element):\n 
return html_element.tag == \"mark\"", "def is_resent(self):\n return self.unixtext.find(\"...RESENT\") > 0", "def hasIntroMarker(self):\n return any(marker.type == 'intro' for marker in self.markers)", "def whetherOn(self, P):\n if isinstance(P, list):\n if len(P) == 2:\n if self.ch == 0:\n if P[1]**2+self.a1*P[0]*P[1]+self.a3*P[1] == P[0]**3+self.a2*P[0]**2+self.a4*P[0]+self.a6:\n return True\n else:\n #_log.debug(str(P[1]**2+self.a1*P[0]*P[1]+self.a3*P[1]))\n #_log.debug(str(P[0]**3+self.a2*P[0]**2+self.a4*P[0]+self.a6))\n return False\n else:\n if P[1]**2+self.a1*P[0]*P[1]+self.a3*P[1] == P[0]**3+self.a2*P[0]**2+self.a4*P[0]+self.a6:\n return True\n else:\n return False\n elif P == [self.basefield.zero]:\n return True\n raise ValueError(\"point P must be [px, py] or [0].\")", "def has_ptz(self) -> bool:\n\t\treturn self._ot == 2 and self._raw_result['data']['ptzid'] != -1", "def hasPunct(str):\n for c in str:\n if c in string.punctuation:\n return True\n return False", "def _has_notes(track):\n return len(\n list(filter(lambda m: m.type in ['note_on', 'note_off'], track))) > 0", "def isHereDoc(self, lineData, column):\n return self._getTextType(lineData, column) == 'h'", "def paragraph_count(self, doc):\n\n paragraphs = doc.split(\"\\n\\n\")\n # remove the empty string\n return len([paragraph for paragraph in paragraphs if paragraph])", "def _is_punctuation(char):\n cp = ord(char)\n if cp >= 33 and cp <= 47 or cp >= 58 and cp <= 64 or cp >= 91 and cp <= 96 or cp >= 123 and cp <= 126:\n return True\n cat = unicodedata.category(char)\n if cat.startswith('P'):\n return True\n return False", "def is_elementary(self, p):\n return self.is_abelian and all(g.order() == p for g in self.generators)", "def tt_entails(knowledge_base, sentence):\n return False", "def isQuestion(self):\n i = 0\n while i < len(self.sentence):\n if \"?\" in self.sentence[i].getWord():\n return True\n i += 1\n return False", "def is_question(self, message):\n text = message.split(' ')\n\n # get first word of message\n first_word = text[0]\n # get punctuation\n last_word = text[-1][-1]\n\n if first_word in self.question_words or last_word == '?':\n return True\n\n return False", "def has_a_pdf(self):\n return self.pdfs.count() > 0", "def is_tagged_text(text):\n return len(text) > len(strip_tags(text))", "def all_is_P (self,phrase,predicate_function=None):\r\n\r\n returnvalue = True\r\n for x in phrase:\r\n if not predicate_function(x):\r\n returnvalue = False\r\n return returnvalue", "def footnoteRef(self, text):\n return re.compile(r'(?<=\\S)\\[(\\d+)(!?)\\](\\s)?', re.U).sub(\n self.footnoteID, text\n )", "def foot(cls):\n return ''", "def is_text( self ):\n return self.get_main_type() == 'text'", "def _has_phrase(self, box):\n lines = box.get_lines()\n pattern = self.field.settings.pattern_builder.list_pattern(self._phrases)\n for line in lines:\n if re.search(pattern, line.text) is not None:\n return True\n return False", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def footnote_spot(tree: nodes.document) -> tuple[Element, int]:\n # The code uses the following heuristic:\n # a) place them after the last existing footnote\n # b) place them after an (empty) Footnotes rubric\n # c) create an empty Footnotes rubric at the end of the document\n fns = list(tree.findall(nodes.footnote))\n if fns:\n fn = fns[-1]\n return 
fn.parent, fn.parent.index(fn) + 1\n for node in tree.findall(nodes.rubric):\n if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:\n return node.parent, node.parent.index(node) + 1\n doc = next(tree.findall(nodes.document))\n rub = nodes.rubric()\n rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))\n doc.append(rub)\n return doc, doc.index(rub) + 1" ]
[ "0.8045934", "0.7436927", "0.6865161", "0.60696536", "0.60482085", "0.6026402", "0.6006918", "0.57114613", "0.56973517", "0.56481415", "0.5624542", "0.55489916", "0.5463017", "0.53871185", "0.52857167", "0.5267883", "0.5256443", "0.5246128", "0.5203835", "0.5170078", "0.51535326", "0.51524526", "0.5114998", "0.5101653", "0.50892323", "0.5081919", "0.5063406", "0.50544477", "0.5050697", "0.502158", "0.50111943", "0.49978533", "0.49797624", "0.4963478", "0.4938417", "0.49249706", "0.49165303", "0.49014288", "0.48663956", "0.48578224", "0.48518592", "0.4846009", "0.48416921", "0.48349407", "0.4812948", "0.4802646", "0.48023617", "0.47835734", "0.47823083", "0.47749484", "0.47717565", "0.4768948", "0.4735839", "0.4720276", "0.47139648", "0.46934396", "0.46879572", "0.4681913", "0.4671895", "0.46717077", "0.46643475", "0.4663205", "0.46611482", "0.46574122", "0.46454248", "0.46440682", "0.46230325", "0.46217635", "0.4611544", "0.46048075", "0.46032992", "0.46008545", "0.45994842", "0.45962945", "0.45946023", "0.45941168", "0.45935804", "0.45849025", "0.45834595", "0.45750487", "0.4562747", "0.45611617", "0.4560129", "0.4553581", "0.45449457", "0.45431167", "0.45429987", "0.4540537", "0.4532725", "0.4525427", "0.4525073", "0.45197937", "0.45128068", "0.45094225", "0.44984806", "0.44931847", "0.4480916", "0.44790402", "0.44779345", "0.44708645" ]
0.79070187
1
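A minimal usage sketch tying the is_footnote document above to the is_footnote_text and is_footnote_link helpers that appear among its negatives, assuming the paragraphs come from a BeautifulSoup parse (the predicates rely on Tag.find_next_sibling and Tag.attrs); the PageParser wrapper class and the sample HTML are assumptions made purely for illustration.

from bs4 import BeautifulSoup

class PageParser:
    def is_footnote_text(self, par):
        # Footnote text paragraphs carry a "foot" CSS class.
        return (par is not None) and ("foot" in par.attrs.get("class", []))

    def is_footnote_link(self, par):
        # A link paragraph is one whose next <p> sibling is footnote text.
        return self.is_footnote_text(par.find_next_sibling("p"))

    def is_footnote(self, par):
        # A paragraph is part of a footnote if it is footnote text itself or
        # the link paragraph directly preceding footnote text; a <p> with no
        # following <p> sibling never qualifies.
        if par.find_next_sibling("p") is None:
            return False
        return self.is_footnote_text(par) or self.is_footnote_link(par)

html = (
    '<p>Body text.</p>'
    '<p><a href="#fn1">1</a></p>'
    '<p class="foot">1. The footnote text.</p>'
    '<p>Trailing body.</p>'
)
soup = BeautifulSoup(html, "html.parser")
parser = PageParser()
print([parser.is_footnote(p) for p in soup.find_all("p")])  # [False, True, True, False]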
Checks whether a paragraph is part of a table of contents.
def is_toc(self, par):
    return "toc" in par.attrs.get("class", [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isTable(line):\n # Make the string easier to parse.\n line_stripped = lineNormalise(line)\n\n # Return value.\n ret = False\n\n # If the line starts with the word table, we have a table definition!\n if line_stripped.startswith('table'):\n ret = True\n\n # Tell the horrible truth that this code could not find a table.\n return ret", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)", "def identifyTableEntry(line):\n matches = re.findall('<td>', line)\n if len(matches) > 0:\n return True", "def is_valid_paragraphs(args, skip=False):\n if is_valid_file_and_directory(args) or skip:\n if args.paragraphs is not None:\n return True\n return False", "def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False", "def should_be_compact_paragraph(self, node):\n\n if isinstance(node.parent, nodes.container):\n if 'non-paragraph' not in node.parent.attributes['classes']:\n return False\n\n # noinspection PyUnresolvedReferences\n return super().should_be_compact_paragraph(node)", "def check_partition(self, sectioned_text, full_text):\n\n restitched_text = self.restitch_text(sectioned_text)\n\n length_check = (len(restitched_text) == len(full_text))\n\n return length_check", "def hasContents():", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def verify_page_alignment(toc):\n if len({len(toc_entry[2]) for toc_entry in toc}) != 1:\n return False\n return True", "def testConvertHtmlWithTableOfContent(self):\n self._testBase(\n \"data/test_with_toc.html\",\n toc=True,\n xsl_style_sheet_data=b64encode(open(\"data/test_toc.xsl\").read()),\n )\n # XXX how to check for table of content presence ?", "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def test_valid_content_with_toc_2(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS,\n PANDOC_ARGS=PANDOC_ARGS + [\"--table-of-contents\"],\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(\n 
TEST_CONTENT_PATH, \"valid_content_with_toc.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n\n # Setting this so that assert is able to execute the difference\n self.maxDiff = None\n\n self.assertEqual(\n (\n \"<p>This is some valid content that should pass.\"\n \" If it does not pass we will know something is wrong.</p>\\n\"\n '<h2 id=\"first-heading\">First Heading</h2>\\n'\n \"<p>This should be the first heading in my\"\n \" table of contents.</p>\\n\"\n '<h2 id=\"second-heading\">Second Heading</h2>\\n'\n \"<p>This should be the second heading in my\"\n \" table of contents.</p>\\n\"\n '<h3 id=\"first-subheading\">First Subheading</h3>\\n'\n \"<p>This is a subsection that should be shown as such\"\n \" in the table of contents.</p>\\n\"\n '<h3 id=\"second-subheading\">Second Subheading</h3>\\n'\n \"<p>This is another subsection that should be shown as\"\n \" such in the table of contents.</p>\\n\"\n ),\n output,\n )\n self.assertEqual(\n \"Valid Content with Table of Contents\", str(metadata[\"title\"])\n )\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))\n self.assertEqual(\n '<nav class=\"toc\" role=\"doc-toc\">\\n'\n \"<ul>\\n\"\n '<li><a href=\"#first-heading\">First Heading</a></li>\\n'\n '<li><a href=\"#second-heading\">Second Heading</a>\\n'\n \"<ul>\\n\"\n '<li><a href=\"#first-subheading\">First Subheading</a></li>\\n'\n '<li><a href=\"#second-subheading\">Second Subheading</a></li>\\n'\n \"</ul></li>\\n\"\n \"</ul>\\n\"\n \"</nav>\\n\",\n str(metadata[\"toc\"]),\n )", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def check_paragraph(line):\n if len(line) > 3 and line[:3] == '⋅⋅⋅':\n return '<p>' + line[3:] + '</p>'\n else:\n return line", "def test_valid_content_with_toc(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS,\n PANDOC_ARGS=PANDOC_ARGS + [\"--toc\"],\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_toc.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n\n # Setting this so that assert is able to execute the difference\n self.maxDiff = None\n\n self.assertEqual(\n (\n \"<p>This is some valid content that should pass.\"\n \" If it does not pass we will know something is wrong.</p>\\n\"\n '<h2 id=\"first-heading\">First Heading</h2>\\n'\n \"<p>This should be the first heading in my\"\n \" table of contents.</p>\\n\"\n '<h2 id=\"second-heading\">Second Heading</h2>\\n'\n \"<p>This should be the second heading in my\"\n \" table of contents.</p>\\n\"\n '<h3 id=\"first-subheading\">First Subheading</h3>\\n'\n \"<p>This is a subsection that should be shown as such\"\n \" in the table of contents.</p>\\n\"\n '<h3 id=\"second-subheading\">Second Subheading</h3>\\n'\n \"<p>This is another subsection that should be shown as\"\n \" such in the table of contents.</p>\\n\"\n ),\n output,\n )\n self.assertEqual(\n \"Valid Content with Table of Contents\", str(metadata[\"title\"])\n )\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))\n self.assertEqual(\n '<nav class=\"toc\" role=\"doc-toc\">\\n'\n \"<ul>\\n\"\n '<li><a href=\"#first-heading\">First Heading</a></li>\\n'\n '<li><a 
href=\"#second-heading\">Second Heading</a>\\n'\n \"<ul>\\n\"\n '<li><a href=\"#first-subheading\">First Subheading</a></li>\\n'\n '<li><a href=\"#second-subheading\">Second Subheading</a></li>\\n'\n \"</ul></li>\\n\"\n \"</ul>\\n\"\n \"</nav>\\n\",\n str(metadata[\"toc\"]),\n )", "def is_valid_para(self, para_type, type_table):\n # The values of the table contain all known destination types\n if para_type in type_table.values():\n return True\n return True", "def has_contents(self):\n return len(self.byteruns())>0", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def check_pt_data(data):\r\n # bflb_utils.printf(binascii.hexlify(data))\r\n if partition_magic_code != bflb_utils.bytearray_to_int(data[0:4]):\r\n bflb_utils.printf(\"partition bin magic check fail \", binascii.hexlify(data[0:4]))\r\n return False, 0, 0\r\n table_count = bflb_utils.bytearray_to_int(\r\n data[6:7]) + (bflb_utils.bytearray_to_int(data[7:8]) << 8)\r\n # bflb_utils.printf(\"table count: \", table_count)\r\n if table_count > 16:\r\n bflb_utils.printf(\"error, pt enter size > 16\")\r\n return False, 0, 0\r\n crcarray = bflb_utils.get_crc32_bytearray(data[:12])\r\n if data[12:16] != crcarray:\r\n bflb_utils.printf(\"pt table crc fail \", binascii.hexlify(crcarray))\r\n return False, 0, 0\r\n crcarray = bflb_utils.get_crc32_bytearray(data[16:16 + (36 * table_count)])\r\n if data[16 + (36 * table_count):16 + (36 * table_count) + 4] != crcarray:\r\n bflb_utils.printf(\"pt entries crc fail \", binascii.hexlify(crcarray))\r\n return False, 0, 0\r\n age = bflb_utils.bytearray_to_int(data[8:9]) + (bflb_utils.bytearray_to_int(data[9:10])<<8) +\\\r\n (bflb_utils.bytearray_to_int(data[10:11])<<16) + (bflb_utils.bytearray_to_int(data[11:12])<<24)\r\n return True, table_count, age", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def has_table(self, table):\n return table in self.get_table_list(\".\" in table)", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def check_pe_sections(self, pe):\n res = []\n for section in pe.sections:\n if b\"!This program cannot be run in DOS mode\" in section.get_data()[:400] or\\\n b\"This program must be run under Win32\" in section.get_data()[:400]:\n res.append(section.Name.decode('utf-8').strip('\\x00'))\n\n if len(res) > 0:\n print(\"[+] PE header in sections %s\" % \" \".join(res))\n return True\n return False", "def table_check(tablename, path):\n instance = arcno(path)\n tablelist = [i for i,j in instance.actual_list.items()]\n return True if tablename in tablelist else False", "def _has_phrase(self, box):\n lines = box.get_lines()\n pattern = self.field.settings.pattern_builder.list_pattern(self._phrases)\n for line in lines:\n if re.search(pattern, line.text) is not None:\n return True\n return False", "def check_marked_cell(table, row, col):\n\n\tif table.cell(row, col).paragraphs[0].runs[0].text == \"<>\": # если в указанной ячейке только маркер\n\t\treturn True\n\telse: \n\t\treturn False", "def isPdf(page):\n return page['data'][:4] == '%PDF'", "def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return 
first is not None", "def find_tables(pdf_dict):\n \n table_dict = {}\n \n # matches numerical tables with no whitespace between entries\n table_matcher1= re.compile('\\S\\n[\\d\\W]')\n \n # matches tables with deliberate whitespaces between entries\n table_matcher2= re.compile('\\s\\n[\\d\\s]') \n \n \n i= 0\n for page_num, paragraphs in pdf_dict.copy().items():\n for paragraph_num, text in enumerate(paragraphs):\n \n # This if statement decides what should be interpreted\n # as a \"table string\" on the text.\n # Right now, it is set to identify as a table a string that\n # has more than 4 newline characters surrounded by non white space\n # characters or a string with at least three\n # newline spaces deliberately surrounded by white spaces\n # This 'sensitivity of tables' can be modified according\n # the need and aspect of documents parsed.\n \n if (len(table_matcher1.findall(text))>=4 or len(table_matcher2.findall(text))>=3):\n i+=1\n table_position_dict = {'page':page_num,\n 'paragraph': paragraph_num+1,\n 'raw_table_text':text}\n table_dict[i] = table_position_dict\n return table_dict", "def isHTML(content):\n\n return '<html' in content or 'html>' in content", "def has_template(page_text: str) -> bool:\n\n\tpattern = '<noinclude>.*{{documentation}}.*</noinclude>'\n\tif re.search(pattern, page_text, re.DOTALL | re.IGNORECASE):\n\t\treturn True\n\telse:\n\t\treturn False", "def is_templated(self):\n for table in self.parent.tables:\n if isinstance(table, SettingTable):\n for row in table.rows:\n if row[0].lower() == \"test template\":\n return True\n return False", "def single_line_paragraph(s: str) -> bool:\n return s.startswith('@') or s.strip() in ('\"\"\"', \"'''\")", "def tt_entails(knowledge_base, sentence):\n return False", "def has_content(self):\n return self.genre or \\\n self.characters or \\\n self.synopsis or \\\n self.keywords.exists() or \\\n self.has_reprints()", "def test_get_toc_valid(self):\n from .wiki_toc import get_toc\n toc = get_toc(wiki_url=VALID_URL)\n self.assertTrue('1 Methodologies' in toc)", "def get_number_of_paragraph(self):\n file_to_read = f'{self.path}/{self.filename}'\n file = open(file_to_read, 'r', encoding='utf-8')\n string_to_match = '<p>'\n count = 0\n for line in file:\n if string_to_match in line:\n count += 1\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_paragraph', count)\n print(datetime.now(), '-', 'number_of_paragraph for', self.filename, 'calculated =', count)\n return None", "def _check_para(self):\n if not self._check_table(\"Para\"):\n cur = self.conn.cursor()\n sql = \"create table if not exists Para(ksxq datetime,A integer, B byte);\"\n cur.execute(sql)\n init_time = datetime.strptime('3/1/04', \"%m/%d/%y\").strftime('%m/%d/%y %H:%M:%S')\n sql = \"insert into Para values ('{}', 0, 0);\".format(init_time)\n cur.execute(sql)\n cur.close()\n self.conn.commit()\n return True", "def has_tags_in_content(self):\n\t\treturn self.get_content() and re_tag.search(self.get_content())", "def is_inspection_row(elem):\n is_tr = elem.name == 'tr'\n if not is_tr:\n return False\n td_children = elem.find_all('td', recursive=False)\n has_four = len(td_children) == 4\n this_text = clean_data(td_children[0]).lower()\n contains_word = 'inspection' in this_text\n does_not_start = not this_text.startswith('inspection')\n return is_tr and has_four and contains_word and does_not_start", "def _check_with_content(params):\r\n if 'with_content' in params and params['with_content'] != 'false':\r\n return True\r\n else:\r\n return 
False", "def abstract_read(self, search_param):\n \n #print(\"Search\", search_param)\n section = self.table_section_from_parameter(search_param)\n tag = self.id_from_parameter(search_param)\n\n for table_row in section:\n # Assuming that if first word in a block is valid, the other is too\n if table_row is None:\n continue\n\n if table_row.tag == tag and table_row.valid:\n table_row.access()\n return True\n \n return False", "def check_content(filename, lines, verboseout):\n\tok = True\n\ttry:\n\t\tcheck_vertical(lines)\n\texcept StyleError as e:\n\t\tok = False\n\t\tmsg = \"{:s}: {:s}\".format(filename, str(e))\n\t\tprint(msg, file=verboseout)\n\tfor (i, line) in enumerate(lines):\n\t\ttry:\n\t\t\tcheck_horizontal(line)\n\t\texcept StyleError as e:\n\t\t\tok = False\n\t\t\tmsg = \"{:s}:{:d}: {:s}\".format(filename, i + 1, str(e))\n\t\t\tprint(msg, file=verboseout)\n\treturn ok", "def is_complete_multipartite(self):\n if self._.d != 2:\n return False\n if not self._has(\"p\"):\n self.pTable()\n return any(self._.p[0, i, i] == self._.p[j, i, i]\n for i, j in [(1, 2), (2, 1)])", "def is_information_table_and_xml_entry(tag):\n xml_re = re.compile('^.+\\.xml$', re.I)\n info_re = re.compile('information table', re.I)\n try:\n return (tag.parent.parent.parent.name == 'table' and\n tag.name == 'a' and\n xml_re.match(tag.string) and\n tag.parent.parent.find(string=info_re))\n except:\n return False", "def hasRawText(self, text):\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|dl|pre|h\\d)[^>]*?>.*</\\1>',\n re.S).sub('', text.strip()).strip()\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\n return '' != r", "def is_content(cls, path_or_content):\n return any(path_or_content.lstrip().startswith(s) for s in cls.valid_content_start)", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def ends_paragraph(s: str) -> bool:\n return not s.strip()", "def has_content(self):\n return self.genre or \\\n self.characters or \\\n self.synopsis or \\\n self.reprint_notes", "def is_markdown_cell(cell):\n return cell[\"cell_type\"] == \"markdown\"", "def _first_page_or_table(attr):\n return bool(get_page(attr) or attr.sentence.is_tabular())", "def contain(self, structure, sentence) -> Bool:\n raise NotImplementedError()", "def contain(self, structure, sentence) -> Bool:\n raise NotImplementedError()", "def is_plain_text(self):\n return self._tag == 'plain_text'", "def is_list_of_text_data(parent_element, list_name):\n list_item_name = get_singular_from_plural(list_name)\n\n if parent_element.find(list_item_name) is None:\n return False\n\n return parent_element.find(list_item_name).text is not None", "def nao_tem_passageiros(self):\n return self.counter.ja_viajaram == self.counter.num_passageiros", "def check_contain(file_content, check_text):\n for line in file_content:\n if check_text in line:\n return True\n return False", "def IsHtml(data):\n # Remove banners and XML header. 
Convert to lower case for easy search.\n data = ''.join(data.split('\\n')).lower()\n pattern = re.compile('<html>.*?<body.*?>.*?</body>.*?</html>')\n if pattern.findall(data):\n return True\n else:\n return False", "def has_textframe(self):\n return _child(self._element, 'p:txBody') is not None", "def test_complex_multple_emphasis_type_variant2(self):\n\n self.check_markdown(\n 'on the **1-4 row** of the AP Combat Table ***and*** receive',\n '<p>on the <strong>1-4 row</strong> of the AP Combat Table <strong><em>and</em></strong> receive</p>'\n )", "def is_chapter(self):\n return True", "def is_candidate(line):\n line = line.lower()\n line = prepare_text_line(line)\n return (has_content(line) and any(s in line for s in copyrights_hint.statement_markers))", "def has_prob(self, head):\n trimmed_head = head.get_trimmed(self._head_vars)\n return self._table.get(trimmed_head) is not None", "def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False", "def doiskip(pagetext):\n saltos = getautoskip()\n # print saltos\n for salto in saltos:\n rex = r'\\{\\{\\s*[' + salto[0].upper() + salto[0].lower() + r']' + \\\n salto[1:] + r'(\\}\\}|\\|)'\n # print rex\n if re.search(rex, pagetext):\n return True\n return False", "def is_fluorescence(file):\n for line in read_file(file):\n if \"TD=\" in line.upper():\n return True\n return False", "def isTable(self, tableName):\n url = '%s/_table/%s' % (self.uri, tableName)\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data", "def is_text( self ):\n return self.get_main_type() == 'text'", "def has_table(self, table):\n con = self.connection\n cur = con.cursor()\n res = cur.execute(\"\"\"SELECT COUNT(*) FROM sqlite_master\n WHERE type='table' AND name='%s'\"\"\" % table)\n tcnt = cur.fetchall()\n cur.close()\n if tcnt[0][0] > 0:\n return True\n else:\n return False", "def test_citations_and_toc_2(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS + [\"+citations\"],\n PANDOC_ARGS=PANDOC_ARGS\n + [\n \"--table-of-contents\",\n \"--citeproc\",\n \"--csl=https://www.zotero.org/styles/ieee-with-url\",\n \"--metadata=link-citations:false\",\n \"--metadata=reference-section-title:References\",\n ],\n FORMATTED_FIELDS=FORMATTED_FIELDS,\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_citation.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n self.maxDiff = None\n\n self.assertEqual(\n (\n '<h2 id=\"string-theory\">String Theory</h2>\\n'\n \"<p>But this foundational principle of science has\"\n \" now been called into question by\"\n ' <a href=\"https://www.britannica.com/science/'\n 'string-theory\">String Theory</a>,'\n \" which is a relative newcomer to theoretical physics, but one\"\n \" that has captured the common imagination, judging by\"\n \" the popular explanations that abound on the Web\"\n ' <span class=\"citation\" data-cites=\"mann2019 wood2019'\n ' jones2020\">[1]–[3]</span>.'\n \" And whether string theory is or is not science, Popper\"\n \" notwithstanding, is an issue that is still up for debate\"\n \" <span\"\n ' class=\"citation\" data-cites=\"siegel2015 castelvecchi2016'\n ' alves2017 
francis2019\">[4]–[7]</span>.</p>\\n'\n '<h1 class=\"unnumbered\" id=\"bibliography\">References</h1>\\n'\n '<div id=\"refs\" class=\"references csl-bib-body\"'\n ' role=\"doc-bibliography\">\\n'\n '<div id=\"ref-mann2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[1]'\n ' </div><div class=\"csl-right-inline\">A. Mann,'\n \" <span>“<span>What Is String Theory?</span>”</span>\"\n \" 20-Mar-2019. [Online].\"\n ' Available: <a href=\"https://www.livescience.com/'\n '65033-what-is-string-theory.html\">'\n \"https://www.livescience.com/\"\n \"65033-what-is-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-wood2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[2] </div>'\n '<div class=\"csl-right-inline\">'\n \"C. Wood, <span>“<span>What Is String Theory?</span>.\"\n \" Reference article:\"\n \" A simplified explanation and brief history of string\"\n \" theory,”</span> 11-Jul-2019.\"\n ' [Online]. Available: <a href=\"https://www.space.com/'\n '17594-string-theory.html\">'\n \"https://www.space.com/17594-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-jones2020\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[3]'\n ' </div><div class=\"csl-right-inline\">'\n 'A. Z. Jones, <span>“<span class=\"nocase\">The Basics of String'\n \" Theory</span>,”</span> 02-Mar-2019. [Online]. Available:\"\n ' <a href=\"https://www.thoughtco.com/'\n 'what-is-string-theory-2699363\">'\n \"https://www.thoughtco.com/what-is-string-theory-2699363</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-siegel2015\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[4]'\n ' </div><div class=\"csl-right-inline\">'\n \"E. Siegel, <span>“<span>Why String Theory Is Not A Scientific\"\n \" Theory</span>,”</span> 23-Dec-2015. [Online]. Available:\"\n \" <a\"\n ' href=\"https://www.forbes.com/sites/'\n \"startswithabang/2015/12/23/\"\n 'why-string-theory-is-not-science/\">https://www.forbes.com/'\n \"sites/startswithabang/2015/12/23/\"\n \"why-string-theory-is-not-science/</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-castelvecchi2016\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[5]'\n ' </div><div class=\"csl-right-inline\">'\n 'D. Castelvecchi, <span>“<span class=\"nocase\">'\n \"Feuding physicists turn\"\n \" to philosophy for help</span>. String theory is at the\"\n \" heart of a debate over the integrity of the scientific\"\n \" method itself,”</span> 05-Jan-2016. [Online]. Available:\"\n ' <a href=\"https://www.nature.com/news/'\n 'feuding-physicists-turn-to-philosophy-for-help-1.19076\">'\n \"https://www.nature.com/news/\"\n \"feuding-physicists-turn-to-philosophy-for-help-1.19076</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-alves2017\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[6] </div>'\n '<div class=\"csl-right-inline\">'\n 'R. A. Batista and J. Primack, <span>“<span class=\"nocase\">'\n \"Is String theory falsifiable?</span>. 
Can a theory that isn’t\"\n \" completely testable still be useful to physics?”</span>\"\n \" [Online].\"\n ' Available: <a href=\"https://metafact.io/factchecks/'\n '30-is-string-theory-falsifiable\">'\n \"https://metafact.io/factchecks/\"\n \"30-is-string-theory-falsifiable</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-francis2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[7]'\n ' </div><div class=\"csl-right-inline\">'\n 'M. R. Francis, <span>“<span class=\"nocase\">Falsifiability and'\n \" physics</span>. Can a theory that isn’t completely testable\"\n \" still be useful to physics?”</span> 23-Apr-2019.\"\n \" [Online]. Available:\"\n ' <a href=\"https://www.scientificamerican.com/'\n 'article/is-string-theory-science/\">'\n \"https://www.scientificamerican.com/article/is-\"\n \"string-theory-science/</a>. [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n \"</div>\\n\"\n ),\n output,\n )\n\n self.assertEqual(\"Valid Content With Citation\", str(metadata[\"title\"]))\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))\n self.assertEqual(\n (\n '<nav class=\"toc\" role=\"doc-toc\">\\n'\n \"<ul>\\n\"\n '<li><a href=\"#string-theory\">String Theory</a></li>\\n'\n '<li><a href=\"#bibliography\">References</a></li>\\n'\n \"</ul>\\n</nav>\\n\"\n ),\n str(metadata[\"toc\"]),\n )", "def is_term_table(self, table):\n try:\n result = table.column_definitions['id'] and \\\n table.column_definitions['uri'] and \\\n table.column_definitions['name']\n except KeyError:\n result = False\n return result", "def is_chapter(self):\n\n return False", "def is_chapter(self):\n\n return False", "def check_type(content):\n return (isinstance(content, Elem) or type(content) == Text or\n (type(content) == list and all([type(elem) == Text or\n isinstance(elem, Elem)\n for elem in content])))", "def presentation_exists(self, presentation):\r\n result = QtSql.QSqlQuery('''SELECT * FROM presentations''')\r\n while result.next():\r\n if (unicode(presentation.title) == unicode(result.value(1).toString())\r\n and unicode(presentation.speaker) == unicode(result.value(2).toString())):\r\n return True\r\n return False", "def text_exists(self, text: str)-> bool:\n result = self.__content.find(text)\n if result == -1:\n return False\n else:\n return True", "def is_valid(self, text):\n return any(p.lower() in text.lower() for p in self.get_phrases())", "def is_text(line, start, end, line_number, code_blocks):\n if any(c[0] <= line_number <= c[1] for c in code_blocks):\n return False\n else:\n n = len(line)\n idx = -1\n last_block_was_text = False\n in_link = False\n in_url = False\n while idx < start:\n if in_link:\n link_idx = line[idx+1:].find(')')\n assert link_idx != -1\n code_idx = n\n url_idx = n\n elif in_url:\n url_idx = line[idx+1:].find('>')\n assert url_idx != -1\n code_idx = n\n link_idx = n\n else:\n code_idx = line[idx+1:].find('`')\n link_idx = line[idx+1:].find('](')\n url_idx = line[idx+1:].find('<')\n if code_idx == -1:\n code_idx = n\n if link_idx == -1:\n link_idx = n\n if url_idx == -1:\n url_idx = n\n\n nearest_match = min(code_idx, link_idx, url_idx)\n\n if nearest_match == url_idx:\n in_url = not in_url\n elif nearest_match == link_idx:\n in_link = not in_link\n idx += nearest_match+1\n last_block_was_text = not last_block_was_text\n\n return last_block_was_text", "def table_of_content(self, key, value):\n text = \"{0} -- {1}\".format(\n 
clean_val(\"a\", value, str) or \"\", clean_val(\"t\", value, str) or \"\"\n ).strip()\n if text != \"--\":\n chapters = re.split(r\"; | -- |--\", text)\n return chapters\n else:\n raise UnexpectedValue(subfield=\"a or t\")", "def is_ppt(filename):\n have_current_user = False\n have_user_edit = False\n have_persist_dir = False\n have_document_container = False\n ppt_file = None\n try:\n ppt_file = PptFile(filename)\n for stream in ppt_file.iter_streams():\n if stream.name == 'Current User':\n for record in stream.iter_records():\n if isinstance(record, PptRecordCurrentUser):\n have_current_user = True\n if have_current_user and have_user_edit and \\\n have_persist_dir and have_document_container:\n return True\n elif stream.name == 'PowerPoint Document':\n for record in stream.iter_records():\n if record.type == 0x0ff5: # UserEditAtom\n have_user_edit = True\n elif record.type == 0x1772: # PersistDirectoryAtom\n have_persist_dir = True\n elif record.type == 0x03e8: # DocumentContainer\n have_document_container = True\n else:\n continue\n if have_current_user and have_user_edit and \\\n have_persist_dir and have_document_container:\n return True\n else: # ignore other streams/storages since they are optional\n continue\n except Exception as exc:\n logging.debug('Ignoring exception in is_ppt, assume is not ppt',\n exc_info=True)\n finally:\n if ppt_file is not None:\n ppt_file.close()\n return False", "def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)", "def matches(self, response, contents):\n \n if self.type == 'python':\n vars = {\n 'response': response,\n 'contents': contents\n }\n \n exec self.contents in vars\n return True\n elif self.type == 'text':\n return contents == self.contents", "def find_table(input_file):\n contents = open(input_file, 'r').readlines()\n title = []\n for line in contents:\n if 'CREATE TABLE' in line:\n T = re.search('CREATE TABLE (.+?) 
\\(',line).group(1).strip('\\\"')\n title.append(T)\n if len(title) != 0:\n return True, title\n else:\n return False, title", "def verify_table(scope=driver):\n try:\n table = scope.find_element_by_tag_name('table')\n entries = table.find_elements_by_class_name('m-datatable__row--even')\n if len(entries) > 0:\n return \"Success\"\n else:\n return \"Error: No table entries\"\n except Exception as e:\n return \"Error: \" + str(e)", "def isHereDoc(self, lineData, column):\n return self._getTextType(lineData, column) == 'h'", "def is_in_file(file_path, text):\n with open(file_path, 'r') as f:\n content = f.read()\n return text in content", "def test16():\n\ttoc = {\"Introduction\":1, \"Chapter 1\":4, \"Chapter 2\":11, \"Chapter 3\":25, \"Chapter 4\":30}\n\ttoc[\"Epilogue\"]=39 # Epilogue starts on page 39\n\ttoc[\"Chapter 3\"]=24# Chapter 3 now starts on page 24\n\tprint(toc)# What are the current contents of the dictionary?\n\tif \"Chapter 5\" in toc: \n\t\tprint ('True') # Is there a Chapter 5?\n\telse:\n\t\tprint('False')", "def test_as_p(self, output=None, form=None):\n setup = {'start_tag': '<p>', 'end_tag': '</p>', 'label_end': ' ', 'input_end': ' '}\n setup['as_type'] = as_type = 'as_p'\n setup['form'] = form or self.form\n output = output or setup['form'].as_p().strip()\n expected = self.get_expected_format(setup)\n errors = []\n if output != expected:\n errors = self.log_html_diff(expected, output, as_type=as_type, full=False)\n message = \"Suite {}, had {} lines of HTML errors for {} \".format(self.__class__.__name__, len(errors), as_type)\n self.assertNotEqual('', output)\n self.assertEqual(expected, output, message)", "def isQuestion(self):\n i = 0\n while i < len(self.sentence):\n if \"?\" in self.sentence[i].getWord():\n return True\n i += 1\n return False", "def find_table(self):\n tables = self.document.tables\n header = []\n for table in tables:\n for row in table.rows:\n header[:] = []\n for cell in row.cells:\n for para in cell.paragraphs:\n header.append(para.text.strip(' '))\n # new versions of final CAPA's keep project information in a table\n if 'Project Information' in header:\n self.read_new_format(table)\n # check if elements in findings is also in header\n cond = len(header) == 5 and header[4] == 'Rating'\n if cond or [x for x in self.findings for y in header if x in y] == self.findings:\n self.table = table\n return", "def match(self, proof: dict) -> bool:\n return proof.get(\"proofPurpose\") == self.term", "def e_tabuleiro (x):\n \n if not isinstance(x, list) or len(x) != 5 or not isinstance(tabuleiro_pontuacao(x), int):\n return False\n for l in range(len(x)-1):\n for c in range(len(x[l])):\n if not isinstance(tabuleiro_posicao(x, cria_coordenada(l+1,c+1)), int):\n return False\n return True", "def test_hindi(doc):\n hindi_dictionary = ['kai','hai','dhaan','dhan','jhona','pili','jankari','saaf','mela','narma','raja','brahma','jai','parbhani','sangli','jana']\n flag = any(hindi in doc for hindi in hindi_dictionary)\n return(flag)", "def text_search(self, text, stuff_to_cop):\n if any(ext in text for ext in stuff_to_cop):\n return(True)\n else:\n return(False)", "def ifValidSERP(soupObject):\n return soupObject.find(\"p\", class_=\"noresult_tit\") is None and soupObject.title is not None", "def eh_tabuleiro(tab):\r\n if not type(tab)==tuple:\r\n return False\r\n if len(tab)==3:\r\n for linha in tab:\r\n if not type(linha)==tuple:\r\n return False\r\n if len(linha)==3:\r\n for num in linha:\r\n if not (num in [-1,0,1] and type(num)==int):\r\n return False\r\n 
else:\r\n return False\r\n else:\r\n return False\r\n return True" ]
[ "0.64335525", "0.6316292", "0.5921447", "0.5887786", "0.5816467", "0.58017445", "0.5749812", "0.5692028", "0.56918216", "0.56839854", "0.56645924", "0.5649797", "0.5557572", "0.5544824", "0.5544294", "0.553953", "0.5523673", "0.5453183", "0.54286253", "0.5415694", "0.53847057", "0.53706926", "0.5365205", "0.5340366", "0.5296197", "0.526264", "0.52326953", "0.5190991", "0.5184039", "0.517934", "0.5178942", "0.51776105", "0.51661694", "0.5165701", "0.5135636", "0.5128909", "0.509669", "0.5095956", "0.50527036", "0.502277", "0.5015603", "0.50033873", "0.49873888", "0.49723214", "0.49658558", "0.4954061", "0.4948887", "0.4943922", "0.49382067", "0.49357966", "0.49314287", "0.4928618", "0.4924107", "0.49165553", "0.4911431", "0.49002793", "0.49002793", "0.4896415", "0.48948827", "0.48750427", "0.48653054", "0.48620594", "0.48482567", "0.48477244", "0.48394272", "0.48302132", "0.4800256", "0.47996143", "0.47985753", "0.4798294", "0.47935697", "0.47896534", "0.47823912", "0.47771865", "0.47711077", "0.47660202", "0.47660202", "0.47658685", "0.4761133", "0.47553518", "0.47479546", "0.4743561", "0.47385487", "0.47328976", "0.47266573", "0.4726546", "0.4726413", "0.47217423", "0.4721612", "0.47185075", "0.4718206", "0.4713553", "0.470727", "0.47032264", "0.47006547", "0.46893087", "0.46892488", "0.4680359", "0.46789843", "0.4676731" ]
0.6113981
2
Returns paragraph element corresponding to the given id.
def _get_footnote_par(self, id): start = self._current_body_par if start is None: start = self.parsed link = start.find_next(id=id) if link is None: raise NoFootnoteError(f"Could not find id {id}") foot_par = link.parent.find_next_sibling('p') if not self.is_footnote_text(foot_par): raise NoFootnoteError(f"Failed to find adjacent link paragraph for footnote {id}.") return foot_par
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, pID: int) -> Tuple[int, str, str]:\n\n try:\n return next(self._cursor.execute(\n f\"SELECT id, raw_document_title, raw_paragraph_context FROM paragraphs WHERE id = ?\", (pID,)\n ))\n except StopIteration:\n raise KeyError(f\"{pID} is unknown id of paragraph.\")", "def find_by_id(id):\n query = \"SELECT * FROM parcels WHERE id=%s\"\n return db.find_one(query, (id,))", "def get_paragraph(tag: Optional[Tag]) -> str:\n if tag is None:\n return \"\"\n\n paragraph = \"\\n\".join(p.text.strip() for p in tag.find_all(\"para\"))\n paragraph += \"\\n\"\n return paragraph", "def get_element_by_id(self, id):\n for element in self._elements:\n if element.get_id() == id:\n return element", "def query_by_id(doc_id):\n with Con(db = os.environ.get(\"MONGO_DB\"), host = os.environ.get(\"MONGO_URL\"), port = int(os.environ.get(\"MONGO_PORT\")), col = os.environ.get(\"MONGO_COL\")) as col:\n doc = col.find_one({'pmid': doc_id})\n doc_text = [v for k,v in doc['abstract'].iteritems()]\n doc_text = ' '.join(doc_text) + doc['title']\n return doc_text", "def getElementById(self, id) :\n\t\tif id in self.lid.keys() :\n\t\t\treturn self.lid[id]\n\t\telse :\n\t\t\treturn None", "def get_publication_html_from_id_in_db(new_id: str) -> Union[None, Dict]:\n\n # db = app.data.driver.db[\"notices_publications\"]\n # try:\n # res = db.find({\"id\": new_id}).limit(1).next()\n # except:\n # res = None\n # return res\n\n url = BaseConfig.DATAESR_NOTICES_PUBLICATIONS_URL\n url += '?where={{\"id\":\"{}\"}}'.format(new_id)\n r = requests.get(url)\n if r.status_code == 200:\n res = r.json()['data']\n else:\n res = []\n if len(res) > 1:\n print(\"ERROR more than one id - SHOULD NOT HAPPEN !!\")\n return res[0]\n elif len(res) == 1:\n return res[0]\n else:\n return None", "def get_title_by_id(id):\n\n # your code", "def extract_post_text(id, posts):\n try:\n post = posts.find(\"./*[@Id='{id}']\".format(id=id))\n return clean_up(post.attrib['Title'],False) + ' ' + clean_up(post.attrib['Body'],True)\n except AttributeError:\n return None\n except KeyError:\n return None", "def para(text: str) -> Paragraph:\n normal_style = getSampleStyleSheet()[\"Normal\"]\n return Paragraph(text, normal_style)", "def _find_note(self, id):\n for note in self.notes:\n if note.id == id:\n return note\n return None", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get(self, id):\n if id == 'body':\n return document.body\n else:\n return self.instances[id]", "def get_proof_item(self, id):\n return self.prf.find_item(id)", "def _text_of_para(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of_para(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string", "def get_review(self, id_):\n cursor = self._connection.cursor()\n select_command = make_select_command(\"reviews\")\n select_command += \" WHERE id_ = ?\"\n cursor.execute(select_command, (id_,))\n for row in cursor:\n return expandable_from_tuple(row, FIELD_DESCRIPTIONS) \n return None", "def find_text_in_p(self, el):\n\n all = []\n for el in el.findall(\".//p\"):\n t = el.text_content().strip()\n if len(t)<40:\n continue\n all.append(t)\n\n return \" \".join(all)", "def find(self, id):\n\n meetup = self.where('id', id)\n return meetup", "def get_question_by_id(self, id, params={}):\n\n try:\n url = BASE_URL + 'api/v1/questions/' + str(int(id))\n data = self.request('get', url, '_parse_question_json', params)\n return data\n except Exception as e:\n Utils.log(traceback.format_exc())\n 
Utils.send('question.error')\n Utils.error(e.args[0])", "def getByID(self, pid):\r\n i = self.pids.index(pid)\r\n return self.getByInd(i)", "def parterre_info(id):\n parterre = get_parterre(id)\n return render_template(\n \"parterre-info.html\",\n parterre = parterre,\n title = parterre.get_name(),\n capteurs = get_capteurs_parterre(id))", "def get_text(item_id):\n if item_id in all_items:\n return all_items[item_id]['text']\n return None", "def search_id(root, pid):\n for page in root.iter('page'):\n if pid == int(page.find('id').text):\n return page.find('revision').find('text').text", "def findElementWithId(tagname, xmlid):\n texts = doc.getElementsByTagName(tagname)\n for t in texts:\n if t.getAttribute(\"id\") == xmlid:\n return t", "def get_element_by_element_id(self, element_id):\n for element in self.iterate():\n if element.get_id() == element_id:\n return element", "def get_title_by_id(id):\n\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def put_pid(html):\n pid = 1\n while \"<p>\" in html:\n pttn = \"<p id=\\\"p\"+str(pid)+\"\\\">\"\n html = html.replace(\"<p>\", pttn, 1)\n pid += 1\n return html", "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "def _http_get_title_by_id(self, id) -> dict:\n if int(id) == -1:\n # there is no title\n return None\n playl = self._http_playlist()\n return [title for title in playl if int(title['id']) == int(id)][0]", "def get_paper_by_id(paper_id):\n dblp_key = paper_id.replace(\"/\", \"_\")\n if local.paper_exists(dblp_key):\n return dblp_key\n\n print(\"getting information from dblp about paper {}\".format(paper_id))\n data = get(\"https://dblp.org/rec/\" + paper_id + \".xml\")[\"dblp\"]\n return get_paper(data)", "def getKeyWordById(self, id):\n kWord = KeyWord()\n i = 0\n while i < len(self.sentence):\n if self.sentence[i].getId() == id:\n kWord = self.sentence[i]\n i += 1\n return kWord", "def get_presentation(self, talk_id):\r\n result = QtSql.QSqlQuery('''SELECT * FROM presentations WHERE Id=\"%s\"''' % talk_id)\r\n if result.next():\r\n return Presentation(title=unicode(result.value(1).toString()),\r\n speaker=unicode(result.value(2).toString()),\r\n description=unicode(result.value(3).toString()),\r\n category=unicode(result.value(4).toString()),\r\n event=unicode(result.value(5).toString()),\r\n room=unicode(result.value(6).toString()),\r\n date=unicode(result.value(7).toString()),\r\n startTime=unicode(result.value(8).toString()),\r\n endTime=unicode(result.value(9).toString()))\r\n else:\r\n return None", "def scapy_layers_dot11_Dot11_find_elt_by_id(self, id):\n\tfor elt in self.elts():\n\t\tif elt.ID == id:\n\t\t\treturn elt\n\treturn None", "def get_paragraph(self):\r\n \r\n size = self.paragraph_sizes.get()\r\n size += int(random.randrange(int(size * 0.8), int(size * 1.2)))\r\n \r\n lines = []\r\n paragraph_length = 0\r\n while paragraph_length < size:\r\n sentence, length = self.get_sentence()\r\n lines.append(sentence)\r\n paragraph_length += length\r\n\r\n paragraph = \"\\t\" + \" \".join(lines) + \"\\n\\n\"\r\n return paragraph, paragraph_length", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def FindElementById(self, id):\r\n for element in self.__listOfElements:\r\n if element.get_studentID() == id:\r\n return element\r\n raise 
RepositoryError(\"Inexisting Element\")", "def paragraph(self, on, **kw):\n if self._terse:\n return ''\n FormatterBase.paragraph(self, on)\n tag = 'p'\n if on:\n tagstr = self._open(tag, **kw)\n else:\n tagstr = self._close(tag)\n return tagstr", "def scrape_promed_id(id):\n url = \"http://www.promedmail.org/ajax/getPost.php?alert_id=%s\" % id\n resp = requests.get(url, headers={\"Referer\": \"http://www.promedmail.org/\"})\n content = resp.json()\n zoomLat = content.get('zoom_lat')\n zoomLon = content.get('zoom_lon')\n zoomLevel = content.get('zoom_level')\n post_html = content.get('post')\n try:\n post_html = unquote(post_html)\n except Exception as e:\n print \"Error decoding %s: %s\" % (id, e)\n formatted_content = promed_html_to_formatted_text(post_html)\n result = {\n 'promedScraperVersion' : __version__,\n 'content' : formatted_content,\n 'promedId': id,\n 'htmlContent': post_html,\n 'zoomLat': zoomLat,\n 'zoomLon': zoomLon,\n 'zoomLevel': zoomLevel\n }\n result.update(parse_post_text(formatted_content))\n return result", "def _visit_paragraph(self,elem):\n # only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def read_text_from_span_id(html, span_id):\n return html.find('span', {'id': span_id}).text", "def get_node_by_id(self, id):\r\n for n in self.nodes:\r\n if n.id==id:\r\n return n\r\n return None", "def find_by_id(id: int):\n exercise = Exercise.try_find_by_id(id)\n if not exercise:\n raise NotFound(EXERCISE_NOT_FOUND_MSG)\n return exercise", "def load_post_by_id(self, id):\n post = None\n posts = self.session.query(Post).filter(Post.id == id).all()\n if len(posts) > 0:\n post = posts[0]\n return post", "def get_response(self, id):\n if not id:\n return None\n for response in self._responses:\n if response._id == id:\n return response\n pass\n new_res = self._add_response(id)\n return new_res", "def query_by_id(_id: int) -> dict:\n post = Posts.query.filter_by(id=_id).first()\n if post is None:\n return {\"status\": 404, \"message\": \"No id Available\"}\n return {\n \"title\": post.title,\n \"body\": markdown.markdown(post.body),\n \"timestamp\": post.timestamp,\n \"id\": post.id,\n \"url\": make_url_from_title(post.title),\n }", "def get(self, id):\n return read_msg(id)", "def get_question(self, id):\n\t\tif id < len(self.questions) and id >= 0:\n\t\t\treturn self.questions[id]\n\t\telse:\n\t\t\treturn None", "def get_person(self, id):\n if self.people is None:\n self.people = self.get_people()\n\n for person in self.people:\n if person['person']['id'] == id:\n return person['person']\n\n return None", "def get_param_by_id(self, id_):\n try:\n return list(filter(lambda param: param.id == id_, self.params))[0]\n except IndexError:\n return None", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def get_paste_by_id(self, id):\n if self.caching and id in self._cache:\n return self._cache[id]\n else:\n return self._get_paste_from_result(\n self._lodgeit.pastes.getPaste(id))", "def get_title_by_id_from_table(table, id):\n\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def 
get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def article_by_id(self, id):\n return self.es.get(index=self.index, doc_type=self.doc_type, id=id)", "def get_title_by_id_from_table(table, id):\n\n # your code", "def select_by_id(parcel_id):\n sql = \"SELECT * FROM dostawy.przesylki WHERE przesylka_ID = %s;\"\n val = (parcel_id,)\n rows = DBconnector.fetch_query_parameters(sql, val)\n return _wrap_in_parcel_list(rows)", "def getTextElement(self, elementId):\n cmdId = self.executeCommand(Command.GET_ELEMENT_TEXT, {'id': elementId})\n return cmdId", "def id(self) -> Optional[str]:\n return self.elem.get('id')", "def get_element_from_id(self, identifier):\n classification, org, rel, com = classify_id(identifier)\n if classification == id_classification.org:\n return self.get_org_question(org)\n elif classification == id_classification.rel:\n return self.get_rel_question(org, rel)\n elif classification == id_classification.com:\n return self.get_rel_comment(org, rel, com)\n return None", "def paragraph(self, text):\n return [text]", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def title_by_id(id_: int) -> Any:\n post = Posts.query.filter_by(id=id_).first()\n if post is None:\n return \"404\"\n return post.title", "def get_by_id(data_base, id, commit_to_db=True):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT * FROM post WHERE id = {id}\")\n fields = cursor.fetchone()\n cursor.close()\n if commit_to_db:\n fields['commit_to_db'] = commit_to_db\n try:\n return Post(**fields)\n except TypeError:\n return", "def get_by_id(id, lista):\n for inventar in lista:\n if get_id(inventar) == id:\n return inventar\n return None", "def get_by_id(self, id):\n return self._mzml_parser.get_by_id(id)", "def get_by_id(cls, id):\n return cls.query().get(id)", "def get(self, id):\n return {'id': id}", "def find_rental_by_id(self, id):\n return super(RentalHistoryText, self).find_rental_by_id(id)", "def get_product_from_text_id(text_id):\n program_run_id_match = re.match(PROGRAM_RUN_ID_PATTERN, text_id)\n # This text id matches the pattern of a program text id with a program run attached\n if program_run_id_match:\n match_dict = program_run_id_match.groupdict()\n potential_prog_run_id = match_dict[\"run_tag\"]\n potential_text_id_base = match_dict[\"text_id_base\"]\n # A Program's own text id may end with something that looks like a ProgramRun suffix, but has\n # no associated ProgramRun (ex: program.readable_id == \"program-v1:my+program+R1\"). This query looks\n # for a Program with a ProgramRun that matches the suffix, or one that matches the full given text id\n # without a ProgramRun. 
The version with a matching ProgramRun is preferred.\n program = (\n Program.objects.filter(\n Q(\n readable_id=potential_text_id_base,\n programruns__run_tag=potential_prog_run_id,\n )\n | Q(readable_id=text_id)\n )\n .order_by(\"-programruns__run_tag\")\n .prefetch_related(\n Prefetch(\n \"programruns\",\n queryset=ProgramRun.objects.filter(run_tag=potential_prog_run_id),\n to_attr=\"matching_program_runs\",\n )\n )\n .prefetch_related(\"products\")\n .first()\n )\n if not program:\n raise Program.DoesNotExist(\n f\"Could not find Program with readable_id={text_id} \"\n \"or readable_id={potential_text_id_base} with program run {potential_prog_run_id}\"\n )\n program_run = first_or_none(program.matching_program_runs)\n product = first_or_none(program.products.all())\n if not product:\n raise Product.DoesNotExist(f\"Product for {program} does not exist\")\n return product, program, program_run\n # This is a \"normal\" text id that should match a CourseRun/Program\n else:\n if is_program_text_id(text_id):\n content_object_model = Program\n content_object_filter = dict(readable_id=text_id)\n else:\n content_object_model = CourseRun\n content_object_filter = dict(courseware_id=text_id)\n content_object = (\n content_object_model.objects.filter(**content_object_filter)\n .prefetch_related(\"products\")\n .first()\n )\n if not content_object:\n raise content_object_model.DoesNotExist(\n f\"{content_object_model._meta.model} matching filter {content_object_filter} does not exist\"\n )\n product = first_or_none(content_object.products.all())\n if not product:\n raise Product.DoesNotExist(f\"Product for {content_object} does not exist\")\n return product, content_object, None", "def first_part_pid(self,text,pid):\n\n len_max=4\n key_list=pid.keys()\n while 1:\n num=min(len_max,len(text))\n if len_max==0:\n sys.exit('error pid dico not complete or invalid input :'+str([text[:min(3,len(text))]])+'\\\n \\n Complete proc_info.py')\n \n if text[:num].lower() in key_list:\n tag=text[:num].lower()\n text=text[num:]\n return text, pid[tag]\n else:\n len_max+=-1", "def add_paragraph(self):\n # <a:p> elements are last in txBody, so can simply append new one\n p = _Element('a:p', _nsmap)\n self.__txBody.append(p)\n return Paragraph(p)", "def get(self, id):\n if id == 'body':\n return window.document.body\n else:\n return self.instances[id]", "def get(self, docid):\n file = os.path.join(self.dirname, docid)\n with open(file,'r',encoding='utf-8') as f:\n text = f.read()\n return text", "def get_element(self,p):\n self._validate(p)\n return p.element()", "def getVar(self, id):\n if id in self.variables:\n return self.variables[id]", "def get_policy_by_id(self, id):\n for service, policy_list in self.remote_store.get_policy_list().items():\n for policy in policy_list:\n if policy.id == id:\n return policy", "def hello_world():\r\n sentence = '<p>Hello World! 
'+my_id+' <p>'\r\n return sentence", "def getText_byID(self, user_id):\n sql = \"SELECT profile0, profile1, profile2, profile3, profile4, profile5, profile6, profile7, profile8, profile9 FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get_value(self, id):\n if DataParticleKey.has(id):\n return self.contents[id]\n else:\n raise NotImplementedException(\"Value %s not available in particle!\", id)", "def get_publication_from_id_in_db(new_id: str) -> Union[None, Dict]:\n\n # db_publications = app.data.driver.db[\"publications\"]\n # try:\n # res = db_publications.find({\"id\": new_id}).limit(1).next()\n # except:\n # res = None\n # return res\n\n url = BaseConfig.DATAESR_PUBLICATIONS_URL\n url += '?where={{\"id\":\"{}\"}}'.format(new_id)\n r = requests.get(url)\n if r.status_code == 200:\n res = r.json()['data']\n else:\n res = []\n if len(res) > 1:\n print(\"ERROR more than one id - SHOULD NOT HAPPEN !!\")\n return res[0]\n elif len(res) == 1:\n return res[0]\n else:\n return None", "def printPost(self, id):\n enc = getpreferredencoding()\n output = self._extractPost(id)['formatted_text']\n print output.encode(enc)", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def get(self, id):\n adm = Administration()\n pers = adm.get_person_by_id(id)\n return pers", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def get_location_from_id(id):\n tree = ET.parse('./devset_topics.xml')\n root = tree.getroot()\n for item in root.findall('./topic'):\n if id == item[0].text:\n return item[1].text", "def find(self, id, columns=None):\n if not columns:\n columns = ['*']\n\n return self.where('id', '=', id).first(1, columns)", "def find_element_by_id(self, id, wait=False):\n element = None\n timeout = 0\n\n if wait:\n timeout = WAIT_ELEMENT_TIMEOUT\n\n try:\n element = WebDriverWait(self.browser, timeout).until(\n EC.presence_of_element_located((By.ID, id))\n )\n except TimeoutException as e:\n e.msg = \"Can't locate element\"\n raise e\n\n return element", "def get_project(self, id):\n for project in self.projects:\n if project.id == int(id):\n ret_val = project\n break\n else:\n ret_val = None\n\n return ret_val", "def getTextIdForCaption(elm):\n #the obvious case #1\n text = getFirstChildWithTagName(elm, \"text\")\n #the less-obvious cases\n if text == None:\n promptItem = getFirstChildWithTagName(elm, \"promptItem\")\n if promptItem == None:\n prompt = getFirstChildWithTagName(elm, \"prompt\")\n if prompt == None: return \"\"\n else:\n promptItem = getFirstChildWithTagName(prompt, \"promptItem\")\n if promptItem == None: return \"\"\n #promptItem variable better have something in it at this point\n return getTextIdFromPromptItem(promptItem)\n else:\n return text.getAttribute(\"id\")", "def get_physics_object_from_id(self, id):\n for p in self.physics_objects:\n if p.canvas_id == id:\n return p", "def by_id(cls, id):\n\t\treturn DBSession.query(Power).filter(Power.power_id == id).first()", "def load_post_synopsis_by_id(self, id):\n post_synopsis = None\n posts = self.session.query(PostSynopsis).filter(PostSynopsis.id == id).all()\n if len(posts) > 0:\n post_synopsis = posts[0]\n return post_synopsis", "def __getitem__(self,id):\n \n # make sure id is an integer\n try:\n if not isinstance(id,IntType):\n 
id=atoi(id)\n except ValueError:\n raise KeyError, id\n \n # make sure it's in our list of children\n if not self.ids.has_key(id):\n raise KeyError, id\n \n # return the posting\n return self.data[id].__of__(self)", "def __getitem__(self, id):\r\n \r\n if isinstance(id, basestring):\r\n return self._by_name[id]\r\n return self._by_number[id]", "def get_task_by_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT * FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task", "def select_id(self, id):\n with self.conn:\n self.c.execute(\n \"\"\"SELECT * FROM %s WHERE id = ?\"\"\" % (TABLE), (id,)\n )\n return self.c.fetchone()", "def getTextIdFromPromptItem(elm):\n if elm == None:\n return \"\"\n \n text = getFirstChildWithTagName(elm, \"text\")\n if text != None:\n return text.getAttribute(\"id\")\n else:\n refid = elm.getAttribute(\"refid\")\n if refid != \"\":\n prompt = findElementWithId(\"promptItem\", refid)\n return getTextIdFromPromptItem(prompt)\n else:\n return \"\"", "def get_doc_text(self, doc_id):\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT text FROM documents WHERE id = ?\",\n (doc_id,)\n )\n result = cursor.fetchone()\n cursor.close()\n return result if result is None else result[0]", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person" ]
[ "0.62857157", "0.6051216", "0.6041252", "0.5996266", "0.5886566", "0.58803475", "0.581452", "0.57547826", "0.5694344", "0.56772363", "0.5660466", "0.5647002", "0.5594322", "0.55925494", "0.55812985", "0.5573686", "0.5553297", "0.5532051", "0.5492356", "0.54790217", "0.5463343", "0.5456387", "0.5454761", "0.5447059", "0.5438267", "0.5433375", "0.54283446", "0.5424349", "0.54089236", "0.5398692", "0.53735465", "0.5369427", "0.53538495", "0.5349947", "0.5336872", "0.5336655", "0.53247523", "0.53220516", "0.5307049", "0.53003377", "0.5266262", "0.52638537", "0.5246191", "0.52410716", "0.52358973", "0.5231397", "0.52306175", "0.5222276", "0.5219506", "0.52181304", "0.5200362", "0.5196266", "0.5176137", "0.51629144", "0.5161735", "0.51615715", "0.5158528", "0.5144366", "0.5132011", "0.51264167", "0.5123012", "0.51130706", "0.50845885", "0.50807416", "0.50774723", "0.5074841", "0.5061566", "0.5041222", "0.5039527", "0.50390464", "0.5022506", "0.5015005", "0.5008691", "0.50084424", "0.5005519", "0.5002611", "0.49955457", "0.49917907", "0.49897423", "0.49718827", "0.49634564", "0.49583006", "0.49565595", "0.49556193", "0.49471387", "0.49423525", "0.49338618", "0.49174747", "0.49079806", "0.49061933", "0.49029607", "0.49002293", "0.48975748", "0.48959202", "0.48949277", "0.4894875", "0.48939908", "0.48916408", "0.48896515", "0.48874736" ]
0.5871431
6
Walk over paragraphs in the main text. If a footnote link is found, jump to that paragraph, then back to the main text.
def linked_text_paragraphs(self):
        for par in self._main_paragraphs_raw():
            par_links = par.find_all('a')
            if len(par_links) == 0:
                self.main_count += len(par.text)
                yield par.text
            else:
                for el in par.contents:
                    if el.name is None:
                        #this is plain text
                        self.main_count += len(str(el))
                        yield str(el)
                    elif el.name == "a" and "href" in el.attrs:
                        id = el["href"].lstrip('#')
                        try:
                            foot_par = self._get_footnote_par(id)
                        except NoFootnoteError:
                            self.log(f"Could not find footnote for {id}, skipping.")
                            continue  # skip this link: foot_par is undefined when the lookup fails
                        self.footnote_count += len(foot_par.text)
                        yield foot_par.text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment_paragraphs(root_el, cites=[]):\n from capdb.models import Citation\n\n last_el_ends_mid_sentence = False\n join_with_last_el = False\n html_to_prepend_to_next_el = ''\n\n # build a lookup like {\"935 F.3d\": 1, \"123 Mass.\": 2}\n reporter_indexes = {}\n for i, cite in enumerate(Citation.sorted_by_type(cites)):\n eyecite_cite = next(extract_citations_from_text(cite.cite), None)\n if eyecite_cite:\n volume = eyecite_cite.groups['volume']\n reporter = eyecite_cite.groups['reporter']\n reporter_indexes[f\"{volume} {reporter}\"] = i+1\n\n # special case -- \"[134 Hawai'i 89]\" is a page number for \"134 Haw. 86\"\n if reporter == 'Haw.':\n reporter_indexes[f\"{volume} Hawai'i\"] = i + 1\n\n # process each paragraph\n for el_pq in PyQuery(root_el)('root').children().items():\n el = el_pq[0]\n if el.tag == 'header-end':\n continue\n\n html = inner_html(el)\n page_label = None\n exact_match = False\n index = 1\n\n # clean el whitespace\n clean_html = re.sub(r'\\s+|^<br>|<br>$', ' ', html).strip()\n if not clean_html:\n el_pq.remove()\n continue\n\n # strip tags to handle examples like\n # \"<p><strong>[16 N.Y.3d 274] <strong> <p/></strong></strong> <p> <strong> [945 N.E.2d 484]</strong> </p> <p> <strong>OPINION OF THE COURT</strong> </p></p>\"\n # in NE2d/945/945ne2d484.xml\n html_no_tags = strip_tags(clean_html).strip()\n\n # check for 'Page 123'\n m = re.match(r'Page (\\d+)$', html_no_tags)\n if m:\n page_label = make_page_label(m[1])\n exact_match = True\n\n # check for '[123 Mass. 456]'\n else:\n m = re.search(r\"\\[(?P<volume>\\d+) (?P<reporter>[A-Z][A-Za-z0-9 .']+) (?P<page>\\d+)\\]\", html_no_tags)\n if m:\n vol_reporter = f\"{m['volume']} {m['reporter']}\"\n if vol_reporter in reporter_indexes:\n index = reporter_indexes[vol_reporter]\n is_valid_reporter = True\n else:\n is_valid_reporter = False\n exact_match = m[0] == html_no_tags\n if exact_match or is_valid_reporter:\n page_label = make_page_label(m['page'], index)\n\n # handle page label found\n if page_label:\n clean_html = clean_html.replace(escape(m[0]), page_label)\n\n if exact_match:\n if last_el_ends_mid_sentence:\n join_with_last_el = True\n html_to_prepend_to_next_el += clean_html\n el_pq.remove()\n continue\n\n if html_to_prepend_to_next_el:\n clean_html = html_to_prepend_to_next_el + clean_html\n html_to_prepend_to_next_el = ''\n\n if join_with_last_el:\n join_with_last_el = False\n prev_el = el_pq.prev()\n if prev_el[0].tag == el_pq[0].tag:\n prev_el.append(('' if prev_el.text().endswith('-') else ' ')+clean_html)\n el_pq.remove()\n continue\n\n last_el_ends_mid_sentence = bool(mid_sentence_re.search(html_no_tags))\n\n if clean_html != html:\n el_pq.html(clean_html)", "def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))", "def test_forward_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. 
StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"15.0\", \"15.0\"),\n command_name=\"forward-paragraph\",\n )", "def add_paragraph_marks(text, keep_line_endings=True, maxlength=72):\n\n # add # after line that ends with full stop, question and exclamation marks:\n ptrn = r\"([.؟!] 
*[\\r\\n]+(?:PageV\\w{2}P\\d+[abAB]?[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n # add # after section titles (but not before page numbers and sub-titles)\n ptrn = r\"(### .+[\\r\\n]+(?:PageV\\w{2}P\\d+[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n if keep_line_endings:\n # add the tildas for continued lines:\n new_text = \"\"\n for line in re.split(r\"([\\r\\n]+)\", text):\n if not line.startswith((\"P\", \"#\", \"~~\")) \\\n and not re.match(r\"[\\r\\n]+\", line):\n line = \"~~\"+line\n new_text += line\n else:\n # move page number to the previous line:\n ptrn = r\"([^ \\r\\n.؟!]) *[\\r\\n]+(PageV[^P]+P[\\w]+) *[\\r\\n]+\"\n text = re.sub(ptrn, r\"\\1 \\2 \", text)\n # Add paragraph signs before every new line:\n ptrn = r\"([\\r\\n]+)([^\\r\\n#P\\s])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n # break long lines into shorter lines:\n new_text = wrap(text, maxlength)\n\n new_text = re.sub(\"~~#\", \"#\", new_text)\n new_text = re.sub(r\"~~([^\\n]+%~%)\", r\"# \\1\", new_text)\n new_text = re.sub(r\"~~\\.\\./\", \"../\", new_text)\n\n return new_text", "def _get_footnote_par(self, id):\n start = self._current_body_par\n if start is None:\n start = self.parsed\n link = start.find_next(id=id)\n if link is None:\n raise NoFootnoteError(f\"Could not find id {id}\")\n foot_par = link.parent.find_next_sibling('p')\n if not self.is_footnote_text(foot_par):\n raise NoFootnoteError(f\"Failed to find adjacent link paragraph for footnote {id}.\")\n return foot_par", "def test_back_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. 
StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"6.7\", \"6.7\"),\n command_name=\"back-paragraph\",\n )", "def _visit_paragraph(self,elem):\n # only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def textparse(self,\r\n analysetext,\r\n depth=0,\r\n keys=None,\r\n re_entering=False,\r\n newindex=Index(1)):\r\n if keys is None:\r\n keys = set()\r\n if LEFTNOTE not in analysetext \\\r\n or extract.embedded_extract(analysetext)[2] == 0:\r\n return\r\n #test if it contains embedded text\r\n\r\n## ee = extract.embedded_extract(RIGHTNOTE.join(LEFTNOTE.\r\n##join(analysetext.split(LEFTNOTE)[1:]).split(RIGHTNOTE)[:-1]),eliminate = True)\r\n\r\n ee_temp = extract.embedded_extract(analysetext)\r\n embeddedlist = ee_temp[0]\r\n\r\n if depth-1 in self.pass_key_dict:\r\n\r\n self.pass_key_dict[depth] = self.pass_key_dict[depth-1]\r\n else:\r\n self.pass_key_dict[depth] = [[list(keys)], []]\r\n\r\n emb_len = str(len(embeddedlist))\r\n\r\n for a_temp, phrase in enumerate(embeddedlist):\r\n if a_temp<10 or (a_temp>9 and a_temp<100\r\n and a_temp%10 == 0) or (a_temp>99\r\n and a_temp%100==0):\r\n #display counter for embedded notes\r\n print()\r\n print(str(a_temp)+'/'+emb_len)\r\n\r\n\r\n\r\n\r\n\r\n\r\n if extract.embedded_extract(phrase)[2] > 1:\r\n\r\n\r\n if phrase[0] == LEFTNOTE and phrase[-1] == RIGHTNOTE:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n RIGHTNOTE.join(LEFTNOTE.join(phrase.split(LEFTNOTE)[1:])\r\n .split(RIGHTNOTE)[:-1]),\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n else:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n phrase,\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n newindex = self.textparse(phrase[1:-1],\r\n depth+1,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n\r\n\r\n else:\r\n\r\n newindex = self.textinterpret(phrase,\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n print()\r\n return newindex", "def search_loop(self, pattern, parent, cell_name, paragraph):\n index = self.paragraphs.index(paragraph)\n self.paragraphs[index] = \"\"\n while True:\n index += 1\n try:\n para = self.paragraphs[index].rstrip()\n try:\n if re.match(pattern, para):\n self.datafields[f\"{parent} {self.active}\"][cell_name] = para\n self.paragraphs[index] = \"\"\n break\n if index is len(self.paragraphs):\n break\n except KeyError:\n pass\n except IndexError:\n break", "def para_parse(text, j, op_b, cl_b):\n\n depth = 0\n loc2 = j\n\n while 1:\n if text[loc2] == op_b:\n depth = depth + 1\n\n elif text[loc2] == cl_b:\n depth = depth - 1\n if depth == 0:\n break\n loc2 = loc2 + 1\n return loc2", 
"def fix_footnotes(case_el, warnings):\n case_pq = PyQuery(case_el)\n # fix footnotes\n # footnotes look like this (since <small> is already stripped)\n # <p>--------</p>\n # <p>Notes:</p>\n # <p>\n # <sup>\n # <a href=\"#fn1\" name=\"fr1\">1</a>\n # </sup> text text text </p>\n # notes label can look like `<strong><br/> --------</strong>` -- NE2d/990/990ne2d139_12.xml\n notes_el = case_pq('p:contains(\"Notes:\")').filter(lambda i, el: strip_tags(PyQuery(el).text()).strip() == 'Notes:')\n refs = {}\n notes_section = None\n if notes_el:\n notes_section = notes_el.closest('article, section')\n footnote_index = 0\n opinion_index = 1\n footnote_el = None\n\n # before and after footnote sections there is a paragraph of either 8 or 15 hyphens\n footnote_breaks = ['-' * 8, '-' * 15]\n\n # remove footnote break before footnote section\n # can have tags in the footnote break -- A3d/50/50a3d607_29.xml\n prev_notes_el = notes_el.prev()\n if strip_tags(prev_notes_el.text()).strip() not in footnote_breaks:\n warnings.append(\"Unexpected element before notes el.\")\n else:\n prev_notes_el.remove()\n\n # remove \"Notes:\"\n old_footnote_el = notes_el.next()\n notes_el.remove()\n\n # step through each footnote element\n while old_footnote_el:\n # sometimes <a> tag gets out of <p> tag -- SE2d/590/590SE2d53.xml\n # put it inside a new <p>\n if old_footnote_el[0].tag == 'a':\n old_footnote_el = wrap_with(old_footnote_el, PyQuery(etree.Element('p')))\n\n link_el = old_footnote_el('a').eq(0)\n if not link_el:\n # this could be the end of footnotes, in which case stop\n if strip_tags(old_footnote_el.text()).strip() in footnote_breaks:\n old_footnote_el.remove()\n break\n # or could be a second paragraph of the previous footnote, in which case append\n if footnote_el:\n footnote_el.append(old_footnote_el)\n old_footnote_el = footnote_el.next()\n continue\n else:\n # if there's a non-footnote before the first footnote, we don't know what's going on,\n # so quit processing\n warnings.append(\"Unexpected non-footnote element.\")\n break\n label = link_el.text()\n footnote_index += 1\n footnote_id = f'footnote_{opinion_index}_{footnote_index}'\n footnote_el = PyQuery(renderer.make_footnote_el(id=footnote_id, label=label))\n refs[link_el.attr('href').lstrip('#')] = [footnote_id, footnote_el]\n while link_el.parent()[0].tag == 'sup':\n link_el = link_el.parent()\n link_el.remove()\n\n # remove space at beginning of footnote left over from removing footnote number\n if old_footnote_el[0].text:\n old_footnote_el[0].text = old_footnote_el[0].text.lstrip()\n\n wrap_with(old_footnote_el, footnote_el)\n old_footnote_el = footnote_el.next()\n\n # fix footnote references (<small> is already stripped)\n # ...<sup><a href=\"#fr1\" name=\"fn1\">1</a></sup>... typical\n # ...<sup id=\"co_fnRef_B00012045229866_ID0E4F\">1</sup> BR/590/590 B.R. 577.xml\n # ...<a href=\"#1\" name=\"fn1\" id=\"fn1\">1</a>... 
NW2d/781/781NW2d5512010WIApp33_29.xml\n for section in case_pq('.head-matter, .opinion').items():\n for old_ref_pq in section('a, sup[id]').items():\n label = old_ref_pq.text()\n if old_ref_pq[0].tag == 'a':\n ref_name = old_ref_pq.attr('name')\n if not (ref_name and ref_name.startswith('fn')):\n continue\n else:\n ref_name = \"fn\" + label\n ref, footnote_el = refs.get(ref_name, ['orphan', None])\n if footnote_el:\n # move footnotes from end of document to correct section -- see NW2d/906/906 N.W.2d 436_Replace.xml\n if section != notes_section:\n section.append(footnote_el)\n else:\n warnings.append(f\"Unmatched ref {repr(str(old_ref_pq))}\")\n ref_el = etree.Element('a', {'class': 'footnotemark', 'href': '#' + ref, 'id': 'ref_' + ref})\n ref_el.text = label\n while old_ref_pq.parent()[0].tag == 'sup':\n old_ref_pq = old_ref_pq.parent()\n PyQuery(ref_el).insert_before(old_ref_pq)\n old_ref_pq.remove()", "def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)", "def test_forward_paragraph_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"10.0\", \"10.0\"),\n after_sel=(\"10.0\", \"15.0\"),\n command_name=\"forward-paragraph-extend-selection\",\n )", "def _has_page_jump(text):\n # Determines matches with format strings.\n for format_tuple in _FORMAT_STRINGS:\n jump = _get_jump_with_pattern(text, format_tuple)\n if jump:\n return jump\n\n # Recognizes common OCR for \"From page 1\".\n match = _match_pattern(text, r\"(^Frompagel$){e<=3}\")\n if match and text[-1] == 'l':\n return -1", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def test_extend_to_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"8.0\", \"13.33\"),\n command_name=\"extend-to-paragraph\",\n )", "def links_to_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'a')", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def parse_paragraphs(self):\n paragraphs = self.paragraphs\n for paragraph in paragraphs:\n try:\n if paragraph == \"Oznaczenie sądu\" and not self.locked_cells[\"Oznaczenie sądu\"]:\n self.search_index(4, \"Oznaczenie sądu\", paragraph)\n\n if paragraph.startswith(\"3.Firma,\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"3.Nazwa\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"1.Siedziba\") and not self.locked_cells[\"Siedziba\"]:\n self.search_index(4, \"Siedziba\", paragraph)\n\n if paragraph.startswith(\"2.Adres\") and not 
self.locked_cells[\"Adres\"]:\n self.search_index(4, \"Adres\", paragraph)\n\n if paragraph.startswith(\"Numer KRS\") and not self.locked_cells[\"KRS\"]:\n self.datafields[\"KRS\"] = paragraph.split()[-1]\n self.locked_cells[\"KRS\"] = True\n\n if paragraph.startswith(\"2.Numer REGON/NIP\") and not self.locked_cells[\"REGON/NIP\"]:\n self.search_index(2, \"REGON/NIP\", paragraph)\n\n if paragraph.startswith(\"1.Oznaczenie formy prawnej\") and not self.locked_cells[\"Forma Prawna\"]:\n self.search_index(2, \"Forma Prawna\", paragraph)\n\n if paragraph.startswith(\"1.Wysokość kapitału zakładowego\"):\n self.search_index(2, \"Kapitał Zakładowy\", paragraph)\n\n if paragraph.startswith(\"5.Kwotowe określenie części kapitału wpłaconego\"):\n self.search_index(2, \"Kapitał Wpłacony\", paragraph)\n\n if paragraph.startswith(\"Rubryka 7 - Dane wspólników\"): # Open \"Wspólnicy\" parsing block.\n self.locked_cells[\"Wspólnicy\"] = True\n\n if paragraph.startswith(\"Rubryka 7 - Komitet założycielski\"): # STOWARZYSZENIE\n break\n\n if paragraph.startswith(\"1.Nazwisko / Nazwa lub firma\") and self.locked_cells[\"Wspólnicy\"]:\n self.active += 1\n self.datafields[f\"Wspólnik {self.active}\"] = {}\n\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Wspólnik\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = rf\"[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Wspólnik\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"3.Numer PESEL/REGON\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[0-9]{9,11}\"\n self.search_loop(pattern, \"Wspólnik\", \"PESEL/REGON\", paragraph)\n\n if paragraph.startswith(\"4.Numer KRS\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[*]+|[0-9]{10}$\"\n self.search_loop(pattern, \"Wspólnik\", \"KRS\", paragraph)\n\n if paragraph.startswith(\"5.Posiadane przez wspólnika udziały\"):\n index = paragraphs.index(paragraph)\n line_1 = paragraphs[index + 2].strip(\" \")\n line_2 = paragraphs[index + 3].strip(\" \")\n if line_2:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1} {line_2}\"\n else:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1}\"\n\n if paragraph == \"ZARZĄD\":\n self.locked_cells[\"Wspólnicy\"] = False # Close \"Wspólnicy\" parsing block.\n self.locked_cells[\"Zarząd\"] = True # Open \"Zarząd\" parsing block.\n self.active = 0\n\n if paragraph.startswith(\"1.Nazwisko\") and self.locked_cells[\"Zarząd\"]:\n self.active += 1\n self.datafields[f\"Zarząd {self.active}\"] = {}\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Zarząd\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Zarząd\"]:\n pattern = rf\"^[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Zarząd\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"5.Funkcja w organie \") and self.locked_cells[\"Zarząd\"]:\n paragraph = paragraph.strip(\"5.Funkcja w organie reprezentującym \")\n self.datafields[f\"Zarząd {self.active}\"][\"Funkcja\"] = paragraph\n\n if paragraph.startswith(\"Rubryka 2 - Organ nadzoru\"):\n self.locked_cells[\"Zarząd\"] = False # Close \"Zarząd\" parsing block.\n except KeyError:\n pass\n return self.datafields", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = 
textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def treat_page(self) -> None:\n text = self.current_page.text\n\n if self.opt.up:\n text = self.opt.text + '\\n' + text\n elif not self.opt.reorder:\n text += '\\n' + self.opt.text\n else:\n text = textlib.add_text(text, self.opt.text,\n site=self.current_page.site)\n\n self.put_current(text, summary=self.opt.summary, minor=self.opt.minor)", "def test_backward_kill_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"backward-kill-paragraph\",\n )", "def extract_paragraph_test(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc(parser = 'cde_parser')\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_test(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def home(self):\n while self.document.characters[self.position-1].character != '\\n':\n self.position -= 1\n if self.position == 0:\n # Got to beginning of file before newline\n break", "def test_back_paragraph_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. 
StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.5\"),\n after_sel=(\"6.7\", \"9.5\"),\n command_name=\"back-paragraph-extend-selection\",\n )", "def test_fill_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Services StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially\n declared disasters are weather related,\n leading to around 500 deaths per year\n and nearly $14 billion in damage.\n StormReady, a program\n started in 1999 in Tulsa, OK,\n helps arm America's\n communities with the communication and\n safety skills needed to save lives and\n property--before and during the event.\n StormReady helps community leaders and\n emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Services StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. 
StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property--before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.0\", \"3.7\"),\n after_sel=(\"10.0\", \" 10.0\"),\n command_name=\"fill-paragraph\",\n directives=\"@pagewidth 80\",\n )", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n 
shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' 
in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def block(self, text, head_offset=0):\n if not self.lite:\n tre = '|'.join(self.btag)\n else:\n tre = '|'.join(self.btag_lite)\n text = text.split('\\n\\n')\n\n tag = 'p'\n atts = cite = graf = ext = ''\n c1 = ''\n\n out = []\n\n anon = False\n for line in text:\n pattern = r'^(%s)(%s%s)\\.(\\.?)(?::(\\S+))? (.*)$' % (\n tre, self.align_re, self.c\n )\n match = re.search(pattern, line, re.S)\n if match:\n if ext:\n out.append(out.pop() + c1)\n\n tag, atts, ext, cite, graf = match.groups()\n h_match = re.search(r'h([1-6])', tag)\n if h_match:\n head_level, = h_match.groups()\n tag = 'h%i' % max(1, min(int(head_level) + head_offset, 6))\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, graf)\n # leave off c1 if this block is extended,\n # we'll close it at the start of the next block\n\n if ext:\n line = \"%s%s%s%s\" % (o1, o2, content, c2)\n else:\n line = \"%s%s%s%s%s\" % (o1, o2, content, c2, c1)\n\n else:\n anon = True\n if ext or not re.search(r'^\\s', line):\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, line)\n # skip $o1/$c1 because this is part of a continuing\n # extended block\n if tag == 'p' and not self.hasRawText(content):\n line = content\n else:\n line = \"%s%s%s\" % (o2, content, c2)\n else:\n line = self.graf(line)\n\n line = self.doPBr(line)\n if self.html_type == 'xhtml':\n line = re.sub(r'<br>', '<br />', line)\n\n if self.html_type == 'html':\n line = re.sub(r'<br />', '<br>', line)\n\n if ext and anon:\n out.append(out.pop() + \"\\n\" + line)\n elif not eat:\n out.append(line)\n\n if not ext:\n tag = 'p'\n atts = ''\n cite = ''\n graf = ''\n\n if ext:\n out.append(out.pop() + c1)\n return '\\n\\n'.join(out)", "def _do_links(self, text):\r\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\r\n\r\n # `anchor_allowed_pos` is used to support img links inside\r\n # anchors, but not anchors inside anchors. An anchor's start\r\n # pos must be `>= anchor_allowed_pos`.\r\n anchor_allowed_pos = 0\r\n\r\n curr_pos = 0\r\n while True: # Handle the next link.\r\n # The next '[' is the start of:\r\n # - an inline anchor: [text](url \"title\")\r\n # - a reference anchor: [text][id]\r\n # - an inline img: ![text](url \"title\")\r\n # - a reference img: ![text][id]\r\n # - a footnote ref: [^id]\r\n # (Only if 'footnotes' extra enabled)\r\n # - a footnote defn: [^id]: ...\r\n # (Only if 'footnotes' extra enabled) These have already\r\n # been stripped in _strip_footnote_definitions() so no\r\n # need to watch for them.\r\n # - a link definition: [id]: url \"title\"\r\n # These have already been stripped in\r\n # _strip_link_definitions() so no need to watch for them.\r\n # - not markup: [...anything else...\r\n try:\r\n start_idx = text.index('[', curr_pos)\r\n except ValueError:\r\n break\r\n text_length = len(text)\r\n\r\n # Find the matching closing ']'.\r\n # Markdown.pl allows *matching* brackets in link text so we\r\n # will here too. 
Markdown.pl *doesn't* currently allow\r\n # matching brackets in img alt text -- we'll differ in that\r\n # regard.\r\n bracket_depth = 0\r\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\r\n text_length)):\r\n ch = text[p]\r\n if ch == ']':\r\n bracket_depth -= 1\r\n if bracket_depth < 0:\r\n break\r\n elif ch == '[':\r\n bracket_depth += 1\r\n else:\r\n # Closing bracket not found within sentinel length.\r\n # This isn't markup.\r\n curr_pos = start_idx + 1\r\n continue\r\n link_text = text[start_idx+1:p]\r\n\r\n # Possibly a footnote ref?\r\n if \"footnotes\" in self.extras and link_text.startswith(\"^\"):\r\n normed_id = re.sub(r'\\W', '-', link_text[1:])\r\n if normed_id in self.footnotes:\r\n self.footnote_ids.append(normed_id)\r\n result = '<sup class=\"footnote-ref\" id=\"fnref-%s\">' \\\r\n '<a href=\"#fn-%s\">%s</a></sup>' \\\r\n % (normed_id, normed_id, len(self.footnote_ids))\r\n text = text[:start_idx] + result + text[p+1:]\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = p+1\r\n continue\r\n\r\n # Now determine what this is by the remainder.\r\n p += 1\r\n if p == text_length:\r\n return text\r\n\r\n # Inline anchor or img?\r\n if text[p] == '(': # attempt at perf improvement\r\n match = self._tail_of_inline_link_re.match(text, p)\r\n if match:\r\n # Handle an inline anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n\r\n url, title = match.group(\"url\"), match.group(\"title\")\r\n if url and url[0] == '<':\r\n url = url[1:-1] # '<url>' -> 'url'\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n if title:\r\n title_str = ' title=\"%s\"' % (\r\n _xml_escape_attr(title)\r\n .replace('*', self._escape_table['*'])\r\n .replace('_', self._escape_table['_']))\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n _xml_escape_attr(link_text),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n continue\r\n\r\n # Reference anchor or img?\r\n else:\r\n match = self._tail_of_reference_link_re.match(text, p)\r\n if match:\r\n # Handle a reference-style anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n link_id = match.group(\"id\").lower()\r\n if not link_id:\r\n link_id = link_text.lower() # for links like [this][]\r\n if link_id in self.urls:\r\n url = self.urls[link_id]\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title = self.titles.get(link_id)\r\n if 
title:\r\n before = title\r\n title = _xml_escape_attr(title) \\\r\n .replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title_str = ' title=\"%s\"' % title\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n link_text.replace('\"', '&quot;'),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result = '<a href=\"%s\"%s>%s</a>' \\\r\n % (url, title_str, link_text)\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = match.end()\r\n continue\r\n\r\n # Otherwise, it isn't markup.\r\n curr_pos = start_idx + 1\r\n\r\n return text", "def end_paragraph(self):\n raise NotImplementedError", "def paragraph_article_text(link_to_check, point_no):\n\tdetails = ''\n\t\n\tprint(link_to_check)\n\tsoup = buzzfeedbot.soup_session(link_to_check)\n\tstart = soup.find_all('span', attrs={'class': 'subbuzz__number'})[point_no].parent.parent\n\t\n\ttry:\n\t\tsubpoint = start.find('div', class_=\"subbuzz__description\")\n\t\tif subpoint == None:\t\n\t\t\treturn \"No extra information available\"\n\texcept IndexError:\n\t\treturn \"No extra information available\"\n\t\n\tfor description in subpoint.find_all('p'):\n\t\tdetails += description.text + \"\\n\\n\"\n\t\t\n\tif details == '':\n\t\treturn \"No extra information available\"\n\telse:\n\t\treturn details", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def find_link(html_content):\n soup = BeautifulSoup(html_content, \"html.parser\")\n paragraphs = soup.find_all('p')\n for p in paragraphs:\n string = ''\n for element in p:\n if type(element) == bs4.element.NavigableString:\n string += element\n elif type(element) == bs4.element.Tag and element.name == 'a':\n if balanced_parenths(string):\n return element\n else:\n string += element.get_text()\n return None", "def iter_main_text(self, element):\n if element.tag == 'note':\n return\n if element.text:\n yield element.text\n for e in element:\n for se in self.iter_main_text(e):\n yield se\n if e.tail:\n yield e.tail", "def paragraph_with_marker(self, text, tagged_text):\n # To aid in determining collapsed paragraphs, replace any\n # keyterms present\n node_for_keyterms = Node(\n text, node_type=Node.APPENDIX, tagged_text=tagged_text,\n label=[initial_marker(text)[0]]\n )\n keyterm = KeyTerms.keyterm_in_node(node_for_keyterms)\n if keyterm:\n mtext = text.replace(keyterm, '.' * len(keyterm))\n else:\n mtext = text\n\n for mtext in split_paragraph_text(mtext):\n if keyterm: # still need the original text\n mtext = mtext.replace('.' 
* len(keyterm), keyterm)\n node = Node(mtext, node_type=Node.APPENDIX,\n label=[initial_marker(mtext)[0]])\n self.nodes.append(node)", "def process_paragraph( paragraph ):\n\t# Lists of bounding boxes, text, and probabilities\n\tline_box_list = []\n\tline_text_list = []\n\tline_prob_list = []\n\n\t# Line under processing\n\tcurrent_line_text = []\n\tcurrent_line_prob = []\n\t# Bounding box temporary variables\n\tx1 = 100000\n\ty1 = 100000\n\tx2 = 0\n\ty2 = 0\n\n\tfor word in paragraph.words:\n\t\tfor symbol in word.symbols:\n\t\t\t# x1, y1 (Left upper corner)\n\t\t\tif symbol.bounding_box.vertices[0].x < x1:\n\t\t\t\tx1 = symbol.bounding_box.vertices[0].x\n\t\t\tif symbol.bounding_box.vertices[0].y < y1:\n\t\t\t\ty1 = symbol.bounding_box.vertices[0].y\n\t\t\tif symbol.bounding_box.vertices[1].y < y1: \n\t\t\t\ty1 = symbol.bounding_box.vertices[1].y\n\t\t\tif symbol.bounding_box.vertices[3].x < x1:\n\t\t\t\tx1 = symbol.bounding_box.vertices[3].x\n\t\t\t# x2, y2 (right lower corner)\n\t\t\tif symbol.bounding_box.vertices[2].x > x2:\n\t\t\t\tx2 = symbol.bounding_box.vertices[2].x\n\t\t\tif symbol.bounding_box.vertices[2].y > y2:\n\t\t\t\ty2 = symbol.bounding_box.vertices[2].y\n\t\t\tif symbol.bounding_box.vertices[1].x > x2:\n\t\t\t\tx2 = symbol.bounding_box.vertices[1].x\n\t\t\tif symbol.bounding_box.vertices[3].y > y2:\n\t\t\t\ty2 = symbol.bounding_box.vertices[3].y\n\n\t\t\tcurrent_line_text.append( symbol.text )\n\t\t\tcurrent_line_prob.append( symbol.confidence )\n\t\t\t# Check for blank spaces\n\t\t\tif symbol.property.detected_break.type in [ breaks.SPACE, breaks.SURE_SPACE ]:\n\t\t\t\tcurrent_line_text.append( ' ' )\n\t\t\t\tcurrent_line_prob.append( 0.95 )\n\t\t\t# Check for new lines\n\t\t\tif symbol.property.detected_break.type in [ breaks.EOL_SURE_SPACE, breaks.HYPHEN, breaks.LINE_BREAK ]:\n\t\t\t\tline_box_list.append( [x1, y1, x2, y2] )\n\t\t\t\tline_text_list.append( current_line_text )\n\t\t\t\tline_prob_list.append( current_line_prob )\n\t\t\t\t# Line under processing\n\t\t\t\tcurrent_line_text = []\n\t\t\t\tcurrent_line_prob = []\n\t\t\t\t# Bounding box temporary variables\n\t\t\t\tx1 = 100000\n\t\t\t\ty1 = 100000\n\t\t\t\tx2 = 0\n\t\t\t\ty2 = 0\n\n\treturn( line_box_list, line_text_list, line_prob_list )", "def print_paragraph(msg):\n print\n print \"\\n\".join(textwrap.wrap(msg, width=80))", "def test_reformat_paragraph_paragraph_1_of_3(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. 
Does StormReady make a difference?\n\n Last paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe\n weather-prone country on Earth. Each\n year, Americans cope with an average of\n 10,000 thunderstorms, 2,500 floods,\n 1,000 tornadoes, as well as an average\n of 6 deadly hurricanes. Potentially\n deadly weather impacts every American.\n Communities can now rely on the National\n Weather Service’s StormReady program to\n help them guard against the ravages of\n Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n\n Last paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"13.0\", \"13.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def generate_new_book(text):\n\n for paragraph in text:\n for sentence in paragraph:\n for word in sentence:\n print(word, end=' ')\n print()\n print()", "def test_with_preceding_text(self):\n\n expected = r'''\n <p>foo\n <strong>foo</strong></p>\n <details class=\"note\">\n <summary>Details</summary>\n </details>\n '''\n\n self.check_markdown(\n R'''\n foo\n **foo**\n ??? note \"Details\"\n ''',\n expected,\n True\n )", "def preprocess_reference_text(text):\n try:\n splitpoint = text.lower().rindex('reference')\n except ValueError:\n splitpoint = False\n while splitpoint and len(text) - splitpoint < 100 and 'reference' in text[:splitpoint].lower():\n text = text[:splitpoint]\n splitpoint = text.lower().rindex('reference')\n if not splitpoint:\n has_reference_section = False\n non_reference_section, reference_section = text, ''\n else:\n has_reference_section = True\n non_reference_section, reference_section = text[:splitpoint], text[splitpoint:]\n return has_reference_section, reference_section, non_reference_section", "def findFootnotesPlaceholder(self, root):\n def finder(element):\n for child in element:\n if child.text:\n if child.text.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, True\n if child.tail:\n if child.tail.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, False\n finder(child)\n return None\n \n res = finder(root)\n return res", "def test_reformat_paragraph_paragraph_2_of_3(self):\n before_b = \"\"\"\\\n Americans live in the most severe\n weather-prone country on Earth. Each\n year, Americans cope with an average of\n 10,000 thunderstorms, 2,500 floods,\n 1,000 tornadoes, as well as an average\n of 6 deadly hurricanes. Potentially\n deadly weather impacts every American.\n Communities can now rely on the National\n Weather Service’s StormReady program to\n help them guard against the ravages of\n Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. 
StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n\n Last paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe\n weather-prone country on Earth. Each\n year, Americans cope with an average of\n 10,000 thunderstorms, 2,500 floods,\n 1,000 tornadoes, as well as an average\n of 6 deadly hurricanes. Potentially\n deadly weather impacts every American.\n Communities can now rely on the National\n Weather Service’s StormReady program to\n help them guard against the ravages of\n Mother Nature.\n\n Some 90% of all presidentially declared\n disasters are weather related, leading\n to around 500 deaths per year and nearly\n $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK,\n helps arm America's communities with the\n communication and safety skills needed\n to save lives and property– before and\n during the event. StormReady helps\n community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n\n Last paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"13.0\", \"13.0\"),\n after_sel=(\"25.0\", \"25.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def find_bound_paragraph(c: Cmdr) -> tuple[str, list[str], str]:\n head, ins, tail = c.frame.body.getInsertLines()\n head_lines = g.splitLines(head)\n tail_lines = g.splitLines(tail)\n result = []\n insert_lines = g.splitLines(ins)\n para_lines = insert_lines + tail_lines\n # If the present line doesn't start a paragraph,\n # scan backward, adding trailing lines of head to ins.\n if insert_lines and not startsParagraph(insert_lines[0]):\n n = 0 # number of moved lines.\n for s in reversed(head_lines):\n if ends_paragraph(s) or single_line_paragraph(s):\n break\n elif startsParagraph(s):\n n += 1\n break\n else:\n n += 1\n if n > 0:\n para_lines = head_lines[-n :] + para_lines\n head_lines = head_lines[: -n]\n ended, started = False, False\n for i, s in enumerate(para_lines):\n if started:\n if ends_paragraph(s) or startsParagraph(s):\n ended = True\n break\n else:\n result.append(s)\n elif s.strip():\n result.append(s)\n started = True\n if ends_paragraph(s) or single_line_paragraph(s):\n i += 1\n ended = True\n break\n else:\n head_lines.append(s)\n if started:\n head = ''.join(head_lines)\n tail_lines = para_lines[i:] if ended else []\n tail = ''.join(tail_lines)\n return head, result, tail # string, list, string\n return None, None, None", "def _get_next_textoutputsections(\n sections: List[\"Section\"], index: int\n) -> Iterator[\"Section\"]:\n for j in range(index, len(sections)):\n section = sections[j]\n if section.directive == SphinxDoctestDirectives.TESTOUTPUT:\n yield section\n else:\n break", "def parse_text(self):\n text = 
self.get_data()\n line1 = text[0]\n index_list = [0]\n start_index = 3\n for i in range(1, len(text)):\n\n if line1.startswith('*'):\n index_list, start_index = self.star_parser(index_list, line1)\n elif line1.startswith('.'):\n start_index = self.dot_parser(start_index, line1, text, i)\n else:\n print \"\".rjust(start_index) + line1\n line1 = text[i]\n # Parse the last line\n if text[-1].startswith('*'):\n self.star_parser(index_list, text[-1])\n elif text[-1].startswith('.'):\n print '-'.rjust(start_index) + text[-1].lstrip('.')\n else:\n print \"\".rjust(start_index) + text[-1]", "def onMnemoToMain(self):\n self.second_main_text.SetFocus()", "def goToNextLink(idx):\n eel.showLoader()\n nextlink = wikiPageStackTrace[-1].getRawList()[idx].get('href')\n url = baseArticleUrl+nextlink\n print(\"going to \", url)\n newpage = wikipediaPage(url)\n wikiPageStackTrace.append(newpage)\n titleStackTrace.append(newpage.getTitle())\n urlStackTrace.append(newpage.getUrl())\n update()", "def test_kill_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. 
Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"8.0\", \"8.0\"),\n command_name=\"kill-paragraph\",\n )", "def textinterpret(self,\r\n phrase,\r\n depth=0,\r\n re_entering=False,\r\n newindex=Index(-1)):\r\n\r\n if len(phrase) > 3:\r\n if phrase[0] == LEFTNOTE and phrase[-1] == RIGHTNOTE and len(phrase) > 1:\r\n phrase = phrase[1:-1]\r\n #eliminate enclosing brackets\r\n keylist = self.pass_key_dict[depth][0]\r\n addedlist = self.pass_key_dict[depth][1]\r\n #list to keep track of new key words added on\r\n\r\n if phrase[0] == ATSIGN:\r\n # at sign signs enclose an index\r\n right_at = True\r\n as_child = False\r\n index_phrase = phrase.split(ATSIGN)[1]\r\n index = Index(index_phrase)\r\n\r\n phrase = phrase.replace(ATSIGN+index_phrase+ATSIGN, EMPTYCHAR)\r\n # eliminates index phrase\r\n\r\n elif phrase[0] == PERCENTAGE:\r\n # percentage signs enclose a child index\r\n right_at = True\r\n as_child = True\r\n index_phrase = phrase.split(PERCENTAGE)[1]\r\n index = Index(index_phrase)\r\n\r\n phrase = phrase.replace(PERCENTAGE+index_phrase+PERCENTAGE, EMPTYCHAR)\r\n #eliminates index phrase\r\n\r\n elif phrase[0] == '\"':\r\n #for a child note\r\n phrase = phrase[1:]\r\n\r\n right_at = False\r\n as_child = True\r\n as_next = False\r\n\r\n index = self.index_sort([Index(0)]\r\n +[a_temp for a_temp\r\n in self.find_within(Index(0),\r\n Index(1),\r\n orequal=False)],\r\n by_date=False,\r\n quick=False)[-1]\r\n\r\n elif phrase[0] == \"'\":\r\n #for a next note\r\n\r\n phrase = phrase[1:]\r\n as_next = True\r\n as_child = False\r\n right_at = True\r\n index = self.index_sort([Index(0)]+[a_temp for a_temp\r\n in self.find_within(Index(0),\r\n Index(1),\r\n orequal=False)],\r\n by_date=False,\r\n quick=False)[-1]\r\n\r\n elif phrase[0] == \";\":\r\n # to go back to the previous level and add a next note\r\n phrase = phrase[1:]\r\n as_next = True\r\n as_child = False\r\n right_at = True\r\n index = self.index_sort([Index(0)]\r\n +[a_temp for a_temp\r\n in self.find_within(Index(0),\r\n Index(1),\r\n orequal=False)],\r\n by_date=False,\r\n quick=False)[-1]\r\n index = Index(index)\r\n index = index.previous()\r\n# index = str(index)\r\n\r\n\r\n elif phrase[0] not in [DOLLAR, DASH, PLUS, STAR]:\r\n # for an ordinary note\r\n\r\n j_temp = Index(int(Index(self.indexes()[-1])))\r\n # Procedure for moving notes out of the ZERO range\r\n for i_temp in self.find_within(Index(0), Index(1)):\r\n # j_temp is the next integer index\r\n self.move(i_temp, j_temp+Index(i_temp))\r\n\r\n right_at = False\r\n as_child = False\r\n as_next = False\r\n index = Index(0)\r\n\r\n if phrase[0] == DOLLAR:\r\n #new keyword set\r\n keylist = []\r\n if len(phrase) > 1:\r\n keylist += phrase[1:].split(COMMA)\r\n elif phrase[0] == PLUS:\r\n #add keyword set to existing\r\n if len(phrase) > 1:\r\n for k_temp in phrase[1:].split(COMMA):\r\n keylist.append(k_temp)\r\n addedlist.append(len(phrase[1:].split(COMMA)))\r\n\r\n elif phrase[0] == DASH:\r\n #delete keyword\r\n if addedlist and len(keylist) > addedlist[-1]:\r\n for a_temp in range(1, addedlist[-1]+1):\r\n keylist.pop()\r\n addedlist.pop()\r\n\r\n elif phrase[0] == STAR:\r\n #adds a single note with new keys,\r\n #yet without erasing the old keyset.\r\n # NEED TO CHECK IF THIS FUNCTION WORKS\r\n\r\n ks_temp = set(phrase[1:].split(SEMICOLON)[0].split(COMMA))\r\n ks_temp.update(extract.extract(phrase.split(SEMICOLON, 1)[1],\r\n LEFTCURLY,\r\n RIGHTCURLY))\r\n newindex = 
self.addnew(ks_temp,\r\n phrase.split(SEMICOLON, 1)[1])\r\n else:\r\n\r\n if not flatten.isflat(keylist):\r\n keylist = flatten.flatten(keylist)\r\n ks_temp = set(keylist)\r\n meta = {}\r\n if LEFTCURLY in phrase:\r\n ks_temp.update(extract.extract(phrase,\r\n LEFTCURLY,\r\n RIGHTCURLY))\r\n # extracts keywords that are enclosed\r\n #in curly brackets within the text\r\n if '^:' in phrase:\r\n metadatalist = extract.extract(phrase, '^:', ':^')\r\n # extract metadata\r\n\r\n for md_temp in metadatalist:\r\n #assigns metadata\r\n if VERTLINE in md_temp and len(md_temp.split(VERTLINE)) >= 2:\r\n if md_temp.split(VERTLINE)[1] == 'S':\r\n meta[md_temp.split(VERTLINE)[0]] = str(md_temp.split(VERTLINE)[2])\\\r\n .replace('\"'+\"'\",\"'\")\\\r\n .replace(\"'\"+'\"',\"'\")\r\n if md_temp.split(VERTLINE)[1] == 'I':\r\n meta[md_temp.split(VERTLINE)[0]] = int(md_temp.split(VERTLINE)[2])\r\n if md_temp.split(VERTLINE)[1] == 'L':\r\n meta[md_temp.split(VERTLINE)[0]] = [x_temp.replace('\"'+\"'\",\"'\")\\\r\n .replace(\"'\"+'\"',\"'\") for x_temp in\r\n md_temp.split(VERTLINE)[2][1:-1].split(COMMA)]\r\n phrase = nformat.remove_between(phrase, '^:', ':^')\r\n newindex = self.enter(ks_temp,\r\n phrase,\r\n meta,\r\n query=False,\r\n not_parsing=False,\r\n right_at=right_at,\r\n as_child=as_child,\r\n ind=str(index),\r\n re_entering=re_entering)\r\n self.pass_key_dict[depth][0] = keylist\r\n self.pass_key_dict[depth][1] = addedlist\r\n return newindex", "def split_description_into_paragraphs(unformatted_description):\n description = unformatted_description.strip()\n paragraphs = re.compile(r'[\\n\\r]{2,}').split(description)\n formatted_paragraphs = []\n\n # Sanitise paragraphs\n def external(attrs, new=False):\n url_parts = urlparse(attrs[(None, \"href\")])\n if url_parts.netloc and url_parts.netloc != 'snapcraft.io':\n if (None, \"class\") not in attrs:\n attrs[(None, \"class\")] = \"p-link--external\"\n elif \"p-link--external\" not in attrs[(None, \"class\")]:\n attrs[(None, \"class\")] += \" p-link--external\"\n return attrs\n\n for paragraph in paragraphs:\n callbacks = bleach.linkifier.DEFAULT_CALLBACKS\n callbacks.append(external)\n\n paragraph = bleach.clean(paragraph, tags=[])\n paragraph = bleach.linkify(paragraph, callbacks=callbacks)\n\n formatted_paragraphs.append(paragraph.replace('\\n', '<br />'))\n\n return formatted_paragraphs", "def test_reformat_paragraph_paragraph_3_of_3(self):\n before_b = \"\"\"\\\n Americans live in the most severe\n weather-prone country on Earth. Each\n year, Americans cope with an average of\n 10,000 thunderstorms, 2,500 floods,\n 1,000 tornadoes, as well as an average\n of 6 deadly hurricanes. Potentially\n deadly weather impacts every American.\n Communities can now rely on the National\n Weather Service’s StormReady program to\n help them guard against the ravages of\n Mother Nature.\n\n Some 90% of all presidentially declared\n disasters are weather related, leading\n to around 500 deaths per year and nearly\n $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK,\n helps arm America's communities with the\n communication and safety skills needed\n to save lives and property– before and\n during the event. StormReady helps\n community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. 
Does StormReady make a difference?\n\n Last paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe\n weather-prone country on Earth. Each\n year, Americans cope with an average of\n 10,000 thunderstorms, 2,500 floods,\n 1,000 tornadoes, as well as an average\n of 6 deadly hurricanes. Potentially\n deadly weather impacts every American.\n Communities can now rely on the National\n Weather Service’s StormReady program to\n help them guard against the ravages of\n Mother Nature.\n\n Some 90% of all presidentially declared\n disasters are weather related, leading\n to around 500 deaths per year and nearly\n $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK,\n helps arm America's communities with the\n communication and safety skills needed\n to save lives and property– before and\n during the event. StormReady helps\n community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better\n prepared to save lives from the\n onslaught of severe weather through\n better planning, education, and\n awareness. No community is storm proof,\n but StormReady can help communities save\n lives. Does StormReady make a\n difference?\n\n Last paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"25.10\", \"25.10\"),\n after_sel=(\"34.0\", \"34.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_reformat_paragraph_new_code_4_of_8(self):\n before_b = \"\"\"\\\n - Point 1. xxxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 11.\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n after_b = \"\"\"\\\n - Point 1. xxxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 11.\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def footnote_spot(tree: nodes.document) -> tuple[Element, int]:\n # The code uses the following heuristic:\n # a) place them after the last existing footnote\n # b) place them after an (empty) Footnotes rubric\n # c) create an empty Footnotes rubric at the end of the document\n fns = list(tree.findall(nodes.footnote))\n if fns:\n fn = fns[-1]\n return fn.parent, fn.parent.index(fn) + 1\n for node in tree.findall(nodes.rubric):\n if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:\n return node.parent, node.parent.index(node) + 1\n doc = next(tree.findall(nodes.document))\n rub = nodes.rubric()\n rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))\n doc.append(rub)\n return doc, doc.index(rub) + 1", "def test_getPassage_prevnext(self):\n passage = self.resolver.getTextualNode(\n \"urn:cts:latinLit:phi1294.phi002.perseus-lat2\", subreference=\"1.1\", metadata=True\n )\n\n self.assertIsInstance(\n passage, Passage,\n \"GetPassage should always return passages objects\"\n )\n self.assertEqual(\n passage.prevId, \"1.pr\",\n \"Previous Passage ID should be parsed\"\n )\n self.assertEqual(\n passage.nextId, \"1.2\",\n \"Next Passage ID should be parsed\"\n )\n\n children = list(passage.getReffs())\n # Ensure navigability\n self.assertIn(\n \"verentia ludant; quae adeo antiquis auctoribus defuit, ut\",\n passage.prev.export(output=Mimetypes.PLAINTEXT),\n \"Left and Right Navigation should be available\"\n )\n self.assertIn(\n \"Qui tecum cupis esse meos ubicumque libellos \",\n 
passage.next.export(output=Mimetypes.PLAINTEXT),\n \"Left and Right Navigation should be available\"\n )\n\n # We check the passage is able to perform further requests and is well instantiated\n self.assertEqual(\n children[0], '1.1.1',\n \"Resource should be string identifiers\"\n )\n\n self.assertIn(\n \"Hic est quem legis ille, quem requiris,\", passage.export(output=Mimetypes.PLAINTEXT),\n \"Export PrototypeText should work correctly\"\n )\n\n self.assertEqual(\n passage.export(output=Mimetypes.PYTHON.ETREE).xpath(\".//tei:l[@n='1']/text()\", namespaces=NS, magic_string=False),\n [\"Hic est quem legis ille, quem requiris, \"],\n \"Export to Etree should give an Etree or Etree like object\"\n )", "def placeNoteLists(self, text):\n if self.notes:\n o = OrderedDict()\n for label, info in self.notes.items():\n if 'seq' in info:\n i = info['seq']\n info['seq'] = label\n o[i] = info\n else:\n self.unreferencedNotes[label] = info\n\n if o:\n # sort o by key\n o = OrderedDict(sorted(o.items(), key=lambda t: t[0]))\n self.notes = o\n text_re = re.compile('<p>notelist(%s)(?:\\:([\\w|%s]))?([\\^!]?)(\\+?)\\.?[\\s]*</p>'\n % (self.c, self.syms), re.U)\n text = text_re.sub(self.fNoteLists, text)\n return text", "def test_reformat_paragraph_new_code_7_of_8(self):\n before_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n after_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.11\", \"2.11\"),\n after_sel=(\"3.1\", \"3.1\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def main():\n\n parser = argparse.ArgumentParser(description=\"Prints the contents of a NiPoPoW\")\n parser.add_argument(\"--blocks\", required=True, type=int, help=\"Number of blocks\")\n parser.add_argument(\n \"--output\", default=\"proof.pkl\", type=str, help=\"Name of exported proof\"\n )\n args = parser.parse_args()\n blocks = args.blocks\n output = args.output\n if output.find(\".pkl\") == -1:\n output += \".pkl\"\n\n # Create blockchain\n header, headers_map, interlink_map = create_blockchain(blocks=blocks)\n print_headers(headers_map)\n print_interlinks(headers_map, interlink_map)\n\n # Create proof\n proof = make_proof(header, headers_map, interlink_map)\n print_proof(proof, headers_map)\n\n ### Start spoiling proof\n\n # remove_genesis(proof)\n # proof = change_interlink_hash(proof, 0)\n # proof = skip_blocks(proof, -2)\n # proof = replace_block(proof, headers_map, interlink_map, int(len(proof)/2))\n # print_proof(proof, headers_map)\n # verify_proof(Hash(proof[0][0]), proof)\n\n ### Stop spoiling proof\n\n proof_tool = ProofTool(\"../../data/proofs/\")\n p, f, lca = proof_tool.create_proof_and_forkproof(blocks, forkindex, forkblocks)\n print(p, f, lca)\n\n fixed_fork_proof = proof_tool.fetch_proof(f)\n verify_proof(Hash(fixed_fork_proof[0][0]), fixed_fork_proof)\n\n # proof_tool = ProofTool(\"../../data/proofs/\")\n # proof_tool.export_proof(fixed_fork_proof, f)", "def process_labels(ctx, tex, chapter):\n headings = ['chapter'] + ['sub'*i + 'section' for i in range(4)]\n reh = r'(' + '|'.join(headings) + r'){(.+?)}'\n environments = ['thm', 'lem', 'exc', 'figure', 'equation']\n ree = r'begin{(' + '|'.join(environments) + r')}'\n rel = r'(\\w+)label{(.+?)}'\n rel2 = r'label{(.+?)}'\n bigone = 
r'\\\\({})|\\\\({})|\\\\({})|\\\\(caption)|\\\\({})'.format(reh, ree, rel, rel2)\n rx = re.compile(bigone)\n\n sec_ctr = [chapter] + [0]*(len(headings))\n env_ctr = [0]*len(environments)\n blocks = catlist()\n lastlabel = None\n lastidx = 0\n m = rx.search(tex, lastidx)\n while m:\n blocks.append(tex[lastidx:m.start()])\n lastidx = m.start()\n cmd = next_command(tex, lastidx)\n lastidx = cmd.end\n if m.group(2):\n # This is a sectioning command (chapter, subsection,...)\n name = m.group(2)\n i = headings.index(name)\n if i == 0:\n env_ctr = [0]*len(env_ctr)\n sec_ctr[i:] = [sec_ctr[i]+1]+[0]*(len(headings)-i-1)\n number = \".\".join([str(x) for x in sec_ctr[:i+1]])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n title = '{}&emsp;{}'.format(number, cmd.args[0])\n blocks.append(r'\\{}{{{}}}'.format(name, title))\n\n elif m.group(5):\n # This is an environment (thm, lem, ...)\n name = m.group(5)\n lastenv = name # save this for a caption command coming later...\n i = environments.index(name)\n env_ctr[i] += 1\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n if name in ctx.theoremlike_environments:\n nicename = ctx.named_entities[name]\n title = '{}&nbsp;{}'.format(nicename, number)\n blocks.append(r'\\begin{{{}}}[{}]'.format(name, title))\n else:\n blocks.append(r'\\begin{{{}}}'.format(name))\n\n elif m.group(6):\n # This is a labelling command (\\thmlabel, \\seclabel,...)\n label = \"{}:{}\".format(m.group(7), m.group(8))\n ctx.label_map[label] = (ctx.outputfile, lastlabel)\n\n elif m.group(9):\n # This is a caption command\n name = lastenv\n i = environments.index(name)\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n nicename = ctx.named_entities[name]\n title = '<span class=\"title\">{}&nbsp;{}</span>'.format(nicename, number)\n text = '{}&emsp;{}'.format(title, cmd.args[0])\n blocks.append(r'\\caption{{{}}}'.format(text))\n\n elif m.group(10):\n # This is a \\label command, probably the target of a pageref\n idd = gen_unique_id()\n blocks.append(\"<a id={}></a>\".format(idd))\n ctx.label_map[m.group(11)] = (ctx.outputfile, idd)\n\n m = rx.search(tex, lastidx)\n blocks.append(tex[lastidx:])\n return \"\".join(blocks)", "def snippetyielder(filename):\n\ttext = open(filename, \"r\")\n\ta = text.readlines()\n\tp = \"\".join(a) \t #detecting the breaks between documents and identifying them to break the docs with\n\n\n\tdocbreak = re.sub(r\".*([1\\?RU]+[ce][j~p]+o[rtd\\*]+ .[2Jf].*)\",r\"DOCBREAK \\1\",p)\n\tdocbreak = re.sub(r\"(.*[lL]ett.*fro[mn].*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Petition .f.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Order o[/f].*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(General order of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Special order of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Unofficial letter of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Letter of .*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*([\\[I\\]]\\s*T[cue]legram.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(\\[Enclosure.+\\].*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Extracts* from .*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(A[hb]stract[of ]*log.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = 
re.sub(r\".*(Instructions from.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(A[hb]stract of statement.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Instructions* of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Memorandum from.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*([llifM]+emorandum of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Communication from.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Statement of circumstances.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Further report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Second report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Additional report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Detailed repor[~t] of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(General report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Deposition of.*)\",r\"DOCBREAK \\1\",docbreak)\n\t# docbreak = re.sub(r\"(DOCBREAK)+\",r\"DOCBREAK\\n\",docbreak) \t\n\tdocbreaks = docbreak.split(\"DOCBREAK\") \t #yielding one document at a time\n\tfor doc in docbreaks:\n\t\tif re.search(r\".+\",doc): \t\n\t\t\tyield doc", "def footnotes(self, text):\n html = '<div class=\"footnotes\">\\n%s<ol>%s</ol>\\n</div>\\n'\n return html % (self.hrule(), text)", "def navigation(self):\r\n text_from_xml, ids, eng_list = self.util.get_text_from_xml(self.string_xml, \"Navigation\", \"trans-unit\",\r\n Config.selected_language.strip())\r\n xpath = self.util.read_xpath_list_from_xml(self.object_repo, \"Navigation\", self.my_object)\r\n lenth = len(xpath)\r\n text_index = 0\r\n loop_index = 0\r\n while loop_index < lenth:\r\n if xpath[loop_index]['xpath'] == 'click':\r\n # self.util.client.sleep(2000)\r\n self.object.click(self.util.client, xpath[loop_index + 1]['zone'],\r\n xpath[loop_index + 1]['xpath'],\r\n xpath[loop_index + 1]['index'],\r\n xpath[loop_index + 1]['comment'],\r\n 1, self.logger_name)\r\n loop_index += 2\r\n continue\r\n if xpath[loop_index]['xpath'] == 'place':\r\n # self.place_holder(xpath,loop_index+1,actual_text,text_index)\r\n text_index += 1\r\n loop_index += 2\r\n continue\r\n self.get_text_compare(xpath, loop_index, text_from_xml, text_index, ids, eng_list)\r\n text_index += 1\r\n loop_index += 1", "def open(self, p):\n p.open()\n for s in self.surround(p):\n point = self.search(s[0], s[1])\n if point.is_bomb():\n continue\n if point.open():\n self.open(point)", "def treat_new_line(self,text):\n text=text.replace('.\\n','. 
')\n text=re.sub(r'(\\n\\s*)+\\n+', '\\n\\n',text )\n \n lw=text.split('\\n\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n \n for i in range(1,len(lw)):\n try:\n\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','') !='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n\n\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1][-1].replace(' ','')!='':\n\n if lw[i-1][-1].replace(' ','')[-1]!='-':\n lw[i-1]+=\"\"\n else:\n\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n else:\n lw[i-1]+=\"\\n\\n\"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n \n text=\"\".join(lw)\n \n lw=text.split('\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n for i in range(1,len(lw)):\n try:\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1]==\"-\":\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n\n\n\n else:\n lw[i-1]+=\" \"\n else:\n lw[i-1]+=\" \"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n text=\"\".join(lw)\n return text", "def test_reformat_paragraph_list_1_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is the \"lead\"\n paragraph.\n\n 1. This is item \n number 1. It is the first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item \n number 1. It is the first item in the list.\n\n 2. 
This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"4.0\", \"4.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')", "def test_reformat_paragraph_new_code_6_of_8(self):\n before_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n after_b = \"\"\"\\\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 32.\n\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"4.0\", \"4.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_getPassage_metadata_prevnext(self):\n passage = self.resolver.getTextualNode(\n \"urn:cts:latinLit:phi1294.phi002.perseus-lat2\", subreference=\"1.1\", metadata=True, prevnext=True\n )\n self.assertIsInstance(\n passage, Passage,\n \"GetPassage should always return passages objects\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"title\"), \"eng\"]), \"Epigrammata\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"groupname\"), \"eng\"]), \"Martial\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"label\"), \"eng\"]), \"Epigrams\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"description\"), \"eng\"]),\n \"M. Valerii Martialis Epigrammaton libri / recognovit W. 
Heraeus\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n passage.citation.name, \"book\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n len(passage.citation), 3,\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n passage.prevId, \"1.pr\",\n \"Previous Passage ID should be parsed\"\n )\n self.assertEqual(\n passage.nextId, \"1.2\",\n \"Next Passage ID should be parsed\"\n )\n children = list(passage.getReffs())\n # Ensure navigability\n self.assertIn(\n \"verentia ludant; quae adeo antiquis auctoribus defuit, ut\",\n passage.prev.export(output=Mimetypes.PLAINTEXT),\n \"Left and Right Navigation should be available\"\n )\n self.assertIn(\n \"Qui tecum cupis esse meos ubicumque libellos \",\n passage.next.export(output=Mimetypes.PLAINTEXT),\n \"Left and Right Navigation should be available\"\n )\n\n # We check the passage is able to perform further requests and is well instantiated\n self.assertEqual(\n children[0], '1.1.1',\n \"Resource should be string identifiers\"\n )\n\n self.assertIn(\n \"Hic est quem legis ille, quem requiris,\", passage.export(output=Mimetypes.PLAINTEXT),\n \"Export PrototypeText should work correctly\"\n )\n\n self.assertEqual(\n passage.export(output=Mimetypes.PYTHON.ETREE).xpath(\".//tei:l[@n='1']/text()\", namespaces=NS, magic_string=False),\n [\"Hic est quem legis ille, quem requiris, \"],\n \"Export to Etree should give an Etree or Etree like object\"\n )", "def get_first_link(self, article):\n #Hit the article Wikipeadia URL\n page = urllib.request.urlopen(article)\n html = page.read()\n soup = BeautifulSoup(html, 'lxml')\n\n #Iterate over all the paragraphs on that page to find the first valid link\n for child_para in soup.find_all('p'):\n links_para = str(re.findall('\\((.*?)\\)', str(child_para)))\n if self.check_paragraph(child_para, links_para):\n for child_link in child_para.find_all('a'):\n if self.check_link(child_link, links_para):\n #Return the next child link\n return 'https://en.wikipedia.org' + child_link['href']", "def test_reformat_paragraph_new_code_5_of_8(self):\n before_b = \"\"\"\\\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 22.\n 1. Point 3. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n after_b = \"\"\"\\\n A. Point 2. xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 22.\n 1. Point 3. 
xxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"2.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def __extract_text_and_hyp(self, line):\n\n line = unquote(line)\n list_hyp = []\n num_mentions = 0\n start_entities = [m.start() for m in re.finditer('<a href=\"', line)]\n end_entities = [m.start() for m in re.finditer('\">', line)]\n end_mentions = [m.start() for m in re.finditer(\"</a>\", line)]\n\n disambiguation_ent_errors = 0\n start_entity = line.find('<a href=\"')\n\n while start_entity >= 0:\n line = line[start_entity + len('<a href=\"') :]\n end_entity = line.find('\">')\n end_mention = line.find(\"</a>\")\n mention = line[end_entity + len('\">') : end_mention]\n\n if (\n (\"Wikipedia\" not in mention)\n and (\"wikipedia\" not in mention)\n and (len(mention) >= 1)\n ):\n # Valid mention\n entity = line[0:end_entity]\n find_wikt = entity.find(\"wikt:\")\n entity = entity[len(\"wikt:\") :] if find_wikt == 0 else entity\n entity = self.wikipedia.preprocess_ent_name(entity)\n\n if entity.find(\"List of \") != 0:\n if \"#\" not in entity:\n ent_wiki_id = self.wikipedia.ent_wiki_id_from_name(entity)\n if ent_wiki_id == -1:\n disambiguation_ent_errors += 1\n else:\n num_mentions += 1\n list_hyp.append(\n {\n \"mention\": mention,\n \"ent_wikiid\": ent_wiki_id,\n \"cnt\": num_mentions,\n }\n )\n # find new entity\n start_entity = line.find('<a href=\"')\n return (\n list_hyp,\n disambiguation_ent_errors,\n [len(start_entities), len(end_entities), len(end_mentions)],\n )", "def go_to(self, value=None):\n self.go_to_this_line = self.line_number.get()\n self.my_text.mark_set(INSERT, str(float(self.go_to_this_line)))\n self.current_area()\n self.my_text.see(INSERT)\n self.searcher.destroy()", "def test_extract_pdf_prev():\n\n test_pdf_path = 'tests/files/research/fea48178ffac3a42035ed27d6e2b897cb570cf13.pdf'\n text = pdf_util.extract_pdf_text_prev(test_pdf_path)\n\n assert text\n assert \"Yoshiyuki\" in text", "def split_paragraphs(block):\n # Break block contents into paragraphs by blank lines.\n def gen(block):\n par = []\n for obj in block:\n if isinstance(obj, Text) and obj.empty:\n # New paragraph.\n yield par\n par = []\n else:\n par.append(obj)\n yield par\n\n # Combine paragraphs. \n def finish(pars):\n for par in pars:\n if len(par) == 0:\n continue\n elif any( isinstance(o, Text) for o in par ):\n # Paragraph contains text. Use a P element.\n yield Block(par, tag='P')\n else:\n # Doesn't contain text; don't wrap it.\n yield from par\n\n block[:] = finish(gen(block))", "def test_reformat_paragraph_list_3_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. 
It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"10.0\", \"10.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def insert_paragraphs(self, str, ignore_pre=True):\n for block in splittag(str, 'pre'):\n if ignore_pre and '<pre>' in block:\n self.insert_text(block)\n if 'Holds down and then' in block:\n print block\n else:\n self.insert_text('\\n'.join('<p>%s</p>' % line\n for line in block.splitlines()))", "def do_p(self, line):\n if not self.current:\n print_table(self.get_profiles(), self.vertical_display)", "def _process_parse(parse, coreflist):\n sentence = parse.get('sentences')\n if sentence:\n ptree = Tree.parse(tag_ptree(sentence[0]['parsetree'], coreflist))\n words = [(w[0], w[1]) for w in sentence[0]['words']]\n depends = [(d[0], d[1], d[2]) for d in sentence[0]['dependencies']]\n text = sentence[0]['text']\n\n return ptree, words, depends, text\n else:\n return None", "def test_reformat_paragraph_list_2_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. 
It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"4.0\", \"4.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def highlight_next_match(self):\n self.text.tag_remove('found.focus', '1.0',\n tk.END) # remove existing tag\n try:\n start, end = self.text.tag_nextrange('found', self.start, tk.END)\n self.text.tag_add('found.focus', start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.see(start)\n self.start = end\n except ValueError:\n if self.start != '1.0':\n self.start = '1.0'\n self.text.see('1.0')\n self.highlight_next_match()", "def highlight_next_match(self):\n self.text.tag_remove('found.focus', '1.0',\n tk.END) # remove existing tag\n try:\n start, end = self.text.tag_nextrange('found', self.start, tk.END)\n self.text.tag_add('found.focus', start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.see(start)\n self.start = end\n except ValueError:\n if self.start != '1.0':\n self.start = '1.0'\n self.text.see('1.0')\n self.highlight_next_match()", "def afterFlowable(self, flowable):\n\n if isinstance(flowable, Paragraph):\n style = flowable.style.name\n txt = flowable.getPlainText()\n\n if style == 'Title':\n self.title = txt\n elif style == 'Heading1':\n self.chapter = txt \n key = 'ch%s' % self.seq.nextf('chapter')\n self.canv.bookmarkPage(key)\n self.canv.addOutlineEntry(txt, key, 0, 0)\n self.seq.reset(\"section\")\n self.notify('TOCEntry', (0, txt, self.page, key))\n elif style == 'Heading2':\n self.section = flowable.text\n key = 'ch%ss%s' % (self.seq.thisf(\"chapter\"), self.seq.nextf(\"section\"))\n self.canv.bookmarkPage(key)\n self.canv.addOutlineEntry(txt, key, 1, 0)\n self.notify('TOCEntry', (1, txt, self.page, key))", "def generate_paragraphs(self):\n def dig(hr_tag, end_index):\n paragraphs = []\n for tag in hr_tag.children:\n if tag.name == 'hr':\n return paragraphs + dig(tag, end_index)\n text = (str(tag)\n if isinstance(tag, NavigableString)\n else tag.get_text())\n if '$' in text and not tag.find('table'):\n start_index = document_txt.index(text[:search_chars])\n end_index = start_index + len(text)\n paragraphs.append({\n 'text': text,\n 'start': start_index,\n 'end': end_index\n })\n return paragraphs\n\n with open('document.txt', 'rb') as f1:\n document_txt = f1.read().decode()\n search_chars = 20\n paragraphs = dig(self.soup.find('body'), 0)\n paragraphs = sorted(paragraphs, key=lambda x: x['start'])\n with open('paragraphs.txt', 'wb') as f2:\n f2.write(json.dumps(paragraphs, indent=2, sort_keys=True).encode())", "def onMainToMnemo(self):\n self.second_mnemo_text.SetFocus()", "def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False", "def process(self, doc):\n # don't try to process null notes\n if not doc[1]:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n # odd notes may throw an error. 
Just continue rather than stopping the entire process\n try:\n sentences = self.sentence_tokenizer.segToSentenceSpans(doc[1])\n except KeyError:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n\n #context_doc = pyConTextGraph.ConTextDocument() # ConTextDoc not needed for simple usage\n\n doc_annots = list()\n\n for sentence in sentences:\n # run sentence tokenizer on input text, return the spans\n sentence_text = doc[1][sentence.begin:sentence.end]\n # process every sentence by adding markup\n markup = pyConTextGraph.ConTextMarkup()\n markup.setRawText(sentence_text)\n markup.cleanText()\n # apply targets and modifiers\n markup.markItems(self.targets, mode=\"target\")\n markup.markItems(self.modifiers, mode=\"modifier\")\n # address scope of modifiers to targets, remove inactive modifiers and self-modifying relationships\n markup.pruneMarks()\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n markup.dropInactiveModifiers()\n\n marked_targets = markup.getMarkedTargets()\n for marked_target in marked_targets:\n modifiers = markup.getModifiers(marked_target)\n if not modifiers:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0]+'_unspecified', marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0], 'unspecified', marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n else:\n for modifier in modifiers:\n if marked_target.getSpan()[0] < modifier.getSpan()[0]:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+modifier.getSpan()[1])\n else:\n span = (sentence.begin+modifier.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0]+'_'+modifier.getCategory()[0], marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0], modifier.getCategory()[0], marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n\n #context_doc.addMarkup(markup)\n\n return doc_annots", "def test_reformat_paragraph_new_code_8_of_8(self):\n before_b = \"\"\"\\\n 2. Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n after_b = \"\"\"\\\n 2. 
Point 4 xxxxxxxxxxxxxxxxxxxxxxxxxxx\n Line 41.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"3.0\", \"3.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def text_by_paragraph(self,\r\n filename,\r\n splitchar=EOL,\r\n keys=True,\r\n key_definitions=False,\r\n query=True):\r\n\r\n\r\n analysetext = file_access.get_text_file(filename)\r\n #load the text to be analysed\r\n\r\n if keys:\r\n\r\n possible_keys = set()\r\n if len(self.keys())>50:\r\n nprint (\"TOO MANY KEYS\")\r\n for key in self.keys():\r\n #grab all keys, removing tags.\r\n #DESIDERATUM: Make it possible to\r\n #restrict the range of notes\r\n #from which the keys are grabbed\r\n\r\n if SLASH in key:\r\n if key.split(SLASH)[0] != EMPTYCHAR:\r\n possible_keys.add(key.split(SLASH)[0].lower())\r\n else:\r\n possible_keys.add(key.split(SLASH)[0].lower())\r\n\r\n\r\n possible_keys = list(possible_keys)\r\n\r\n possible_keys = show_list(possible_keys,\r\n from_here=0,\r\n to_here=len(possible_keys),\r\n label='KEYS',\r\n select=True,\r\n display=display)\r\n # show the keys through display\r\n #object and select which are to be kept\r\n possible_keys += input(queries.ADDITIONAL_KEYS).split(COMMA)\r\n display.noteprint((labels.KEYS,\r\n formkeys(possible_keys)))\r\n\r\n\r\n for paragraph in analysetext.split(splitchar):\r\n # iterate over segments of the text to be analysed\r\n found_words = set()\r\n keyset = set()\r\n\r\n if keys:\r\n found_words.update({a_temp for a_temp in get_words(paragraph)\r\n if len(a_temp) > 3}.intersection(set(possible_keys)))\r\n # make a set of all the words that have been found\r\n keyset = found_words\r\n if key_definitions:\r\n found_words.update(self.default_dict['definitions']\r\n .return_keys(get_words(paragraph)))\r\n keyset = found_words\r\n\r\n display.noteprint((formkeys(keyset),\r\n nformat.encase(paragraph,\r\n found_words,\r\n surround=False)))\r\n # display the segment as a note\r\n #with found words encased\r\n #in arrow brackets\r\n\r\n if not query:\r\n if keyset == set():\r\n keyset = {VOIDTERM}\r\n if paragraph.strip() != EMPTYCHAR:\r\n self.enter(ek=keyset,\r\n et=paragraph)\r\n\r\n else:\r\n\r\n if input(queries.INCLUDE) in YESTERMS+[EMPTYCHAR]:\r\n # ask if the found words\r\n #should be included as keys\r\n\r\n newkeys = set(input(formkeys(keyset)\r\n +queries.KEYWORDS_TO_ADD).split(COMMA)).union(keyset)\r\n if paragraph.strip() != EMPTYCHAR:\r\n self.enter(ek=newkeys, et=paragraph)\r\n if input(queries.CONTINUE + BLANK) not in YESTERMS+[EMPTYCHAR]:\r\n break", "def processJumpTable(jt_ea):", "def match_first_paragraph():\n html = \"<p>pybites != greedy</p>\" \"<p>not the same can be said REgarding ...</p>\"\n\n pattern = \"<p>(.+?)</p>\"\n text = html\n\n match = re.findall(pattern, text)\n return match[0]", "def _add_article(self, link, index=None):\n if self.verbose:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n link_url = self.base_url + link\n pdf_filepath = (\n self.output_raw_dir\n + \"/FOMC_PresConfScript_\"\n + self._date_from_link(link)\n + \".pdf\"\n )\n\n if not os.path.exists(pdf_filepath) or self.force_download:\n # Scripts are provided only in pdf. 
Save the pdf and pass the content\n res = requests.get(link_url)\n\n with open(pdf_filepath, \"wb\") as f:\n f.write(res.content)\n else:\n if self.verbose:\n print(\"File already exists: \", pdf_filepath)\n\n # Extract text from the pdf\n pdf_file_parsed = \"\" # new line\n with pdfplumber.open(pdf_filepath) as pdf:\n for page in pdf.pages:\n pg_width = page.width\n pg_height = page.height\n pg_bbox = (\n self.crop_coords[0] * float(pg_width),\n self.crop_coords[1] * float(pg_height),\n self.crop_coords[2] * float(pg_width),\n self.crop_coords[3] * float(pg_height),\n )\n page_crop = page.crop(bbox=pg_bbox)\n text = page_crop.extract_text()\n pdf_file_parsed = pdf_file_parsed + \"\\n\" + text\n paragraphs = re.sub(\"(\\n)(\\n)+\", \"\\n\", pdf_file_parsed.strip())\n paragraphs = paragraphs.split(\"\\n\")\n\n section = -1\n paragraph_sections = []\n for paragraph in paragraphs:\n if not re.search(\n \"^(page|january|february|march|april|may|june|july|august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\",\n paragraph.lower(),\n ):\n if len(re.findall(r\"[A-Z]\", paragraph[:10])) > 5 and not re.search(\n \"(present|frb/us|abs cdo|libor|rp–ioer|lsaps|cusip|nairu|s cpi|clos, r)\",\n paragraph[:10].lower(),\n ):\n section += 1\n paragraph_sections.append(\"\")\n if section >= 0:\n paragraph_sections[section] += paragraph\n self.articles[index] = self.segment_separator.join(\n [paragraph for paragraph in paragraph_sections]\n )", "def footnoteRef(self, text):\n return re.compile(r'(?<=\\S)\\[(\\d+)(!?)\\](\\s)?', re.U).sub(\n self.footnoteID, text\n )", "def _paragraphs_raw(self):\n for par in self.parsed.find_all(\"p\")[self.PAR_START:]:\n yield par", "def test_reformat_paragraph_list_4_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item number 3. It is the\n third item in the list.\n\n This paragraph ends the test. 
It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"10.0\", \"10.0\"),\n after_sel=(\"13.0\", \"13.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def get_chapter_text(location):\n chapter_output_dictionary = {}\n chapter_contents_list = []\n\n soup = Ripper(location, parser=\"html5lib\", save_path=save_path).soup\n text = soup.find(\"table\", class_=\"texttable\")\n\n for each in text.find_all(\"p\"):\n attributes = each.attrs\n if attributes:\n if \"desc\" in attributes[\"class\"]:\n pass\n elif \"note\" in attributes[\"class\"]:\n pass\n else:\n new_cont = each.contents\n chapter_contents_list.extend(new_cont)\n return join_chapter_text(chapter_contents_list)", "def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)", "def getDataParagraph(startpattern,stoppattern,datararray):\n output = []\n inparagraph = 'FALSE'\n lines=datararray\n for i in range(len(lines)):\n search_start=re.search(r'{0}'.format(startpattern),lines[i])\n if search_start is not None or inparagraph == 'TRUE':\n inparagraph = 'TRUE'\n lines[i] = lines[i].split('\\n')[0]\n if lines[i].startswith('*'):\n pass\n else:\n output.append(lines[i])\n search_stop=re.search(r'{0}'.format(stoppattern),lines[i])\n if search_stop is not None:\n return output\n pass", "def footnote_item(self, key, text):\n back = (\n '<a href=\"#fnref-%s\" class=\"footnote\">&#8617;</a>'\n ) % escape(key)\n text = text.rstrip()\n if text.endswith('</p>'):\n text = re.sub(r'<\\/p>$', r'%s</p>' % back, text)\n else:\n text = '%s<p>%s</p>' % (text, back)\n html = '<li id=\"fn-%s\">%s</li>\\n' % (escape(key), text)\n return html", "def find_text_in_p(self, el):\n\n all = []\n for el in el.findall(\".//p\"):\n t = el.text_content().strip()\n if len(t)<40:\n continue\n all.append(t)\n\n return \" \".join(all)" ]
[ "0.5886465", "0.58725065", "0.58705807", "0.57472676", "0.56360775", "0.55975014", "0.5573705", "0.5540514", "0.54371405", "0.54029816", "0.53338504", "0.53178537", "0.5315798", "0.5282773", "0.52491987", "0.52362645", "0.5195637", "0.5168886", "0.513893", "0.51377296", "0.5118753", "0.5088631", "0.5075016", "0.5071764", "0.5047268", "0.5017047", "0.5005871", "0.49910498", "0.49874976", "0.4936778", "0.4926041", "0.4919049", "0.4877412", "0.48595953", "0.48499316", "0.48443785", "0.47922003", "0.47802982", "0.47675872", "0.47646645", "0.47511595", "0.47256246", "0.47139958", "0.47078016", "0.47020352", "0.4697681", "0.46869442", "0.4673873", "0.46729764", "0.46578866", "0.46558815", "0.4647966", "0.46325785", "0.46244353", "0.46235827", "0.46217126", "0.46063894", "0.46021205", "0.46020183", "0.4584548", "0.45799038", "0.4567086", "0.45667344", "0.4557137", "0.45494983", "0.45479178", "0.453415", "0.45320606", "0.45314157", "0.45312214", "0.45263898", "0.4525976", "0.45239428", "0.45231903", "0.4522102", "0.45092356", "0.45060197", "0.4505863", "0.45053294", "0.4504869", "0.45047733", "0.45047733", "0.450352", "0.4501385", "0.4496951", "0.44891357", "0.44842756", "0.4479969", "0.4478814", "0.44782898", "0.4460351", "0.44595334", "0.44510978", "0.44501644", "0.4449899", "0.444508", "0.4444739", "0.44423863", "0.4441433", "0.4428486" ]
0.6464983
0
Tokenize on a line-by-line basis.
def tokenize_generic(self, tokenizer, N=None, drop_punctuation=True):
    ct, done = 0, False
    with open(self.textfile) as f:
        for ln in f.readlines():
            if done:
                break
            ln = ln.replace("(return)", "")
            for token in tokenizer(ln.strip()):
                if not done:
                    if (not drop_punctuation) or (token not in self.punctuation):
                        yield token
                        ct += 1
                        if (N is not None) and ct == N:
                            done = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenize_words(line):\n return", "def tokenize(\n self, text_row: Optional[List[str]], token_row: Optional[List[List[str]]]\n ):\n raise NotImplementedError", "def tokenize_line(line, max_line_length=None, number_token=None, name_token=None, gpe_token=None):\n new_line = word_tokenize(line,\n number_token=number_token,\n name_token=name_token,\n gpe_token=gpe_token)\n if max_line_length:\n new_line = new_line[:max_line_length]\n new_line = vector_to_string(new_line)\n return new_line", "def tokenize(src):\n\n pass", "def tokenize(self, lines):\n tokens = []\n\n for line_number, line in enumerate(lines):\n self._line_number = line_number + 1\n self._line = line.rstrip('\\r\\n')\n\n last_end = 0\n for token in _TOKEN_RE.finditer(self._line):\n if token.start() != last_end:\n self.error(\n 'invalid token',\n token=Token('INVALID',\n self._line[last_end:token.start()],\n self.filename, self._line,\n self._line_number, last_end))\n last_end = token.end()\n\n # Omit whitespace and comments now to avoid sprinkling this logic\n # elsewhere.\n if token.lastgroup in ('WHITESPACE', 'COMMENT',\n 'CONTINUATION'):\n continue\n tokens.append(\n Token(token.lastgroup, token.group(), self.filename,\n self._line, self._line_number, token.start()))\n if last_end != len(self._line):\n self.error(\n 'invalid token',\n token=Token('INVALID', self._line[last_end:],\n self.filename, self._line, self._line_number,\n last_end))\n\n if self._line.endswith('\\\\'):\n # This line is not finished yet.\n continue\n\n if tokens:\n # Return a copy of the token list so that the caller can be free\n # to modify it.\n yield tokens[::]\n tokens.clear()", "def tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('ERROR: unknown token type: ' + token)", "def _tokenize_line(self, line: str, pattern='\\W'):\n # TODO check nltk tokenize\n # TODO check string not to lower\n line = re.sub(\"[.,;:]\", \" \", line)\n return re.split(pattern, line.lower())", "def _tokenize(source):\n lines = source.split(\"\\n\")\n print(\n \"{type:<10}{string:<25} {start:^12} {end:^12}\".format(\n type=\"Type\", string=\"String\", start=\"Start\", end=\"End\"\n )\n )\n print(\"-\" * 60)\n for line in lines:\n tokens = collect_tokens(line)\n for token in tokens:\n print(token)", "def tokenize(self, input): # pylint: disable=redefined-builtin\n (tokens, _, _) = self.tokenize_with_offsets(input)\n return tokens", "def tokenise(sample):\n\n processed = sample.split()\n return processed", "def tokenize(fp):\n for line in fp:\n line = line.strip()\n if line[0] == '#':\n continue\n for tok in line.split():\n yield tok", "def _batch_tokenize(self, text: List[str]) -> List[List[str]]:\n return self.bert_model.batch_tokenize([t.strip() for t in text])", "def tokenize(line):\n tokens = [x for x in re.split(\"[ \\f\\n\\r\\t\\v,()]+\", line) if x]\n return tokens[0], tokens[1:]", "def tokenize(self, raw_text):\n # TODO implement\n raw_tokens = word_tokenize(raw_text.decode('utf8'))\n return self.filter_tokens(raw_tokens)\n # return self.split_by(raw_tokens, '-')", "def collect_tokens(line):\n tokens = []\n try:\n for tok in tokenize.generate_tokens(StringIO(line).readline):\n token = Token(tok)\n if not token.string.strip(): # ignore spaces\n continue\n if token.type == tokenize.COMMENT:\n break\n tokens.append(token)\n except tokenize.TokenError:\n return []\n except Exception as e:\n print(\"%s raised in utils.collect_tokens\" % 
repr(e))\n\n return tokens", "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n if self.split_on_punc:\n split_tokens.extend(self._run_split_on_punc(token))\n else:\n split_tokens.append(token) # pragma: no cover\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "def tokenize(line):\n line = line.strip()\n tokens = deque()\n permanent = line.startswith('@')\n if permanent:\n line = line[1:]\n while line:\n token, line, comment = OrdersParser._get_token(line)\n if comment:\n return (tokens, permanent, token)\n else:\n tokens.append(token)\n \n return (tokens, permanent, None)", "def tokenize(raw_text):\n def _xop(tokens):\n def _(x):\n return xop(x, \"op\", tokens)\n return _\n \n raw_tokens=xversa_split(raw_text, tokens=Op_Tokens+Group_Tokens)\n tokens=map(xtotype, raw_tokens) \n tokens=map(_xop(Op_Tokens+Group_Tokens), tokens)\n return tokens", "def tokenize_wrapper(input):\n skip = {token.NEWLINE, token.INDENT, token.DEDENT}\n tokens = tokenize.generate_tokens(io.StringIO(input).readline)\n for quintuple in tokens:\n type, value, start, end, line_text = quintuple\n if type not in skip:\n yield quintuple", "def _tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n return split_tokens", "def tokenize(self, text):\n text = utils.convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. 
This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "def _split_to_tokens(self, file_content: str):\n cur_token = ''\n string_started = False\n for line in file_content.split('\\n'):\n cur_token = ''\n line = line.strip()\n if line.startswith('#'): # skip comments\n continue\n for char in line:\n if string_started:\n if char == '\"': # string ended\n self._add_non_empty_token(cur_token)\n cur_token = '' # start of a new string\n string_started = False\n else:\n cur_token += char\n elif char == '\"':\n self._add_non_empty_token(cur_token)\n cur_token = '' # start of a new string\n string_started = True\n elif (char == \" \" and not string_started) or char == '\\n':\n self._add_non_empty_token(cur_token)\n cur_token = ''\n elif char in [':', '{', '}', '[', ']', ',']:\n self._add_non_empty_token(cur_token)\n self._tokens.append(char)\n cur_token = ''\n else:\n cur_token += char\n self._add_non_empty_token(cur_token)\n self._add_non_empty_token(cur_token)", "def tokenize_line(line, row, col):\n if len(line) == 0:\n return []\n\n # skip spaces\n if line[0].isspace():\n return tokenize_line(line[1:], row, col + 1)\n\n # check for comments\n if line[0] == '#':\n return []\n\n first_token = None\n\n # check for the first token\n for token_type, token_regex, value_func in token_lex_info_list:\n match = re.match(token_regex, line)\n if match is not None:\n uncooked = match.group(0)\n if first_token is None or\\\n len(first_token.uncooked) < len(uncooked):\n first_token = Token(type = token_type,\n value = value_func(uncooked),\n uncooked = uncooked,\n location = (row, col))\n\n if first_token is None:\n raise LexerException(\"Invalid token\", (row, col))\n else:\n token_len = len(first_token.uncooked)\n line_tail = line[token_len:]\n return [first_token] + tokenize_line(line_tail, row, col + token_len)", "def basic_tokenizer(line, normalize_digits=True):\n #returns words: array of tokens\n #removes <u>, </u>, [, ] from given line\n line = re.sub('<u>', '', line) #re.sub is regex\n line = re.sub('</u>', '', line)\n line = re.sub('\\[', '', line)\n line = re.sub('\\]', '', line)\n words = []\n #re.compiles a regex into a regex object so match or search can be used\n #python 3: b\"\" turns string into \"bytes literal\" which turns string into byte. Ignored in Python 2\n #r string prefix is raw string: '\\n' is \\,n instead of newline\n _WORD_SPLIT = re.compile(b\"([.,!?\\\"'-<>:;)(])\") #includes () for re.split below\n _DIGIT_RE = re.compile(r\"\\d\")\n #strip removes whitespace at beginning and end\n #lowercase string\n for fragment in line.strip().lower().split(): #each of these is a fragment ['you,', 'are', 'here!']\n for token in re.split(_WORD_SPLIT, fragment): #each token splits each fragment i.e. 
each token in ['here', '!']\n if not token: #if empty array\n continue\n if normalize_digits: #substitutes digits with #\n token = re.sub(_DIGIT_RE, b'#', token)\n words.append(token)\n return words", "def tokeneater(self, toktype, toktext, (srow,scol), (erow,ecol), line):\n # If we encounter any errors, then just give up.\n if toktype == token.ERRORTOKEN:\n raise tokenize.TokenError, toktype\n\n # Did we skip anything whitespace? If so, add a pseudotoken\n # for it, with toktype=None. (Note -- this skipped string\n # might also contain continuation slashes; but I won't bother\n # to colorize them.)\n startpos = self.line_offsets[srow] + scol\n if startpos > self.pos:\n skipped = self.text[self.pos:startpos]\n self.cur_line.append( (None, skipped) )\n\n # Update our position.\n self.pos = startpos + len(toktext)\n\n # Update our current line.\n self.cur_line.append( (toktype, toktext) )\n\n # When we reach the end of a line, process it.\n if toktype == token.NEWLINE or toktype == token.ENDMARKER:\n self.handle_line(self.cur_line)\n self.cur_line = []", "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self.clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._add_space_around_cjk_chars(text)\n\n orig_tokens = split_by_whitespace(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = remove_accents(token)\n split_tokens.extend(split_by_punctuation(token))\n\n output_tokens = split_by_whitespace(\" \".join(split_tokens))\n return output_tokens", "def tokenize(text):\n sentence = Sentence(text)\n return sentence.tokens()", "def tokenize(text, token):\n text = [token(x) for x in text]\n return text", "def line2Token(linein):\n#\t\"\"\" input=readlines() output=token\"\"\n\tlistout = []\n\tfor s in linein:\n\t\tif s==\"\":\n\t\t\tcontinue\n\t\ts = string.replace(s,'\\n','')\n\t\ts = string.replace(s,',',' ')\n s = string.replace(s,'=',\"= \")\n\t\ts = string.replace(s,':',\": \")\n\n\t\tlista=string.split(s)\n\t\tfor x in lista:\n\t\t\tif x<>\"\":\n\t\t\t\tlistout.append(x)\n\n\treturn listout", "def tokenize(\n tokenizer: Tokenizer,\n lines: typing.Iterable[str],\n language: typing.Optional[str] = None,\n is_csv: bool = False,\n csv_delimiter: str = \"|\",\n split_sentences: bool = False,\n inline_pronunciations: bool = False,\n) -> typing.Iterable[typing.Dict[str, typing.Any]]:\n # String used to join tokens.\n # See RegexTokenizer\n join_str: str = getattr(tokenizer, \"join_str\", \" \")\n\n if inline_pronunciations:\n assert language is not None\n lang_phonemes = gruut_ipa.Phonemes.from_language(language)\n assert lang_phonemes is not None, f\"Unsupported language {language}\"\n\n def process_lines(lines):\n for line in lines:\n yield encode_inline_pronunciations(line, lang_phonemes)\n\n lines = process_lines(lines)\n\n for line in lines:\n line = line.strip()\n if not line:\n continue\n\n utt_id = \"\"\n\n if is_csv:\n # Input format is id|text\n utt_id, line = line.split(csv_delimiter, maxsplit=1)\n\n sentences = list(tokenizer.tokenize(line))\n\n if split_sentences:\n # One output line per sentence\n for sentence_idx, sentence in 
enumerate(sentences):\n sentence_id = str(sentence_idx)\n if utt_id:\n sentence_id = f\"{utt_id}_{sentence_id}\"\n\n yield {\n \"id\": sentence_id,\n \"raw_text\": sentence.raw_text,\n \"raw_words\": sentence.raw_words,\n \"clean_words\": sentence.clean_words,\n \"tokens\": [dataclasses.asdict(t) for t in sentence.tokens],\n \"clean_text\": sentence.clean_text,\n \"sentences\": [],\n }\n else:\n # One output line per input line\n raw_words: typing.List[str] = []\n clean_words: typing.List[str] = []\n tokens: typing.List[Token] = []\n\n for sentence in sentences:\n raw_words.extend(sentence.raw_words)\n clean_words.extend(sentence.clean_words)\n tokens.extend(sentence.tokens)\n\n yield {\n \"id\": utt_id,\n \"raw_text\": line,\n \"raw_words\": raw_words,\n \"clean_words\": clean_words,\n \"tokens\": [dataclasses.asdict(t) for t in tokens],\n \"clean_text\": join_str.join(clean_words),\n \"sentences\": [dataclasses.asdict(s) for s in sentences],\n }", "def tokenize(self, rawt):\n def createtokens(rawt):\n for oper in self.control.sortedopers:\n if oper in rawt:\n par = rawt.partition(oper)\n return createtokens(par[0]) + [par[1]] + createtokens(par[2])\n for punc in self.control.punctuation:\n if punc in rawt:\n par = rawt.partition(punc)\n return createtokens(par[0]) + [par[1]] + createtokens(par[2])\n return [rawt]\n tokens = [token for token in createtokens(rawt) if token]\n ret = []\n currentquote = None\n for token in tokens:\n if token in self.control.allquotes and token:\n if currentquote == None:\n ret.append(token)\n currentquote = token\n else:\n if token == currentquote:\n currentquote = None\n ret[-1] += token\n elif currentquote:\n ret[-1] += token\n else:\n ret.append(token)\n #@define stuff\n linep = 0\n while linep < len(ret): \n if ret[linep] and ret[linep] in self.control.datadef:\n self.control.applyrules(ret.args.pop(0))\n linep+=1\n ret2 = []\n for token in ret:\n if token:\n if token[0] not in self.control.allquotes:\n if token.strip(self.control.nbwhitespace):\n if __debug__:\n assert token[-1] not in self.control.allquotes, token\n ret2.append(token.strip(self.control.nbwhitespace))\n else:\n ret2.append(token)\n\n ret = []\n for e in (e.strip(self.control.nbwhitespace) for e in ret2):\n if not ret or not e or e not in self.control.delims['endline']\\\n or str(ret[-1].data) not in self.control.delims['endline']:\n ret.append(group(e, control = self.control))\n # quit(list(str(x) for x in ret))\n return ret\n # return [group(e, control = self.control) for e in (e.strip(self.control.nbwhitespace) for e in ret2) if e]", "def tokenize(text):\n return text.split(' ')", "def basic_tokenizer(line, normalize_digits=True):\n line = re.sub(b'<u>', b'', line)\n line = re.sub(b'</u>', b'', line)\n line = re.sub(br'\\[', b'', line)\n line = re.sub(br'\\]', b'', line)\n words = []\n _WORD_SPLIT = re.compile(b\"([.,!?\\\"'-<>:;)(])\")\n _DIGIT_RE = re.compile(br\"\\d\")\n for fragment in line.strip().lower().split():\n for token in re.split(_WORD_SPLIT, fragment):\n if not token:\n continue\n if normalize_digits:\n token = re.sub(_DIGIT_RE, b'#', token)\n words.append(token)\n return words", "def tokenize(self, text):\n text = self._clean_text(text)\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case and token not in self.never_split:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n output_tokens = 
whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "def lex(lines):\n for line in lines:\n yield line.rstrip()", "def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())", "def tokenize(fp):\n\n tokenizer = Tokenizer()\n\n for (lineno, line) in enumerate(fp):\n try:\n line = line.decode('utf8')\n except UnicodeDecodeError as detail:\n print >>sys.stderr, \"failed to decode line %i: %s\" % (lineno+1,\n detail)\n line = line.decode('utf8', 'replace')\n\n # This should use \"yield from ...\" (new in Python 3.3)\n for t in tokenizer.feed_data(line):\n yield t\n last = tokenizer.finalize()\n if last:\n yield last", "def tokenize(self, text):\n # Ignore non-ASCII characters.\n text = remove_non_ascii(text)\n text = text.translate(Tokenizer.trans)\n tokens = [t for t in text.split() \n if len(t) >= self._config[u'min_len']\n and t not in self._config[u'stopwords']]\n self._counter.update(tokens)", "def tokenize_with_offsets(self, input): # pylint: disable=redefined-builtin\n name = None\n with ops.name_scope(name, \"WhitespaceTokenize\", [input]):\n input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)\n if input_tensor.shape.ndims is None:\n raise ValueError(\"Rank of input_tensor must be statically known.\")\n if ragged_tensor.is_ragged(input_tensor):\n if input_tensor.flat_values.shape.ndims > 1:\n # If the flat_values of our ragged tensor is multi-dimensional, we can\n # process it separately and our output will have the same nested\n # splits as our input.\n (tokens, starts,\n ends) = self.tokenize_with_offsets(input_tensor.flat_values)\n return (input_tensor.with_flat_values(tokens),\n input_tensor.with_flat_values(starts),\n input_tensor.with_flat_values(ends))\n else:\n # Recursively process the values of the ragged tensor.\n (tokens, starts,\n ends) = self.tokenize_with_offsets(input_tensor.values)\n return (input_tensor.with_values(tokens),\n input_tensor.with_values(starts),\n input_tensor.with_values(ends))\n else:\n if input_tensor.shape.ndims > 1:\n # Convert the input tensor to ragged and process it.\n return self.tokenize_with_offsets(\n ragged_conversion_ops.from_tensor(input_tensor))\n elif input_tensor.shape.ndims == 0:\n (tokens, starts, ends) = self.tokenize_with_offsets(\n array_ops_stack.stack([input_tensor]))\n return tokens.values, starts.values, ends.values\n else:\n # Our rank 1 tensor is the correct shape, so we can process it as\n # normal.\n return self._whitespace_tokenize_with_offsets(input_tensor)", "def tokenise(line):\n\n sline = str(line)\n tokens = []\n token = False\n quote = False\n tquote = \"\"\n start = 0\n for i in range(len(sline)):\n c = sline[i]\n if token and not quote:\n if c == \" \" or c == \"\\t\" or c == \"\\n\":\n # end of current token\n tokens.append(sline[start:i])\n token = False\n quote = False\n if token and (c == '\"' or c == \"'\"):\n # Detected a quote - flip the quote flag\n if quote:\n if c == tquote:\n quote = False\n else:\n quote = True\n tquote = c\n if not token:\n if c != \" \" and c != \"\\t\" and c != \"\\n\":\n # Start of a new token\n token = True\n start = i\n if c == '\"' or c == \"'\":\n # Also it's quoted\n quote = True\n tquote = c\n\n # End of the loop\n if token:\n # End of the last token\n tokens.append(sline[start : len(sline)])\n return tokens", "def tokenize(self):\n\n self.feats = {\n 'features': [], # Lists of the `InputFeatures` objects.\n 'segments': [], # Segments of the phrase. 
0: Promoun, 1: A-term, 2: B-term \n 'df_ids': [], # DataFrame index.\n 'target_token_ids': [] # Indexes of the target term in the tokens lists.\n }\n unique_id = 0 # Unique ID of the dataset.\n for _, row in tqdm(self.df.iterrows()):\n segment_tokens = self.tokenize_single_row(row)\n for j, segment in enumerate(segment_tokens):\n if segment['target_token_index'] > 0:\n features = self.tokens_to_features(unique_id, segment['tokens'])\n unique_id += 1\n self.feats['features'].append(features)\n self.feats['segments'].append(j)\n self.feats['target_token_ids'].append(segment['target_token_index'] )\n self.feats['df_ids'].append(row.ID)", "def tokenize(tokenizer, max_length, stride, row):\r\n # The input data must be tokenized in the order dictated by the side the\r\n # selected checkpoint apply the padding\r\n pad_on_right = tokenizer.padding_side == \"right\"\r\n \r\n return tokenizer(\r\n row[\"question\" if pad_on_right else \"context\"],\r\n row[\"context\" if pad_on_right else \"question\"],\r\n max_length=max_length,\r\n truncation=\"only_second\" if pad_on_right else \"only_first\",\r\n return_overflowing_tokens=True,\r\n return_offsets_mapping=True,\r\n stride=stride,\r\n padding=\"max_length\",\r\n )", "def preprocess_lines(movie_line):\n\ttokens = tokenizer.tokenize(movie_line)\n\twords = [word for word in tokens if word not in stopwords_set]\n\tstemmed_terms = [porter_stemmer.stem(word) for word in words]\n\tlemmatized_terms = [wordnet_lemmatizer.lemmatize(word) for word in stemmed_terms]\n\treturn lemmatized_terms", "def identity_tokenizer(text):\n return text", "def tokenize(self, path):\n assert os.path.exists(path)\n with open(path, 'r') as f:\n sentences = []\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences.append(sentence.split())\n self.data = sentences", "def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()", "def tokenize(text):\n source = list(text.rstrip().replace('\\n', ' '))\n return source", "def tokenize(self, text):\n return self._tokenize(text)", "def _tokenize(self, text: str) -> List[str]:\n text = text.lower().strip()\n return self.bpe.tokenize(text)", "def get_tokens(line: str) -> Generator[str, None, None]:\n for token in line.rstrip().split():\n if len(token) > 0:\n yield token", "def _tokenize(self, text: str) -> List[str]:\n return self.bpe.tokenize(text)", "def tokenize(lines):\n\tfor line in lines:\n\t\tif line == '':\n\t\t\tcontinue\n\t\t\n\t\tif line.startswith('#'):\n\t\t\tcontinue\n\n\t\tif line.startswith('[') and line.endswith(']'):\n\t\t\tyield ('key', line[1:-1])\n\t\t\tcontinue\n\t\t\n\t\tvalues = tuple(map(float, line.split(' ')))\n\t\tyield ('values', values)\n\t\tcontinue", "def tokenize(self, path, build_dict=False, thd=0):\n\n assert os.path.exists(path)\n\n if build_dict:\n # Add words to the dictionary\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n\n # Tokenize file content\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\n\n return ids_list", "def preprocess_file_to_tokens(self, dirPath, fnx):\n fnxPath = os.path.join(dirPath, fnx)\n with open(fnxPath) as f:\n fnx_data = f.read()\n fnx_data_nnl = re.sub(r'\\n', ' ', fnx_data)\n fnx_data_sb = re.sub(r\"( )+\", ' ', fnx_data_nnl)\n\n fnx_tokens = 
nltk.word_tokenize(fnx_data_sb)\n fnx_unk, fnx_tokens_prepped = self.infreq_to_UNK(fnx_tokens)\n \n return fnx_tokens_prepped", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(self, line: str) -> List[str]:\n\n # expand shortcuts and aliases\n line = self._expand(line)\n\n # check if this line is a comment\n if line.lstrip().startswith(constants.COMMENT_CHAR):\n return []\n\n # split on whitespace\n try:\n tokens = shlex_split(line)\n except ValueError as ex:\n raise Cmd2ShlexError(ex)\n\n # custom lexing\n tokens = self.split_on_punctuation(tokens)\n return tokens", "def tokenize(self, text):\n\n # Normalize text\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n\n # Tokenize text\n tokens = word_tokenize(text)\n\n # Remove stop words\n tokens = [t for t in tokens if t not in stopwords.words(\"english\")]\n\n # Lemmatization\n lemmatizer = WordNetLemmatizer()\n # reduce words to their root form using default pos\n tokens = [lemmatizer.lemmatize(t) for t in tokens]\n # lemmatize verbs by specifying pos\n tokens = [lemmatizer.lemmatize(t, pos='v') for t in tokens]\n # lemmatize verbs by specifying pos\n tokens = [lemmatizer.lemmatize(t, pos='a') for t in tokens]\n\n # drop duplicates\n tokens = list(pd.Series(tokens, dtype='object').drop_duplicates().values)\n\n return ' '.join(tokens)", "def tokenize_records(records):\r\n contents = map(lambda record: record[constants.TEXT], records)\r\n tokenized_records = [word_tokenize(record.lower()) for record in contents]\r\n lemmatized_records = lemmatize_words(tokenized_records)\r\n lemmatized_words = list()\r\n for lemmatized_record in lemmatized_records:\r\n lemmatized_words.extend(lemmatized_record)\r\n return lemmatized_words", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenizer(text):\n for entry in text.split('$$$$\\n'):\n if entry.rstrip():\n lines_stream = deque(entry.split('\\n'))\n else:\n continue\n\n # yield from _molfile(stream=lines_stream)\n for token in _molfile(stream=lines_stream):\n yield token\n\n if len(lines_stream):\n # yield from _sdfile(stream=lines_stream)\n for token in _sdfile(stream=lines_stream):\n yield token\n\n yield EndOfFile()", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def _tokenizer_(data, settings):\n\n input_col = settings['input_col']\n output_col = settings['output_col']\n min_token_length = settings['min_token_length']\n to_lowercase = settings['to_lowercase']\n pattern = settings.get('pattern', r\"(?u)\\b\\w\\w+\\b\")\n frag = settings['id_frag']\n\n token_pattern = re.compile(pattern)\n\n def tokenizer(doc):\n return token_pattern.findall(doc)\n\n result = []\n if len(data) > 0:\n\n for sentence in data[input_col].values:\n tokens = tokenizer(sentence)\n row = []\n for t in tokens:\n if len(t) > min_token_length:\n if to_lowercase:\n row.append(t.lower())\n else:\n row.append(t)\n result.append(row)\n\n else:\n result = np.nan\n\n if output_col in data.columns:\n 
data.drop([output_col], axis=1, inplace=True)\n\n data[output_col] = result\n\n info = generate_info(data, frag)\n return data, info", "def tokenize(self, path):\n assert os.path.exists(path), path\n # Add words to the dictionary\n with open(path, 'r') as f:\n # TODO: joblib\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in tqdm(f):\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids", "def transform_line(\n line: Line, mode: Mode, features: Collection[Feature] = ()\n) -> Iterator[Line]:\n if line.is_comment:\n yield line\n return\n\n line_str = line_to_string(line)\n\n ll = mode.line_length\n sn = mode.string_normalization\n string_merge = StringMerger(ll, sn)\n string_paren_strip = StringParenStripper(ll, sn)\n string_split = StringSplitter(ll, sn)\n string_paren_wrap = StringParenWrapper(ll, sn)\n\n transformers: List[Transformer]\n if (\n not line.contains_uncollapsable_type_comments()\n and not line.should_split_rhs\n and not line.magic_trailing_comma\n and (\n is_line_short_enough(line, mode=mode, line_str=line_str)\n or line.contains_unsplittable_type_ignore()\n )\n and not (line.inside_brackets and line.contains_standalone_comments())\n ):\n # Only apply basic string preprocessing, since lines shouldn't be split here.\n if Preview.string_processing in mode:\n transformers = [string_merge, string_paren_strip]\n else:\n transformers = []\n elif line.is_def:\n transformers = [left_hand_split]\n else:\n\n def _rhs(\n self: object, line: Line, features: Collection[Feature], mode: Mode\n ) -> Iterator[Line]:\n \"\"\"Wraps calls to `right_hand_split`.\n\n The calls increasingly `omit` right-hand trailers (bracket pairs with\n content), meaning the trailers get glued together to split on another\n bracket pair instead.\n \"\"\"\n for omit in generate_trailers_to_omit(line, mode.line_length):\n lines = list(right_hand_split(line, mode, features, omit=omit))\n # Note: this check is only able to figure out if the first line of the\n # *current* transformation fits in the line length. This is true only\n # for simple cases. All others require running more transforms via\n # `transform_line()`. This check doesn't know if those would succeed.\n if is_line_short_enough(lines[0], mode=mode):\n yield from lines\n return\n\n # All splits failed, best effort split with no omits.\n # This mostly happens to multiline strings that are by definition\n # reported as not fitting a single line, as well as lines that contain\n # trailing commas (those have to be exploded).\n yield from right_hand_split(line, mode, features=features)\n\n # HACK: nested functions (like _rhs) compiled by mypyc don't retain their\n # __name__ attribute which is needed in `run_transformer` further down.\n # Unfortunately a nested class breaks mypyc too. So a class must be created\n # via type ... 
https://github.com/mypyc/mypyc/issues/884\n rhs = type(\"rhs\", (), {\"__call__\": _rhs})()\n\n if Preview.string_processing in mode:\n if line.inside_brackets:\n transformers = [\n string_merge,\n string_paren_strip,\n string_split,\n delimiter_split,\n standalone_comment_split,\n string_paren_wrap,\n rhs,\n ]\n else:\n transformers = [\n string_merge,\n string_paren_strip,\n string_split,\n string_paren_wrap,\n rhs,\n ]\n else:\n if line.inside_brackets:\n transformers = [delimiter_split, standalone_comment_split, rhs]\n else:\n transformers = [rhs]\n # It's always safe to attempt hugging of power operations and pretty much every line\n # could match.\n transformers.append(hug_power_op)\n\n for transform in transformers:\n # We are accumulating lines in `result` because we might want to abort\n # mission and return the original line in the end, or attempt a different\n # split altogether.\n try:\n result = run_transformer(line, transform, mode, features, line_str=line_str)\n except CannotTransform:\n continue\n else:\n yield from result\n break\n\n else:\n yield line", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def tokenize(self, path):\n # Convert class 1,2 to 0,1\n # print(\"Convert class 1,2 to 0,1\")\n # Convert class 1,2 to 0,1\n dropped = cropped = 0\n oov_count = 0.\n word_count = 0.\n with open(path, 'r') as f:\n linecount = 0\n lines = []\n tags = []\n for line in f:\n linecount += 1\n if self.max_lines > 1 and linecount >= self.max_lines:\n break\n if self.lowercase:\n words = line.lower().strip().split()\n else:\n words = line.strip().split()\n tag, words = int(words[0]), words[1:]\n\n # if applying BPE\n if self.apply_bpe:\n words = [bp for word in words\n for bp in self.bpemb_en.encode(word)]\n\n if len(words) > self.maxlen:\n cropped += 1\n words = words[:self.maxlen]\n# try:\n# crop_words = words[:maxlen]\n# last_period = max(rindex(crop_words, '.'), rindex(crop_words, '!'), rindex(crop_words, ','))\n# except:\n# last_period = self.maxlen\n# if last_period < 10:\n# print(\"Sentence too short! {}\".format(words))\n# words = words[:last_period]\n if len(words) < 3:\n dropped += 1\n# print(words)\n continue\n words = ['<sos>'] + words\n words += ['<eos>']\n\n # vectorize\n vocab = self.dictionary.word2idx\n unk_idx = vocab['<oov>']\n indices = [vocab[w] if w in vocab else unk_idx for w in words]\n word_count += len(indices)\n oov_count += sum([1 if ii==unk_idx else 0 for ii in indices])\n # add to output list\n lines.append(indices)\n # Convert class 1,2 to 0,1\n # tag = tag - 1\n tags.append(tag)\n # tags = to_class_id(tags)\n print(\"Number of sentences cropped from {}: {} out of {} total, dropped {}. 
OOV rate {:.3f}\".\n format(path, cropped, linecount, dropped, oov_count/word_count))\n\n return list(zip(tags, lines))", "def tokenize_wordchars(lines):\n return", "def tokenize(self, path):\n dropped = 0\n with open(path, 'r') as f:\n linecount = 0\n lines = []\n for line in f:\n linecount += 1\n if self.lowercase:\n words = line[:-1].lower().strip().split(\" \")\n else:\n words = line[:-1].strip().split(\" \")\n if len(words) > self.maxlen:\n dropped += 1\n continue\n words = ['<sos>'] + words\n words += ['<eos>']\n # vectorize\n vocab = self.dictionary.word2idx\n unk_idx = vocab['<oov>']\n indices = [vocab[w] if w in vocab else unk_idx for w in words]\n lines.append(indices)\n\n print(\"Number of sentences dropped from {}: {} out of {} total\".\n format(path, dropped, linecount))\n return lines", "def tokenize(self, path):\n assert os.path.exists(path)\n # add the start of sentence token\n sentence_sep = [BOS]\n with open(path, 'r') as f:\n sentences = [BOS]\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences += sentence.split() + sentence_sep\n # split into list of tokens\n self.data = sentences", "def get_tokens(self, numbered_lines):\n tokens = []\n tokens_append = tokens.append\n\n # simple tokenization: spaces and some punctuation\n splitter = re.compile('[\\\\t =;]+').split\n\n for _line_number, line in numbered_lines:\n line = line.strip()\n if line:\n line = prepare_text_line(line)\n if line :\n line = strip_markup(line)\n if line and line.strip():\n for tok in splitter(line):\n # strip trailing quotes and ignore empties\n tok = tok.strip(\"' \")\n if not tok:\n continue\n # strip trailing colons: why?\n tok = tok.rstrip(':').strip()\n # strip leading @: : why?\n tok = tok.lstrip('@').strip()\n if tok and tok not in (':',):\n tokens_append(tok)\n logger.debug('CopyrightDetector:tokens: ' + repr(tokens))\n return tokens", "def tokenize(self, start_pos=0, text=None):\n pass", "def split_into_tokens(dataset, delimiter=\"\"):\n pass", "def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def tokenize(self, fileid):\n for paragraph in self.corpus.paras(fileids=fileid):\n sents = []\n for sent in sent_tokenize(paragraph, language='russian'):\n words = []\n for word in wordpunct_tokenize(sent):\n token = self.lemmatize(word)\n if not self.is_punct(token) and not self.is_stopword(token):\n\n words.append((token, str(self.morph.parse(word)[0].tag.POS)))\n\n sents.append(words)\n yield sents\n # yield [\n # (word, morph.parse(word)[0].tag.POS)\n # # pos_tag(wordpunct_tokenize(sent), lang='rus')\n # for sent in sent_tokenize(paragraph, language='russian')\n # for word in wordpunct_tokenize(sent)\n # ]\n # yield [\n # pos_tag(wordpunct_tokenize(sent), lang='rus')\n # for sent in sent_tokenize(paragraph, language='russian')\n # ]", "def tokenize(self, text):\n # text = convert_to_unicode(text)\n\n output_tokens = []\n for token in split_by_whitespace(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if 
is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def _multiprocess_tokenize(self, num_workers: int):\n\n # While displaying progress bar through tqdm, specify total number of\n # sequences to tokenize, because tqdm won't know in case of pool.imap\n with mp.Pool(num_workers) as pool:\n print(f\"[{self._split}] Tokenizing questions...\")\n _question_tuples = self.questions.items()\n _question_indices = [t[0] for t in _question_tuples]\n _questions = list(\n tqdm(\n pool.imap(word_tokenize, [t[1] for t in _question_tuples]),\n total=len(self.questions)\n )\n )\n self.questions = {\n i: question + [\"?\"] for i, question in\n zip(_question_indices, _questions)\n }\n # Delete variables to free memory.\n del _question_tuples, _question_indices, _questions\n\n print(f\"[{self._split}] Tokenizing answers...\")\n _answer_tuples = self.answers.items()\n _answer_indices = [t[0] for t in _answer_tuples]\n _answers = list(\n tqdm(\n pool.imap(word_tokenize, [t[1] for t in _answer_tuples]),\n total=len(self.answers)\n )\n )\n self.answers = {\n i: answer + [\"?\"] for i, answer in\n zip(_answer_indices, _answers)\n }\n # Delete variables to free memory.\n del _answer_tuples, _answer_indices, _answers\n\n print(f\"[{self._split}] Tokenizing captions...\")\n # Convert dict to separate lists of image_ids and captions.\n _caption_tuples = self.captions.items()\n _image_ids = [t[0] for t in _caption_tuples]\n _captions = list(\n tqdm(\n pool.imap(word_tokenize, [t[1] for t in _caption_tuples]),\n total=(len(_caption_tuples))\n )\n )\n # Convert tokenized captions back to a dict.\n self.captions = {i: c for i, c in zip(_image_ids, _captions)}", "def tokenize_text(self, s):\n tokens = []\n # We could just have a \"while s:\" here instead of \"for line: while\n # line:\", but for really large log messages with heavy\n # tokenization, the cost in both performance and memory\n # consumption of the approach taken was atrocious.\n for line in s.replace(\"\\r\\n\", \"\\n\").split(\"\\n\"):\n line = line + \"\\n\"\n while line:\n best_match = best_conv = best_userdata = None\n for test in self._formatters:\n match = test[0].search(line)\n # If we find and match and (a) its our first one, or (b) it\n # matches text earlier than our previous best match, or (c) it\n # matches text at the same location as our previous best match\n # but extends to cover more text than that match, then this is\n # our new best match.\n #\n # Implied here is that when multiple formatters match exactly\n # the same text, the first formatter in the registration list wins.\n if match and (\n (best_match is None)\n or (match.start() < best_match.start())\n or (\n (match.start() == best_match.start())\n and (match.end() > best_match.end())\n )\n ):\n best_match = match\n best_conv = test[1]\n best_userdata = test[2]\n # If we found a match...\n if best_match:\n # ... 
add any non-matching stuff first, then the matching bit.\n start = best_match.start()\n end = best_match.end()\n if start > 0:\n tokens.append(\n _item(match=line[:start], converter=self.format_text, userdata=None)\n )\n tokens.append(\n _item(match=best_match, converter=best_conv, userdata=best_userdata)\n )\n line = line[end:]\n else:\n # Otherwise, just add the rest of the string.\n tokens.append(_item(match=line, converter=self.format_text, userdata=None))\n line = \"\"\n return ViewVCHtmlFormatterTokens(tokens)", "def tokenize(self, input: str) -> List[Tuple[str, int, int]]:\n raise NotImplementedError", "def _multiprocess_tokenize(self, num_workers: int):\n\n # While displaying progress bar through tqdm, specify total number of\n # sequences to tokenize, because tqdm won't know in case of pool.imap\n with mp.Pool(num_workers) as pool:\n print(f\"[{self._split}] Tokenizing questions...\")\n _question_tuples = self.questions.items()\n _question_indices = [t[0] for t in _question_tuples]\n _questions = list(\n tqdm(\n pool.imap(word_tokenize, [t[1] for t in _question_tuples]),\n total=len(self.questions),\n )\n )\n self.questions = {\n i: question + [\"?\"]\n for i, question in zip(_question_indices, _questions)\n }\n # Delete variables to free memory.\n del _question_tuples, _question_indices, _questions\n\n print(f\"[{self._split}] Tokenizing answers...\")\n _answer_tuples = self.answers.items()\n _answer_indices = [t[0] for t in _answer_tuples]\n _answers = list(\n tqdm(\n pool.imap(word_tokenize, [t[1] for t in _answer_tuples]),\n total=len(self.answers),\n )\n )\n self.answers = {\n i: answer + [\"?\"] for i, answer in zip(_answer_indices, _answers)\n }\n # Delete variables to free memory.\n del _answer_tuples, _answer_indices, _answers\n\n print(f\"[{self._split}] Tokenizing captions...\")\n # Convert dict to separate lists of image_ids and captions.\n _caption_tuples = self.captions.items()\n _image_ids = [t[0] for t in _caption_tuples]\n _captions = list(\n tqdm(\n pool.imap(word_tokenize, [t[1] for t in _caption_tuples]),\n total=(len(_caption_tuples)),\n )\n )\n # Convert tokenized captions back to a dict.\n self.captions = {i: c for i, c in zip(_image_ids, _captions)}", "def tokenize_tag(text):\n return [tok for tok in single_tokenizer(text)]", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = []\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids.append(self.dictionary.word2idx[word])\n token += 1\n\n return ids", "def tokenize_chars(line):\n return", "def wordize(lines):\n parser = Parser()\n tokenizer = Tokenizer()\n word_ctr = WordCounter()\n words = []\n for l in lines :\n if (l.rstrip()) :\n statement = parser.parseSentence(l, int(word_ctr))\n token_lists = tokenizer.tokenizeStatement(statement, int(word_ctr))\n for l in token_lists :\n if len(l) > 0 :\n words.append(l)\n word_ctr += 1\n return words", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in 
tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def _tokenize(self, string):\n self._tokens = []\n\n # Split and strip the input string by newlines\n for token in re.split('(.*)', string):\n if token.strip() != '':\n self._tokens.append(token)", "def _whitespace_tokenize_with_offsets(self, input_tensor):\n (values, row_splits, start_offsets, end_offsets) = (\n gen_whitespace_tokenizer_v2.tf_text_whitespace_tokenize_with_offsets_v2(\n input_values=input_tensor, input_config=self._config))\n values = RaggedTensor.from_nested_row_splits(\n flat_values=values,\n nested_row_splits=[row_splits])\n start_offsets = RaggedTensor.from_nested_row_splits(\n flat_values=start_offsets,\n nested_row_splits=[row_splits])\n end_offsets = RaggedTensor.from_nested_row_splits(\n flat_values=end_offsets,\n nested_row_splits=[row_splits])\n return (values, start_offsets, end_offsets)", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r', encoding=\"utf8\") as f:\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r', encoding=\"utf8\") as f:\n idss = []\n for line in f:\n words = line.split() + ['<eos>']\n ids = []\n for word in words:\n ids.append(self.dictionary.word2idx[word])\n idss.append(torch.tensor(ids).type(torch.int64))\n ids = torch.cat(idss)\n\n return ids", "def span_tokenize_words(self, text):\n\t\tsentences = self.tokenize_sentences(text)\n\t\ttokens_per_sentence = list()\n\t\tsentence_offset = 0\n\t\t\n\t\tsentence_counter = 0\n\t\tfor sentence in sentences:\n\t\t\tsentence_tokens = list()\n\t\t\tfor token in self.word_tokenizer.span_tokenize(sentence):\n\t\t\t\t# save actual token together with it's positions\n\t\t\t\tbegin = token[0] + sentence_offset\n\t\t\t\tend = token[1] + sentence_offset\n\t\t\t\ttoken_tuple = (text[begin:end],begin,end,sentence_counter)\n\t\t\t\tsentence_tokens.append(token_tuple)\n\t\t\t\t\n\t\t\ttokens_per_sentence.append(sentence_tokens)\n\t\t\t\n\t\t\tsentence_counter = sentence_counter + 1\n\t\t\tsentence_offset = sentence_offset + len(sentence) + 1\n\t\t\n\t\treturn tokens_per_sentence", "def tokenize(self, input_string: str) -> List[str]:", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def split_whitespace_tokens(line):\n in_quote = False\n token = \"\"\n token_start = 0\n for i, char in enumerate(line):\n if char == ' ':\n if len(token) > 0:\n yield (token_start, token)\n token = \"\"\n else:\n if len(token) == 0:\n token_start = i\n token += char\n if len(token) > 0:\n yield (token_start, token)", "def tokenize(self, path, training_set=False):\n assert os.path.exists(path)\n with open(path, encoding='utf8') as fin:\n num_lines = sum(1 for _ in fin.readlines())\n with open(path, 'r', encoding=\"utf8\") as f:\n words = []\n for i, line in enumerate(tqdm(f, total=num_lines)):\n if self.max_lines > 0 and i > self.max_lines:\n break\n line = line.strip()\n if not line:\n continue # Skip empty lines.\n elif line.startswith('='):\n continue # Skip headers.\n else:\n sentence = (self.order - 1) * [SOS] + \\\n [process(word, self.lower) for word in line.split()] + [EOS]\n if training_set:\n words.extend(sentence)\n 
self.vocab.update(sentence)\n else:\n sentence = [word if word in self.vocab else UNK for word in sentence]\n words.extend(sentence)\n return words", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add symbol to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n tokens += len(line)\n for s in line:\n self.dictionary.add_symbol(s)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n for s in line:\n ids[token] = self.dictionary.symbol2idx[s]\n token += 1\n\n return ids", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids", "def preprocess(self, sentence, vocab_set=None):\n tokens = sentence.split()\n new_tokens = []\n for token in tokens:\n new_tokens += self.__clean(token)\n tokens = new_tokens\n\n tokens = self.__normalize_document(' '.join(tokens))\n\n return tokens", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]", "def preprocess(self,text):\n return preprocess.get_tokens(text)", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def tokenize(self):\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in 
range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1" ]
[ "0.69884706", "0.66323274", "0.6459621", "0.64471626", "0.6395952", "0.6366297", "0.63627106", "0.6332375", "0.62884957", "0.62397844", "0.620047", "0.61909837", "0.6182466", "0.61787194", "0.6176044", "0.61732286", "0.6165838", "0.6162342", "0.6160397", "0.6157418", "0.615142", "0.6129513", "0.6124969", "0.61070395", "0.6058943", "0.6048806", "0.6046805", "0.6020645", "0.6020171", "0.5999851", "0.59908134", "0.59895146", "0.59850377", "0.59798986", "0.59663945", "0.5961288", "0.59568745", "0.5953661", "0.5939347", "0.59392077", "0.5907301", "0.58812547", "0.5873932", "0.58699256", "0.58578634", "0.58509135", "0.5849148", "0.5841671", "0.58408695", "0.58387196", "0.582752", "0.58274585", "0.58252877", "0.5822991", "0.58172214", "0.58162224", "0.581546", "0.58142805", "0.5788797", "0.5784557", "0.57842773", "0.57840234", "0.5781467", "0.577383", "0.5773533", "0.57676417", "0.5767148", "0.57589984", "0.5751371", "0.5748216", "0.5739918", "0.5732561", "0.57267535", "0.5721466", "0.5717346", "0.5716998", "0.57153744", "0.57062733", "0.5703294", "0.57018965", "0.5698772", "0.5698591", "0.5684695", "0.56829935", "0.5676587", "0.5671458", "0.56628734", "0.56603163", "0.5659083", "0.5658848", "0.5655517", "0.5653851", "0.5634147", "0.56172305", "0.5617033", "0.5617033", "0.5615655", "0.56137455", "0.5611802", "0.56096035", "0.55984676" ]
0.0
-1
tokenize using the nltk default (ptb + a 'punkt' sentence tokenizer)
def tokenize(self, N=None, drop_punctuation=True, lower=True):
    for tok in self.tokenize_generic(word_tokenize, N=N, drop_punctuation=drop_punctuation):
        if lower:
            tok = tok.lower()
        yield tok
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gpt2_tokenize(self, text: str):\n return self.bpe_tokenize(text)", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def sentence_tokenizer(text):\n return sent_tokenize(text)", "def tokenize(sentence):\n return nltk.word_tokenize(sentence)", "def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def train_punkt_sent_tokenizer(train_ref,train_pickle):\r\n\timport nltk.tokenize.punkt\r\n\ttokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()\r\n\t\r\n\t# read training corpus\r\n\timport codecs\r\n\ttext = codecs.open(train_ref).read()\r\n\ttokenizer.train(text)\r\n\r\n\t# dump pickled tokenizer\r\n\timport pickle\r\n\tout = open(train_pickle,\"wb\")\r\n\tpickle.dump(tokenizer, out)\r\n\tout.close()", "def tokenize(text):\n lowers = text.lower()\n trans = {ord(c): None for c in string.punctuation}\n no_punctuation = lowers.translate(trans)\n trans_dig = {ord(c): None for c in string.digits}\n no_digits = no_punctuation.translate(trans_dig)\n tokens = nltk.word_tokenize(no_digits)\n return tokens", "def tokenize_text(text):\n return nltk.tokenize.word_tokenize(text)", "def tokenize(raw_text):\n tokenized_text = nltk.tokenize.word_tokenize(raw_text)\n return tokenized_text", "def tokenize(text):\n text = text.decode('utf-8')\n return nltk.word_tokenize(text)", "def tokenize_pt(text):\n #primeiros padrões, separação de palavra de [. , ? ! ( ) [ ] : ; ' ' \" \" ]\n return split_level_two(split_level_one(text))", "def tokenize(self, start_pos=0, text=None):\n pass", "def tokenize(sentence):\n tokens = nltk.word_tokenize(sentence)\n return tokens", "def tokenize(text):\n sentence = Sentence(text)\n return sentence.tokens()", "def get_tokens(sent):\n return word_tokenize(sent)", "def tokenize (self, text):\n if self.type == \"gpt2\":\n return self._tokenizer.tokenize(text)\n if self.type == \"bpe\":\n return self._tokenizer.EncodeAsPieces(text)", "def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())", "def _tokenize(self, text: str) -> List[str]:\n return self.bpe.tokenize(text)", "def _tokenize(text, language_code):\n seq = annotool.raw2basicseq(text, language_code, pos=False)\n word_seq = seq['word']\n return word_seq", "def tokenize(txt):\n Depunc = depunctuate(txt).lower()\n Tokens = word_tokenize(Depunc)\n \n return Tokens", "def wordpunct_space_tokenize(sent):\n return _rts.tokenize(sent)", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def simple_tokenizer(text):\n re_tok = re.compile(punctuation_string)\n return re_tok.sub(' ', text).split()", "def english_tokenzier(text: str):\n TOKENIZER = ToktokTokenizer().tokenize\n return TOKENIZER(text)", "def naive(self, text):\n\t\t#print(text)\n\t\ttokenizedText = []\n\t\tfor k in text: #look at each entity in one sentence\n\t\t\t\n\t\t\ta = \"\"#stores the current word \n\t\t\trun = []; #appends all words in a particular sentence\n\t\t\tfor i in range(len(k)):\n\t\t\t\t\n\t\t\t\tif(k[i] == 
' ' or k[i] == '\t'): #tokenization at space or tab\n\t\t\t\t\t\n\t\t\t\t\tif(a!=\"\"):\n\t\t\t\t\t\tif(a[-1] == ',' or a[-1] == '-' or a[-1] == \"\\'\" or a[-1] == \";\" or a[-1] == \":\" or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\\"\") : #but remove mentioned punctuations from the end of the word, if present\n\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):#remove starting quotes\n\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telif(i == len(k)-1): #remove the last punctuation mark, if present\n\t\t\t\t\t\n\t\t\t\t\ta = a+k[i];\n\t\t\t\t\t\n\t\t\t\t\tif(a[-1] == '.' or a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\'\" ):\n\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\n\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tif((k[i] == ',' or k[i] == ':' or k[i] == ';') and k[i+1]!= ' ' ): # for other punctuation marks followed by a space\n\t\t\t\t\t\t#print(k[i-1])\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\tif(a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" ):\n\t\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\ta = a+k[i];\n\n\t\t\ttokenizedText.append(run)\t\t\n\n\t\t\n\t\t\t\n\n\n\n\n\t\t#Fill in code here\n\n\t\treturn tokenizedText", "def _tokenize(self, text: str) -> List[str]:\n text = text.lower().strip()\n return self.bpe.tokenize(text)", "def tokenize(text):\n wt = nltk.RegexpTokenizer(pattern=r'\\s+', gaps=True)\n tokens = wt.tokenize(text)\n return tokens", "def tokenize_text(document, nlp):\n\n return [token.text for token in nlp(document)]", "def custom_tokenizer(nlp, infix_reg):\n return Tokenizer(nlp.vocab, infix_finditer=infix_reg.finditer)", "def tokenize(self, text, **kwargs):\n if self.opt['tracker'] == 'babi6':\n text = babi6_dirty_fix(text)\n text = text.replace('<SILENCE>', '_SILENCE_')\n\n return [t.text for t in NLP.tokenizer(text)]", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(text):\n text = text.lower()\n remove = re.compile('[' + string.punctuation + '0-9\\\\r\\\\t\\\\n]')\n cleanText = re.sub(remove, \" \", text)\n tokens = nltk.word_tokenize(cleanText)\n tokens = [w.lower() for w in tokens if (len(w) >= 3 and w not in ENGLISH_STOP_WORDS)]\n return tokens", "def tokenize(review):\n\n token = strip_multiple_whitespaces(strip_punctuation(review))\n return [token.split() for token in simple_preprocess(token) if token not in STOPWORDS]", "def tokenize(text):\n tokens = TreebankWordTokenizer().tokenize(text)\n tokens = lemmatize(tokens)\n tokens = filter(lambda s: len(s) > 2, tokens) # remove tokens with < 3 chars\n return tokens", "def tokenize(text):\n return text.split(' ')", "def tokenize_and_stem(doc):\n punctuation_remover = dict((ord(char), None) for char in string.punctuation)\n tokens = nltk.word_tokenize(doc.lower().translate(punctuation_remover))\n return PlagiarismDetector.stem_tokens(tokens)", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = 
nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]", "def tokenizer(s):\n\n tokens = tokenize(s.lower()) # apply the nltk tokenizer\n tokens = [t for t in tokens if doc_frequency[t]>5 and t not in stop_words]# and doc_frequency[t]<3000]\n \n return tokens", "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n ### joonho.lim @ 2019-03-15\n # if start > 0:\n # substr = \"##\" + substr\n # print ( '[substr]\\t%s\\t%s\\t%d\\t%d' % ( substr, substr in self.vocab, start, end))\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n output_tokens.insert(0, '[CLS]')\n output_tokens.append('[SEP]')\n return output_tokens", "def tokenize(text: str):\n # Create list of word tokens\n token_list = []\n # nlp = init_nlp(TaggingMethod.SPACY, Lang.ES, size=DictionarySize.MEDIUM)\n doc = nlp(text)\n token_list = [token.text for token in doc]\n # for token in doc:\n # token_list.append(token.text)\n return token_list", "def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = 
[lmtzr.lemmatize(word) for word in tokens]\n\n return tokens", "def detokenize(tokens):\n pass", "def basic_tokenize(tweet):\n tweet = \" \".join(re.split(\"[^a-zA-Z.,!?]*\", tweet.lower())).strip()\n return tweet.split()", "def process_token_sentence(text):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n return sentences", "def tokenize(t):\n tweet_tok = TweetTokenizer(strip_handles=True, reduce_len=True)\n tokens = tweet_tok.tokenize(t)\n wnl = WordNetLemmatizer()\n stems = []\n for item in tokens:\n stems.append(wnl.lemmatize(item))\n return stems", "def tokenize(src):\n\n pass", "def _word_tokenize_spacy(sent):\n tokens = SQuADDataTokenizer._spacy_tokenizer(sent)\n return tokens", "def tokenize(sentence):\n lower_s = sentence.lower()\n no_punc_s = lower_s.translate(TRANS, string.punctuation)\n\n word_list = no_punc_s.split()\n\n return word_list", "def english_tokenize(text):\n tokens = word_tokenize(text, 'english')\n stems = stem_tokens(tokens, stemmers['english'])\n stems = [i for i in stems if i not in punctuations]\n return stems", "def _tokenize(self, text):\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n bpe_tokens.extend([t for t in self.bpe(token).split(\" \")])\n return bpe_tokens", "def tokenize(text):\n global TOK\n tokens = TOK.tokenize(text)\n output = {\n 'words': tokens.words(),\n 'offsets': tokens.offsets(),\n 'pos': tokens.pos(),\n 'lemma': tokens.lemmas(),\n 'ner': tokens.entities(),\n }\n return output", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]", "def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words", "def sentence_tokenize(tokenizer, sent):\n tokens = tokenizer.EncodeAsIds(sent).tokenization\n return tokens", "def tokenize(doc):\n\n # Calls NLTK function to tokenize the document. 
Broken into individual words, cleans out punctuation\n tokens = nltk.word_tokenize(doc)\n\n return tokens", "def tokenize(self, text):\n # text = convert_to_unicode(text)\n\n output_tokens = []\n for token in split_by_whitespace(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def preprocess_document(raw,sentence_level):\r\n\r\n\t# raw = raw.decode(\"utf-8\")\r\n\t# raw = raw.encode(\"ascii\",\"ignore\")\r\n\t\r\n\tfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\r\n\tparam = PunktParameters()\r\n\ttokenizer = PunktSentenceTokenizer(param)\r\n\tif sentence_level:\r\n\t\tsentences = tokenizer.tokenize(raw)\r\n\t\tsentences_words = list()\r\n\t\tfor s in sentences:\r\n\t\t\tsentences_words.append((s.strip(),preprocess_sentence(s)))\r\n\t\treturn sentences_words\r\n\telse:\r\n\t\treturn [(raw.strip(),preprocess_sentence(raw))]", "def word_tokenize(text):\n word_list = []\n for sentences in nltk.sent_tokenize(text):\n for words in nltk.word_tokenize(sentences):\n word_list.append(words)\n return word_list", "def identity_tokenizer(text):\n return text", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenization(text):\n\n global tokenizer_tree\n tokenised_document = tokenizer_tree.tokenize(text)\n return tokenised_document", "def tokenize(tweet):\n tweet = \" \".join(re.split(\"[^a-zA-Z]*\", tweet.lower())).strip()\n tokens = [stemmer.stem(t) for t in tweet.split()]\n return tokens", "def tokenize(tweet):\n tweet = \" \".join(re.split(\"[^a-zA-Z]*\", tweet.lower())).strip()\n tokens = [stemmer.stem(t) for t in tweet.split()]\n return tokens", "def tokenize(text, token):\n text = [token(x) for x in text]\n return text", "def custom_tokenizer(self, nlp):\n # nlp.tokenizer = custom_tokenizer(nlp)\n return Tokenizer(\n nlp.vocab,\n prefix_search=regex.PREFIX_RE.search,\n suffix_search=regex.SUFFIX_RE.search,\n infix_finditer=regex.INFIX_RE.finditer,\n token_match=regex.SIMPLE_URL_RE.match,\n )", "def spanish_tokenize(text):\n tokens = word_tokenize(text, 'spanish')\n stems = stem_tokens(tokens, stemmers['spanish'])\n stems = [i for i in stems if i not in punctuations]\n return stems", "def _tokenize(self, text):\n if not text:\n return []\n\n text = PUNCTUATION_CHARS.sub(' ', text)\n\n words = [\n t[:128].lower() for t in text.split()\n if len(t) >= MIN_WORD_LENGTH and t.lower() not in 
STOP_WORDS\n ]\n\n return words", "def tokenize(text):\n return [token.lower() for token in simple_preprocess(text) if token not in STOPWORDS]", "def tokens_from_string(self, text):\n\n if self.level == \"character\":\n return list(text)\n elif self.level == \"word\":\n return nltk.word_tokenize(text)\n else:\n print(\"error: invalid level\")", "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "def tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(text)]", "def tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(text)]", "def tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(text)]", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(raw_text):\n def _xop(tokens):\n def _(x):\n return xop(x, \"op\", tokens)\n return _\n \n raw_tokens=xversa_split(raw_text, tokens=Op_Tokens+Group_Tokens)\n tokens=map(xtotype, raw_tokens) \n tokens=map(_xop(Op_Tokens+Group_Tokens), tokens)\n return tokens", "def my_tokenize(sentence):\n sentence = sentence.lower()\n ll = word_tokenize(sentence)\n lls = [stemmer.stem(ii) for ii in ll if re.search(r'[a-z0-9]+', ii)]\n\n return lls", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize_text(text):\n tokens = []\n for sent in nltk.sent_tokenize(text):\n for word in nltk.word_tokenize(sent):\n if len(word) < 2:\n continue\n tokens.append(word.lower())\n return tokens", "def process_text(text, stem=True):\n table = str.maketrans(\"\",\"\",string.punctuation) \n text = text.translate(table)\n tokens = word_tokenize(text)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def _tokenize(self, text):\n if not text:\n return []\n\n text = self.PUNCTUATION_CHARS.sub(' ', text)\n\n words = [t[:128] for t in text.split() if len(t) >= self.MIN_WORD_LENGTH and t.lower() not in self.STOP_WORDS]\n\n return words", "def process_text(text, stem=True):\n text = text.translate(str.maketrans('', '', string.punctuation))\n tokens = word_tokenize(text)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def tokenize(text):\n text = re.sub('[^A-Za-z0-9]', ' ', text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenize(self, text: str) -> List[str]:\n\n # calls the selected tokenizer function e.g. 
're' => re_tokenize(text)\n word_tokens = self.gpt2_tokenize(text)\n\n return word_tokens", "def tokenize(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n s = stopwords.words('english')\n result = []\n for token in clean_tokens:\n if token not in s:\n result.append(token)\n\n return result", "def process_text(text, stem=True):\n exclude = set(string.punctuation)\n text = ''.join(ch for ch in text if ch not in exclude)\n #text = text.translate(None, string.punctuation)\n tokens = word_tokenize(text)\n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n return tokens", "def process_text(text, stem=True):\n table = string.maketrans(\"\",\"\")\n text = text.translate(table, string.punctuation)\n tokens = word_tokenize(text)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def parse(self, tokenizer):\n pass", "def tokenize(self, fileid):\n for paragraph in self.corpus.paras(fileids=fileid):\n yield [\n pos_tag(wordpunct_tokenize(sent))\n for sent in sent_tokenize(paragraph)\n ]", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def tokenize(self, text):\n # Ignore non-ASCII characters.\n text = remove_non_ascii(text)\n text = text.translate(Tokenizer.trans)\n tokens = [t for t in text.split() \n if len(t) >= self._config[u'min_len']\n and t not in self._config[u'stopwords']]\n self._counter.update(tokens)", "def tokenize(text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n words = word_tokenize(text)\n words = [w for w in words if w not in stopwords.words(\"english\")]\n stemmed = [WordNetLemmatizer().lemmatize(w) for w in words]\n return(stemmed)", "def word_tokenize(sentence, number_token=None, name_token=None, gpe_token=None):\n tokenized_sentence = nltk.word_tokenize(sentence)\n # Verify that the tokens are lowercase or none and then do the replacement\n _check_tokens(number_token, name_token, gpe_token)\n\n tokenized_sentence = number2token(tokenized_sentence, number_token)\n tagged_sentence = entities2token(tokenized_sentence, name_token, gpe_token)\n\n return tagged_sentence", "def tokenize(self, fileids=None, categories=None):\n morph = pymorphy2.MorphAnalyzer()\n for paragraph in self.paras(fileids=fileids):\n yield [\n (word, morph.parse(word)[0].tag.POS)\n # pos_tag(wordpunct_tokenize(sent), lang='rus')\n for sent in sent_tokenize(paragraph)\n for word in wordpunct_tokenize(sent)\n ]", "def process_text(text):\n text = text.translate(translator)\n tokens = word_tokenize(text)\n# if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def _tokenize(ustr1):\n\n return list(tk.generate_tokens(io.StringIO(ustr1).readline))", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = 
WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens" ]
[ "0.7247982", "0.7229188", "0.7094783", "0.70895004", "0.70805717", "0.69946355", "0.6974862", "0.6922694", "0.6919841", "0.68694", "0.6835729", "0.6791642", "0.67796576", "0.6747571", "0.6684571", "0.66801494", "0.6662924", "0.6644727", "0.66325396", "0.6598598", "0.6588903", "0.65691334", "0.65586853", "0.6558186", "0.6549853", "0.6478443", "0.64611876", "0.64549243", "0.6454689", "0.644867", "0.64195466", "0.6407422", "0.64037126", "0.6390652", "0.63852113", "0.6379052", "0.63688165", "0.63688165", "0.6366419", "0.6366204", "0.63631684", "0.6355827", "0.6348684", "0.63484967", "0.6342229", "0.6333818", "0.6308071", "0.63023174", "0.63005716", "0.6291695", "0.6281657", "0.62743795", "0.62678057", "0.6227588", "0.6225919", "0.6219901", "0.6208961", "0.62062466", "0.62051904", "0.62049043", "0.6204524", "0.62026596", "0.6202526", "0.61996126", "0.6196428", "0.6196428", "0.6191171", "0.61894536", "0.6186232", "0.61860377", "0.6180211", "0.61750156", "0.6167292", "0.61640924", "0.61640924", "0.61640924", "0.6163359", "0.6163207", "0.61618376", "0.61612433", "0.61531717", "0.6151722", "0.6148146", "0.61473435", "0.6137973", "0.61277336", "0.61272985", "0.612609", "0.61227787", "0.61217695", "0.6118919", "0.61144996", "0.61115485", "0.61046946", "0.60973305", "0.6092075", "0.60884213", "0.6085781", "0.60843825", "0.6069151", "0.60649437" ]
0.0
-1
boolean of if changes have been made to present sha
def dirty(cls):
    output = subprocess.check_output(DIRTY_INCANTATION)
    return len(output.strip()) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)", "def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 0", "def is_rev_dirty(ctx: \"PlanemoCliContext\", directory: str) -> bool:\n return io.shell([\"git\", \"diff\", \"--quiet\"], cwd=directory) != 0", "def is_changed(self, include_md: bool = True) -> bool:\n current = self.calculate_hash(include_md=include_md)\n stored = self.hash if include_md else self.stub_hash\n log.trace(f\"changed = {self.hash != current} | Stored: {stored} | Current: {current}\")\n return stored != current", "def is_sha(self):\n return self.name.startswith('SHA')", "def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True", "def consistent(self):\n return self.incore_digest == self.ondisk_digest", "def hash_comparison(self):\n for result in self.cards:\n if result.hash_status:\n return True\n return False", "def _is_commit_sha(commit):\n return len(commit) == 40 and all([\n ch.isdigit() or (ch >= \"a\" and ch <= \"f\")\n for ch in commit.elems()\n ])", "def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0", "def check(self) -> bool:\n return self.check_sum() == self.__md5_sum", "def hasChanged(self):\n return ((self.mtime != getmtime(self.path)) or\n (self.size != os.path.getsize(self.path)) )", "def isOpen(self):\n return self.analyzed_digest != {}", "def needs_update(self, cache_key):\r\n return self._read_sha(cache_key) != cache_key.hash", "def is_git_sha(text):\n # Handle both the full sha as well as the 7-character abbreviation\n if len(text) in (40, 7):\n try:\n int(text, 16)\n return True\n except ValueError:\n pass\n return False", "def _check_md5(self):\n\n self.log.info('-' * 80)\n self.log.info('Check md5 sum')\n\n self.log.info(self._ref_value)\n self.log.info(self._output_file)\n\n code, out = cmd_exec(['md5sum', self._output_file], shell=False, log=self.log)\n if code:\n self.log.error(out)\n return False\n self.log.info(out)\n\n md5sum, _ = out.split(' ')\n\n self.log.info(f'reference md5: {self._ref_value}')\n self.log.info(f'actual md5: {md5sum}')\n\n if self._ref_value != md5sum:\n return False\n\n return True", "def has_changes(directory=None):\n out = check_output('git status', shell=True, cwd=directory)\n if 'nothing to commit (working directory clean)' in out:\n return False\n if 'nothing to commit, working directory clean' in out:\n return False\n if 'nothing to commit, working tree clean' in out:\n return False\n if 'nothing added to commit' in out:\n return False\n return True", "def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False", "def changed(self):\n if self.exists():\n return self.current_content != self.content\n else:\n return True", "def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False", "def _is_tracked(filename, metadata):\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is 
not None \\\n and current_local_sha == current_remote_sha", "def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False", "def has_changed(self) -> bool:\n # TODO: Add in change logic here\n state = None\n if state != self._file_state:\n self._changed_flag = True\n self._file_state = state\n return self._changed_flag", "def need_update(self):\n self.logging.debug( \"need_update()\" )\n\n for name in self.tables:\n\n md5 = self.dbs_tables[name]['md5']\n test = get_md5(self.dbs_tables[name]['path'])\n\n self.logging.debug('(%s) table:%s md5:[old: %s new: %s]' % \\\n (self.db,name,md5,test) )\n\n if test != md5: return True\n\n return False", "def has_unsaved_changes(self):\n return self._file_content != self.buffer.text", "def validate_sha_github(sha):\n r = requests.head(github_changeset_url % sha)\n return r.status_code == 200", "def is_old_sha(operator_tag):\n return operator_tag in SHA_DIGESTS_BEFORE_RESTRICTED_MODE_SUPPORT", "def sha256_supplied(self):\n return self._sha256_supplied", "def check_modified(self) -> bool:\n return bool(self._modified)", "def has_hash(self, h):\n rsp = h.hashlist(self.path)\n if re.search(\"\\n[0-9a-f]+\\smd5\\s%s\" % self.path, rsp):\n rval = True\n else:\n rval = False\n return rval", "def new_commits(repo, sha):\n from datetime import datetime\n\n dateformat = \"%a, %d %b %Y %H:%M:%S GMT\"\n release_commit = repo.get_commit(sha)\n since = datetime.strptime(release_commit.last_modified, dateformat)\n commits = repo.get_commits(since=since)\n if len(list(commits)) == 1:\n return False\n return reversed(list(commits)[:-1])", "def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0", "def isDirty(self):\n\t#@DEBUG christophe have to fix denoising optionnal issue prior to set isDirty() to True\n return False", "def test_eq_true(self):\n self.assertTrue(self.instance == Commit(self.instance.sha))", "def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty", "def is_update_success():\n file_success = os.getcwd() + \"\\\\last_success.txt\"\n if os.path.exists(file_success):\n return True\n else:\n return False", "def dirty(self):\n return not self.consistent", "def need_checksum_for_changes(self):\n # We don't need to run checksum for changes, if we don't want checksum\n # at all\n if not self.need_checksum():\n return False\n if self.is_full_table_dump:\n log.warning(\n \"We're adding new primary key to the table. 
Skip running \"\n \"checksum for changes, because that's inefficient\"\n )\n return False\n return True", "def has_staged_changes(repo):\n return subprocess.call(['git', 'diff-index', '--cached', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def hasChanged(self):\r\n if self.is_updated:\r\n self.is_updated = False\r\n return True\r\n else:\r\n return False\r\n\r\n # if not self.hasBeenUpdatedOnce:\r\n # self.hasBeenUpdatedOnce = True\r\n # return True\r\n # else:\r\n # if BLENDER_MODE == 'BPY':\r\n # # for e in dir(self.obj): print(e)\r\n # # print(self.obj, self.obj.name, self.obj.is_updated, self.obj.is_updated_data)\r\n # # return self.obj.is_updated # DOESN't UPDATE A THING!\r\n # # return True\r\n # return self.is_updated\r\n\r\n # return False # no update in BGE mode\r", "def is_git_dirty():\n dirty_status = local('git diff --quiet || echo \"*\"', capture=True)\n if dirty_status == '*':\n return True\n\n untracked_count = int(local('git status --porcelain 2>/dev/null| grep \"^??\" | wc -l', capture=True))\n if untracked_count > 0:\n return True\n\n return False", "def check_file(self, path, approve_if_no_dbhash=False):\r\n if self.mod.filehash:\r\n h = create_filehash(path)\r\n return h == self.mod.filehash\r\n return approve_if_no_dbhash", "def has_changed(self):\n return bool(self.changed_data)", "def get_changed() -> bool:\n return g.ledger.changed()", "def was_successful(self):\n return self._build_proto.status == common.SUCCESS", "def _check_guts_eq(attr, old, new, last_build):\n if old != new:\n print \"building because %s changed\" % attr\n return True\n return False", "def has_changed(self):\n timestamp = os.stat(self.filename).st_mtime\n if timestamp > self.last_timestamp:\n self.last_timestamp = timestamp\n return True\n return False", "def changed(dirname, filename='.md5', args=None, glob=None):\n root = Path(dirname)\n if not root.exists():\n # if dirname doesn't exist it is changed (by definition)\n return True\n\n cachefile = root / filename\n current_digest = cachefile.open().read() if cachefile.exists() else \"\"\n \n _digest = digest(dirname, glob=glob)\n if args and args.verbose: # pragma: nocover\n print(\"md5:\", _digest)\n has_changed = current_digest != _digest\n\n if has_changed:\n with open(os.path.join(dirname, filename), 'w') as fp:\n fp.write(_digest)\n\n return has_changed", "def check_dependency(self, repo, minhash=None):\n try:\n p = Project.objects.get(repo_url=repo)\n except Project.DoesNotExist:\n return False\n j = p.last_successful_job()\n\n if j:\n if minhash:\n if p.commit_in_history(minhash, j.commit):\n # We already have a successful job that is new enough\n return True\n else:\n return True\n\n return False", "def check_md5checksum_in_cache_modified(file_hash: str, cache_path: Path, update: bool) -> bool:\n if cache_path.exists():\n old_md5_checksum_content = Path(cache_path).read_text()\n if old_md5_checksum_content.strip() != file_hash.strip():\n if update:\n save_md5_file(cache_path, file_hash)\n return True\n else:\n if update:\n save_md5_file(cache_path, file_hash)\n return True\n return False", "def changed(self) -> bool:\n for chunk_location, chunk in self._chunk_cache.items():\n if chunk is None:\n # if the chunk is None and the saved record is not None, the chunk has changed.\n if chunk_location not in self._chunk_index:\n return True\n _, save_chunk_index = self._chunk_index[chunk_location]\n chunk_storage = self._chunk_history[chunk_location]\n if chunk_storage[save_chunk_index] is not None:\n return True\n elif chunk.changed:\n return 
True\n for chunk_index, save_chunk_index in self._chunk_index.values():\n if chunk_index != save_chunk_index:\n return True\n return False", "def _is_hash_valid(self):\n downloaded_hash = sha1(self._downloaded_bytes).digest()\n return downloaded_hash == self.hash", "def is_change(self) -> bool:\n return self._change", "def check(self):\n if self.is_signed():\n data = self._document.read()\n hash_value = data[-self._append_size+1:-1]\n data = data[:-self._append_size]\n\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n current_hash_value = encrypted[-16:]\n\n if current_hash_value != hash_value:\n print(\"Hash values did not matched!\")\n else:\n print(\"Hash values matched!\")\n else:\n print(\"The document is not signed!\")", "def has_unsaved_changes():\n return False", "def verify_checksum(self):\n return self.generate_header_checksum(omit_checksum=False) == 0", "def canBeAccessed(self):\n \n try:\n self._client.log(self._repositoryUri)\n return True\n except ClientError, error:\n _logger.debug(error.args[0])\n for _, errorCode in error.args[1]:\n if errorCode == 160006: # We have no commit in the repository, but its ok.\n return True\n return False", "def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)", "def modified(self):\n\t\treturn self.last_modified > self.last_processed", "def is_modified(self):\n return len(self.modified_fields) > 0", "def is_dirty(self):\n return True in [n.is_dirty for n in self.nodes]", "def dirty(self):\n return self._orig_line is not None", "def modified(self):\r\n\t\treturn self.last_modified > self.last_processed", "def valid(self):\n return self.hash.to_int('little') < self.target", "def has_changed(self):\n return self.get_old_value() != self.get_current_value()", "def has_been_modified(self):\n return self._has_been_modified", "def verify_blob_checksum(self, blob):\n path = self.csum_to_path(blob)\n csum = path.checksum()\n return csum != blob", "def valid(self):\n return (self.get(\"~#mtime\", 0) and\n self[\"~#mtime\"] == util.mtime(self[\"~filename\"]))", "def is_changed(obj):\n revision_field = get_version_fieldname(obj)\n version = get_revision_of_object(obj)\n return not obj.__class__.objects.filter(**{obj._meta.pk.name: obj.pk,\n revision_field: version}).exists()", "def path_touched(*paths, commit_range):\n return check_output([\n 'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''", "def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def updated(self):\n return self._dict_hash != self.gen_model_hash(self.json(sort_keys=True))", "def is_staging_clean() -> bool:\n c = cmd.run(\"git diff --no-ext-diff --cached --name-only\")\n return not bool(c.out)", "def _is_always_unsatisfied(self):\n # If this is a github sha tarball, then it is always unsatisfied\n # because the url has a commit sha in it and not the version\n # number.\n url = self._req.url\n if url:\n filename = filename_from_url(url)\n if 
filename.endswith(ARCHIVE_EXTENSIONS):\n filename, ext = splitext(filename)\n if is_git_sha(filename):\n return True\n return False", "def isDirty( self ):\n return self._dirty", "def is_hash_locally_cached(self, ipfs_hash: str, ipfs_refs_local=None) -> bool:\n output = run([\"ipfs\", \"files\", \"stat\", \"--with-local\", \"--size\", f\"/ipfs/{ipfs_hash}\"])\n if \"(100.00%)\" in output:\n log(\"already fully cached\", \"green\")\n log(output)\n return True\n else:\n log(\"not fully cached\", \"red\")\n log(output)\n return False", "def is_valid_git_sha1(hash):\r\n\r\n if len(hash) != 40:\r\n return False\r\n try:\r\n value = int(hash, 16)\r\n except ValueError:\r\n return False\r\n\r\n return True", "def validate_checksum(self):\n return self.calculate_checksum() == self.checksum()", "def verify(path, sha_path, verbose):\n if verbose:\n print(\"verifying\", path)\n with open(path, \"rb\") as source:\n found = hashlib.sha256(source.read()).hexdigest()\n with open(sha_path, \"r\") as sha256sum:\n expected = sha256sum.readline().split()[0]\n verified = found == expected\n if not verified:\n print(\"invalid checksum:\\n\"\n \" found: {}\\n\"\n \" expected: {}\".format(found, expected))\n return verified", "def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):\n for (nm, fnm, typ) in old:\n if mtime(fnm) > last_build:\n print \"building because %s changed\" % fnm\n return True\n elif pyc and mtime(fnm[:-1]) > last_build:\n print \"building because %s changed\" % fnm[:-1]\n return True\n return False", "def isDirty(self):\n return self._dirty", "def checkHash(song):\n\tsql = \"Select path, filename, hash from songs where hash = '\" + song.hash + \"';\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tnotexists = True\n\tfor (path, filename, hash) in c:\n\t\tif hash == song.hash:\n\t\t\tnotexists = False\n\t\telse:\n\t\t\tnotexists = True\n\treturn notexists", "def audit(self, data, proof_hashes):\n if self.root_hash == None:\n return False\n\n hash_ = self.sha256Sum(data)\n\n # A one element tree does not make much sense, but if one exists\n # we simply need to check if the files hash is the correct root\n if self.max_height == 0 and hash_ == self.root_hash:\n return True\n if self.max_height == 0 and hash_ != self.root_hash:\n return False\n\n proof_hashes_cp = copy.copy(proof_hashes)\n return self._audit(hash_, proof_hashes_cp)", "def needs_rebuild(self) -> bool:\n old_hash = self._cache.get(\"config\", None)\n new_hash = utilities.hash_object_sha256(self._get_config_raw())\n self._cache[\"config\"] = new_hash\n\n if not old_hash:\n return False\n return old_hash != new_hash", "def _can_update(self):\r\n if not self._is_persisted: return False\r\n pks = self._primary_keys.keys()\r\n return all([not self._values[k].changed for k in self._primary_keys])", "def has_unsaved_changes(self):\n # TODO\n pass", "def changelog_exists():\n\n output = subprocess.getoutput(['ls'])\n return 'changelog.md' in output.lower()", "def is_dirty(self):\n return self.dirty", "def is_dirty(self):\n return self.dirty", "def original_modified(self):\n if self.modified > self.created:\n return True\n else:\n return False", "def changelog_updated(target_branch):\n\n output = subprocess.getoutput(['git diff HEAD origin/{}'.format(target_branch)])\n return 'a/changelog.md b/changelog.md' in output.lower()", "def __eq__(self, other):\n return self.sha == other.sha", "def is_release_notes_changed(self):\n # there exists a difference between origin/master and current branch\n if self.master_diff:\n diff_releases = 
self.master_diff.split('##')\n unreleased_section = diff_releases[1]\n unreleased_section_lines = unreleased_section.split('\\n')\n\n adds_in_diff = 0\n removes_in_diff = 0\n\n for line in unreleased_section_lines:\n if line.startswith('+'):\n adds_in_diff += 1\n elif line.startswith('-') and not re.match(r'- *$', line):\n removes_in_diff += 1\n\n # means that at least one new line was added\n if adds_in_diff - removes_in_diff > 0:\n return True\n\n print_error(F'No new comment has been added in the release notes file: {self.release_notes_path}')\n return False", "def check_for_major_changes(cabal: CabalFile) -> bool:\n old_ver = cabal.get_version()\n old_tag = None\n if f'v{old_ver}' in get_tags():\n old_tag = f'v{old_ver}'\n if f'{old_ver}' in get_tags():\n old_tag = f'{old_ver}'\n if old_tag is None:\n print(f\"Couldn't find tag {old_tag} for current version; skipping revision check.\\n\")\n return False\n\n cmd = ['git', 'diff', '--name-only', f'{old_tag}..HEAD']\n changed_files = [ l.strip()\n for l in check_output(cmd).decode('UTF-8').split('\\n')\n if len(l.strip()) > 0 ]\n non_cabals = [ f\n for f in changed_files\n if not f.endswith('.cabal') ]\n print(f\"{len(changed_files)} files have changed since {old_tag}:\\n \",\n ' \\n'.join(changed_files))\n\n if len(non_cabals) > 0:\n return False\n else:\n print(dedent(f'''\n It appears that the only changes between {old_tag} and now are in the\n cabal file. Perhaps you want to make a revision instead?\n\n y = make a revision\n n = do a full release anyways\n d = show me a diff\n '''))\n while True:\n resp = prompt_for_char('How to proceed?', options='ynd')\n if resp == 'd':\n cmd = ['git', 'diff', f'{old_tag}..HEAD']\n print(' '.join(cmd))\n check_call(cmd)\n elif resp == 'y':\n return True\n elif resp == 'n':\n return False", "def changed(self) -> bool:\n return self._changed", "def check_sha1(filename, sha1_hash):\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n\n sha1_file = sha1.hexdigest()\n l = min(len(sha1_file), len(sha1_hash))\n return sha1.hexdigest()[0:l] == sha1_hash[0:l]", "def has_checksum_file(self):\n return self.checksum_file_path.is_file()", "def compute(self):\n\n commit_hashes = {item['hash'] for item in self.items}\n return len(commit_hashes)", "def is_versioned(target):\n\n assert os.path.exists(target), \"%s does not exist!\" % target\n git_tree = get_git_tree(target)\n\n versioned = False\n if git_tree is not None:\n output = gitopen([\"status\", \"--ignored\", \"--porcelain\", target], git_tree)\n if not (output.startswith(b\"!!\") or output.startswith(b\"??\")):\n versioned = True\n\n return versioned", "def is_outdated(self):\n\n if not self.is_done:\n return False\n elif not (self.input_files and self.output_files):\n return False\n\n return fileutils.modified_after(self.input_files, self.output_files)" ]
[ "0.69775397", "0.6945446", "0.6825735", "0.68252903", "0.6719734", "0.6671272", "0.6610195", "0.6565223", "0.6532837", "0.6526835", "0.6508548", "0.6499044", "0.6385325", "0.63748574", "0.6319224", "0.6314689", "0.63038826", "0.6292349", "0.6275215", "0.6274554", "0.6246274", "0.6239213", "0.6232751", "0.6206976", "0.61877716", "0.61662316", "0.6165848", "0.6158898", "0.6149281", "0.61447316", "0.6067305", "0.60669225", "0.6061476", "0.60506415", "0.60495496", "0.60482675", "0.60463977", "0.6036835", "0.602954", "0.60102654", "0.6007176", "0.60011506", "0.5990005", "0.5989979", "0.5988558", "0.59648436", "0.5933169", "0.592408", "0.5917", "0.59042466", "0.58915263", "0.58805335", "0.58711874", "0.5868117", "0.5850648", "0.5848601", "0.5846991", "0.584164", "0.58265465", "0.58247334", "0.58238643", "0.5819552", "0.5816777", "0.58072627", "0.5803607", "0.5794835", "0.5786548", "0.5781742", "0.5771136", "0.5755906", "0.57509017", "0.57324153", "0.57263297", "0.5723251", "0.5719201", "0.57173175", "0.5717087", "0.57079685", "0.568946", "0.5684553", "0.5671596", "0.5671145", "0.56703895", "0.5663467", "0.56616503", "0.5661144", "0.5651745", "0.5651206", "0.5651206", "0.56506807", "0.56474257", "0.56424624", "0.563908", "0.5638002", "0.56207895", "0.56181175", "0.5617853", "0.5606229", "0.5606037", "0.5603827" ]
0.66552734
6
Read input from a text file
def read_input():
    orbitDict = {}
    with open('day06_input.txt') as f:
        for line in f:
            planet, satellite = line.split(')')
            satellite = satellite.rstrip('\n')
            if satellite in orbitDict:
                orbitDict[satellite].append(planet)
            else:
                orbitDict[satellite] = [planet]
    return orbitDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_input():\n return Path(__file__).with_name('input.txt').read_text().splitlines()", "def read_txt(cls, input_file):\n return open(input_file, \"r\", encoding=\"utf-8\").readlines()", "def read_txt(cls, input_file):\n return open(input_file, \"r\", encoding=\"utf-8\").readlines()", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data", "def readInput(fileName):\n\n with open(fileName, 'r') as file:\n return file.read().splitlines()", "def read_input_file(path: str):\n with open(path, \"r\") as f:\n return f.readlines()", "def _read_input_file(self):\n pass", "def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)", "def readInput(fileName):\n with open(fileName, 'r') as file:\n\n fileContent = file.read()\n\n return fileContent.split(\"\\n\")", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def read_input(file_name='input.txt'):\n # always open relative to current file\n file_name = join(dirname(__file__), file_name)\n with open(file_name, 'r') as f_in:\n input_list = [line.strip()\n for line in f_in.readlines()\n if line.strip()]\n return map(int, input_list)", "def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()", "def read(in_file):\n require_type(is_input(in_file), 'the parameter of read must be an input file')\n txt = in_file.readline().lower()\n while txt == '\\n':\n txt = in_file.readline().lower()\n return txt.strip() if txt else Symbol('#!eof')", "def readInput(fileName):\r\n with open(fileName, 'r') as file:\r\n\r\n fileContent = []\r\n for line in file:\r\n fileContent.append(line.strip())\r\n\r\n return fileContent", "def get_input():\n # return TEST_INPUT.split('\\n')\n with open(INPUT_FILE) as f:\n return f.readlines()", "def read_from_file(self, filename: str) -> None:", "def _read_data(self, txtfile):\n data_string = open(txtfile,'r').read()\n return data_string", "def read_input_file(path):\n file_input = os.path.join(path, 'INPUT')\n f = open(file_input, 'r')\n lines = f.read().replace('\\n', ':')\n f.close()\n lines = ' '.join(lines.split()) #remove whitespace\n lines = re.split(':', lines.replace(': ', ':'))\n return lines", "def read_text_file(file_name):\n target_file = open(file_name)\n lines = target_file.readlines()\n\n target_file.close()\n return lines", "def read_input():\r\n tx = []\r\n with open(\"rail_input.txt\", \"r\") as file: \r\n data = file.readlines() \r\n for line in data:\r\n tx.append(line.strip())\r\n return tx", "def read_input(input_path: str) -> str:\n with open(input_path, 'r') as input_file:\n input_data = input_file.read().strip()\n return input_data", "def read_input(input_path: str) -> str:\n with open(input_path, 'r') as input_file:\n input_data = input_file.read().strip()\n return input_data", "def load_txt(filename, **kwargs):\n with sys_open(filename, 'r', **kwargs) as f:\n return f.readlines()", "def read_input(fname=\"day16.in\"):\n with open(fname) as f:\n return line2ints(f.read())", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def _read(self, in_file):\n self.string = in_file.readline().decode().strip()", "def read_file(file_path):\n try:\n 
input_file = open(file_path)\n text_content = input_file.read()\n input_file.close()\n return text_content\n except IOError:\n print (\"Can not read from file\")", "def load_input(self, path):\n f = codecs.open(path, 'r', 'utf-8')\n raw_text = f.read()\n return raw_text", "def _read_file(self, input_file):\n with io.open(input_file, \"r\", encoding=\"UTF-8\") as file:\n examples = []\n for line in file:\n data = line.strip().split(\"_!_\")\n example = InputExample(\n guid=data[0], label=data[1], text_a=data[3])\n examples.append(example)\n\n return examples", "def read_file(text_file):\n\n try:\n with open(text_file,\"r\") as handle:\n data = handle.read()\n return data\n except FileNotFoundError:\n return None", "def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n\")\n content = file.read()\n return content", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def readFromFile(filename):\n raise NotImplementedError", "def load_text_file(file_path: str):\n with open(file_path) as f:\n content = f.readlines()\n return content", "def readTextFromFile(self, filename):\r\n f = open(filename)\r\n self.text = f.read()\r\n f.close()", "def read_from_file(filename):\n\twith open(filename, 'r') as myfile:\n\t\ttext=myfile.read()\n\treturn text", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def input_data(self):\n return read_file(self.file_path)", "def get_input():\n # return TEST_INPUT.strip().split('\\n\\n')\n with open(INPUT_FILE) as f:\n return f.read().strip().split('\\n\\n')", "def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC", "def read_input():\n splits = []\n with open('solutions/day5/input.txt') as f:\n for line in f:\n splits.append(line.strip('\\n'))\n return splits", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read_input_file(filename=\"wires.txt\"):\n raw_wires = []\n with open(filename, 'r') as wirefile:\n raw_wires = wirefile.readlines()\n return raw_wires", "def puzzle_input():\n with open('day13input.txt') as file:\n return file.read()", "def read_text_file(file_name):\n \n file_data = {}\n \n with open(file_name) as fp:\n lines = fp.readlines()\n for line in lines:\n lineno = line.strip().split(':')[0].strip()\n #here we are checking whether a particluar line in the file contains a valid data [i.e line number and content]\n try:\n content = line.strip().split(':')[1].strip()\n file_data[lineno] = content\n except IndexError:\n pass\n \n return file_data", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_txt_file(relative_path_to_txt_file: str):\n with 
open(file=relative_path_to_txt_file) as f:\n lines = f.read()\n return lines", "def read_file():\n\n file = input(\"Input file: \")\n\n try:\n pfile = open(file)\n except: # Checks if the file can be opened\n print(\"ERROR: Could not open file \" + file)\n sys.exit()\n\n pfile = pfile.readlines()\n\n preorder = pfile[0].split()\n inorder = pfile[1].split()\n encode = pfile[2].strip()\n\n return preorder, inorder, encode", "def read(self, inputfile):\n infile = open(inputfile, 'r')\n if (inputfile.lower().endswith('.po')):\n self.read_po(infile)\n elif (inputfile.lower().endswith('.json')):\n self.read_json(infile)\n elif (inputfile.lower().endswith('.xml')):\n self.read_properties(infile)\n infile.close()", "def read_txt(path):\n \n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read(path):", "def readText(self, filename, firstLine = 0, lastLine = None):\n \n assert filename.endswith('.txt')\n file = open(filename, 'r')\n self.samples = []\n\n li = 0\n while li < firstLine:\n if not file.readline():\n return\n li += 1\n\n while lastLine == None or li < lastLine:\n line = file.readline()\n if not line:\n return\n li += 1\n line = line.strip()\n if line:\n columns = line.split('|')\n if columns[1] == 'client-fps':\n self.samples.append(Sample(line, columns))", "def load_input(self, file_name):\n with open(file_name, \"r\") as in_file:\n self.all_lines = [line.rstrip('\\n') for line in in_file]", "def _read(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n lines = []\n for line in f:\n lines.append(line.strip())\n return lines", "def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data", "def read_text_file(text_file):\n try:\n with open(text_file, \"rb\") as f:\n text = tf.compat.as_str(f.read()) \n except Exception as e:\n print(\"Unable to open\", text_file, \":\", e)\n raise\n return text", "def read_input_file(self):\n\n # Check if input file exists in current directory, if not kill process\n if not os.path.isfile('./visualise.inpt'):\n print('Cannot find input file \"visualise.inpt\" in current directory')\n sys.exit()\n\n # Read input file and analysis options and parameters\n print('Reading input file')\n with open('visualise.inpt','r') as f:\n f.readline()\n self.prefix = f.readline().split()[0]\n f.readline()\n f.readline()\n self.frame = int(f.readline().split()[0])\n f.readline()\n f.readline()\n self.vis_particles = int(f.readline().split()[0])\n self.vis_vortype = int(f.readline().split()[0])\n self.vis_cellcolour = int(f.readline().split()[0])\n self.vis_save = int(f.readline().split()[0])", "def readInConfigFile( self, fileName ):\n self.console.info( \"Read input file\" )", "def read_text_file(filename):\n try:\n file = open(filename, 'r')\n except:\n print('Cannot read file ' + filename + '. 
Please check the path', file=sys.stderr)\n sys.exit(1)\n output = []\n \n for line in file:\n line = line.strip().lower()\n output.append(line)\n return output", "def load_from_txt(path):\n with open(path) as file:\n data = [line.rstrip() for line in file]\n return data", "def _read(self, file_name):\n f = open(file_name)\n lines = f.readlines()\n begin = 0\n end = 0\n while end < len(lines):\n op = ''\n for l in lines[begin:]:\n end += 1\n op = l.split()[0]\n if op in operations:\n self.operations.append(op)\n break\n if op == '=push':\n nfa = Automaton(lines[begin:end - 1])\n self.aut_to_push.append(nfa)\n begin = end\n f.close()", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def read_from_file(filename):\n with open(filename, \"r\") as f:\n f.readlines()", "def read_file(filename):\r\n\r\n print(\"Reading TextFile \" + filename)\r\n text = []\r\n with open(filename, encoding=\"utf8\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n line = line.strip()\r\n text.append(line)\r\n return text", "def load_input(filepath: str) -> list:\n lines = []\n with open(filepath, \"r\", encoding=\"utf-8\") as file:\n for line in file.readlines():\n lines.append(line.strip())\n return lines", "def read(self, filename):\n raise NotImplementedError", "def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)", "def file_reader(filePath):\n try:\n word_file = open(filePath, \"rt\")\n word_list = word_file.read().splitlines()\n word_file.close()\n return word_list\n except Exception:\n print(f\"An error has occured when reading the file.\")\n\n return", "def myReadFile( path):\n f = open(path,'r')\n result = f.readlines()\n f.close\n return result", "def readText(fileName):\n fileText = \"\"\n with open(fileName,\"r\") as fileObject:\n fileText = fileObject.read()\n \n return fileText", "def read_in_file(self):\n try: # we are opening the file, this could fail..\n for line in open(self.question_file, 'r').readlines(): # Open the file and read in all the lines and put them in an array\n if line == '\\n': # if the line is simply equal to \"\\n\"\n continue # \"continue\" means \"don't continue execution, go back to the top of the loop\n else: # the line simply isn't \"\\n\" so let's append it.\n self.question_data.append(line.rstrip()) # append the line to the self.question_data array, strip the \\n off\n except OSError as err: # Let's capture the exception catch\n print(\"Problem opening question file: %s\" % self.question_file)\n fatal(\"System Error {0}\".format(err), -1) # let's print FATAL and the actual exception catch msg and exit -1", "def read_inpfile(self, filename):\n return wntr.network.io.read_inpfile(filename, append=self)", "def read_file(path):\n try:\n with open(path, 'r') as text_file:\n return \"\".join(text_file.readlines()).strip()\n except IOError:\n exit(\"Error: file '%s' is not readable!\" % path)", "def get_input(file_name, separator='\\n'):\n with open(file_name) as input_file:\n input_data = input_file.read().split(separator)\n return input_data", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def read_input(fname=\"day11.in\"):\n with open(fname) as f:\n return [int(v.strip()) for v in next(f).split(\",\")]", "def open_and_read_file(file_path):\n\n contents = open(file_path).read()\n words = contents.split()\n return words", "def read_data(cls, input_file,quotechar = None):\n if 'pkl' 
in str(input_file):\n lines = load_pickle(input_file)\n else:\n lines = input_file\n return lines", "def read_file(path):\n # Mystery arguments:\n strictness = False\n # Read the string:\n return _iterate_bibtexsource(_bibtex.open_file(path, strictness))", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def open_and_read_file(file_path):\n\n text_file = open(file_path)\n full_text = text_file.read()\n\n return full_text", "def read_data(cls, input_file, quotechar=None):\r\n if 'pkl' in str(input_file):\r\n lines = load_pickle(input_file)\r\n else:\r\n lines = input_file\r\n return lines", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def read_file(self, file_name: str):\n file_text = []\n with open(file_name, encoding='utf-8', errors='ignore') as file:\n for line in file:\n line = line.strip()\n file_text.append(line)\n return file_text", "def read_input(fname=\"day05.in\"):\n with open(fname) as f:\n return [int(v.strip()) for v in next(f).split(\",\")]", "def load_file(file):\r\n\r\n try:\r\n with open(Path(file), \"r\", encoding=\"utf-8\", newline=\"\") as f:\r\n txt_file = f.read()\r\n except:\r\n sys.exit(\"IO_Tools: ERROR: \"+str(file)+\" not found!\")\r\n \r\n lines = txt_file.split(\"\\n\")\r\n\r\n return lines", "def read_input(infile):\n #Some utility functions to read in particular types of input\n def read_int():\n return int(infile.readline().strip())\n def read_ints():\n return np.array(infile.readline().split(), dtype=int)\n def read_bigints(): #For ints that won't fit directly in an int32 array\n line = infile.readline().split()\n return np.array(map(lambda x: int(x), line))\n def read_float():\n return float(infile.readline().strip())\n def read_floats():\n return np.array(infile.readline().split(), dtype=float)\n def read_string():\n return infile.readline().strip()\n def read_strings():\n return np.array(infile.readline().split(), dtype=object) #N.B. 
general dtype\n \n N = read_int()\n cars = read_strings()\n assert N == len(cars)\n \n return cars", "def read_file(input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n sentences = f.read().splitlines()\n return sentences", "def readfile(file):\n with open(file, 'r') as f:\n data = f.read().splitlines()\n return data", "def txt_file_reader(path):\n return open(path, encoding=cfg.ENCODING)", "def read():\n # TODO", "def get_inputs_from_file(filename=\"\"):\n import json\n\n with open(filename) as input_text:\n json_obj = json.load(input_text)\n return json_obj", "def read_text_file(file_name, ncol = 0):\n\t\n\tfrom string import split\n\tinf = file(file_name, \"r\")\n\tline = inf.readline()\n\tdata = []\n\twhile len(line) > 0:\n\t\tif ncol == -1:\n\t\t\tvdata = split(line)\n\t\t\tif data == []:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata.append([float(vdata[i])])\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata[i].append(float(vdata[i]))\t\t\t\n\t\telse:\n\t\t\tvdata = float(split(line)[ncol])\n\t\t\tdata.append(vdata)\n\t\tline = inf.readline()\n\treturn data", "def read_file(self):\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n self.filename = askopenfilename(title='Select Hospital Text File') # show an \"Open\" dialog box and return the path to the selected file", "def read_file(path_to_file):\n 8", "def read_text(text_path):\n if not os.path.exists(text_path):\n raise IOError('File does not exist: %s' % text_path)\n return open(text_path).read()", "def read_text(filename):\n\n if '.txt' not in filename:\n raise ValueError('Input file must be a .txt file!')\n\n if filename[:4] == \"http\": # website\n website = urlopen(filename)\n # slicing to get rid of project gutenberg preamble/license\n txt = website.read().decode('UTF-8').lower()[800:-19500]\n else: # text file\n f = open(filename)\n txt = f.read().lower()\n\n # strip punctuation. Have switched hyphens to a capital letter and back so that they do not get removed.\n translator = txt.maketrans('--', ' ')\n txt = txt.translate(translator)\n translator = txt.maketrans('-', 'A')\n txt = txt.translate(translator)\n translator = txt.maketrans(\"\\n\\r\\t\", ' '*3)\n txt = txt.translate(translator)\n translator = txt.maketrans('', '', string.punctuation + \"'`’‘”“\")\n txt = txt.translate(translator)\n translator = txt.maketrans('A', '-')\n txt = txt.translate(translator).split(' ')\n\n return [s for s in txt if s !='']", "def read_file(file_path):\n file_contents = None\n with open(file_path) as f_desc:\n file_contents = f_desc.read()\n if not file_contents:\n raise CLIError('Could not read {}'.format(file_path))\n return file_contents" ]
[ "0.80333656", "0.73937935", "0.73937935", "0.7325028", "0.7260986", "0.72499967", "0.72369945", "0.712606", "0.7094524", "0.70686525", "0.69390434", "0.6938965", "0.6922924", "0.68762004", "0.68759114", "0.68530405", "0.68422097", "0.67936075", "0.67765266", "0.6772103", "0.6754905", "0.67473525", "0.67473525", "0.6726431", "0.6720038", "0.6665915", "0.66620106", "0.66521084", "0.6638513", "0.6625374", "0.6603505", "0.6567475", "0.65621763", "0.65621763", "0.6559847", "0.65585935", "0.6540416", "0.65377474", "0.6517126", "0.6514316", "0.65054554", "0.6501766", "0.6500878", "0.64915836", "0.6469703", "0.6468749", "0.6453031", "0.6434721", "0.6434721", "0.6423808", "0.6420907", "0.64068407", "0.64022654", "0.6398541", "0.63894796", "0.6384324", "0.6378887", "0.6370953", "0.6369451", "0.63534635", "0.63433725", "0.63342446", "0.63307214", "0.63300097", "0.6329512", "0.63293827", "0.63251585", "0.6320512", "0.6314581", "0.63145083", "0.63139737", "0.630884", "0.6306951", "0.6306539", "0.62916994", "0.62882733", "0.6285201", "0.6279981", "0.62745184", "0.6273009", "0.62706697", "0.6270334", "0.6261505", "0.6248187", "0.6246732", "0.6216517", "0.6214354", "0.62142855", "0.6203794", "0.6176728", "0.6174175", "0.61735046", "0.6172751", "0.6170998", "0.61656326", "0.6157625", "0.6154607", "0.6139352", "0.61362845", "0.61310744", "0.6129817" ]
0.0
-1
Counts the number of orbitating planets
def num_of_orbits(orbitDict):
    total_orbits = len(orbitDict.keys())
    total_orbits += independent_orbits(orbitDict)
    return total_orbits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orbit_count(self) -> int:\n if self.orbit:\n return 1 + self.orbit.orbit_count()\n return 0", "def orbit_count(objects: Dict[str, ObjectMass]) -> int:\n total = 0\n\n for mass in objects.values():\n total += mass.orbit_count()\n\n return total", "def count():", "def count_balls(self, **kwargs):\n return 0", "def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def countPanelTrajectoryExpressions(self):\n nbr = 0\n for e in self.children:\n nbr += e.countPanelTrajectoryExpressions()\n return nbr", "def get_neighbor_live_count(cart):\n count = 0\n for i in range(6):\n cart2 = (cart[0] + dxv[i],cart[1] + dyv[i],cart[2] + dzv[i])\n if check_cart(cart2) and voxel_data[cart_to_loc(cart2)] == 1:\n count += 1\n return count", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def drawCountPlotPlanets(df):\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(15,8))\n sns.countplot(x=df.amount_of_planets, data=df, palette='Reds_d')\n plt.show()", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def nbr_tours(self):\n nbr_tours = 0\n for i in range(3):\n for j in range(3):\n if self.grille[i][j] != 0:\n nbr_tours += 1\n return nbr_tours", "def num_atoms(self):\n return self.h5['{}/{}'.format(SETTINGS, N_ATOMS)][()]", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def airport_count(airport_data, state_abbr):\n \n counter = 0\n \n for dat in airport_data:\n \n if dat[3] == state_abbr:\n \n counter += 1\n \n return counter", "def num_wet(self):\n return np.sum(self.array == 5)", "def count_all_atoms(self):\n n = 0\n for model in self.iter_models():\n n += model.count_all_atoms()\n return n", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def total_number_of_animals(self):\n animals = self.animal()\n print 'Total number of animals on island: {:4}'.format(\n animals[\"Herbivores\"] + animals[\"Carnivores\"])", "def get_num_plants(self) -> int:\r\n\r\n return len(self.plants)", "def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)", "def number_of_atoms(formula):\n pass\n # ======== YOUR CODE HERE ========", "def count(self, syms = None ):\n if syms == None:\n syms = self.alpha.getSymbols()\n for sym in syms:\n idx = self.alpha.getIndex( sym )\n self.cnt[idx] += 1.0\n self.tot += 1", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def count_all_atoms(self):\n n = 0\n for atm 
in self.atom_order_list:\n if isinstance(atm, Atom):\n n += 1\n else:\n n += len(atm)\n return n", "def Points_Counting(self):\n return len(self.__traectory_list)", "def get_num_markets(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n number_markets = text(\r\n \"\"\"\r\n SELECT COUNT(\"NAME\") AS num_markets\r\n FROM farmers_markets\r\n WHERE \"ZIP\" = :name\r\n \"\"\"\r\n )\r\n resp = engine.execute(number_markets, name=name).fetchone()\r\n return resp[\"num_markets\"]", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret", "def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult", "def n_rays(self):\n try: \n return self._n_rays\n except AttributeError:\n self._n_rays = 0\n for r in self.rays(): self._n_rays += 1\n return self._n_rays", "def num_trajs(self):\n return len(list(self.run_traj_idx_tuples()))", "def countPanelTrajectoryExpressions(self):\n return 1 + self.child.countPanelTrajectoryExpressions()", "def number_of_atoms(self):\n if self._number_of_atoms is None:\n if self.mol is not None:\n self._number_of_atoms = len(self.mol.atoms)\n elif not self.is_ts:\n self._number_of_atoms = len(self.get_xyz().splitlines())\n return self._number_of_atoms", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def get_num_streets(market):\r\n return len(market)", "def count() -> int:\n pass", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def obstacle_count(self):\n for x in range(6):\n # do a scan of the area in front of the robot\n self.scan()\n\n \n see_an_object = False\n count = 0 \n # Do a scan and count the amount of objects in the way\n for angle in self.scan_data:\n dist = self.scan_data[angle]\n if dist < self.SAFE_DISTANCE and not see_an_object: \n see_an_object = True\n count += 1\n print(\"~~~ I SEE SOMETHING!! 
~~~\")\n elif dist > self.SAFE_DISTANCE and see_an_object:\n see_an_object = False\n print(\"I guess the object ended\") \n print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n self.turn_by_deg(90)\n print(\"\\nI saw %d objects\" % count)", "def getnumbarablocktriplets(self): # 3\n res,resargs = self.__obj.getnumbarablocktriplets()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _num_return_value = resargs\n return _num_return_value", "def number_of_herbivores_island(self):\n return np.sum(self.herbivores_on_island)", "def get_orbit_hops(self) -> Dict[str, int]:\n total = 0\n orbit_hops = {}\n\n orbit = self.orbit\n\n while orbit:\n orbit_hops[orbit.name] = total\n orbit = orbit.orbit\n total += 1\n\n return orbit_hops", "def count(self):\n return self.vcount", "def count(self):\n return len(self._components)", "def count_waters(self):\n n = 0\n for frag in self.iter_waters():\n n += 1\n return n", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)", "def count_support(projection):\n\tprev_id = -1\n\tsize = 0\n\tfor p in projection:\n\t\tif prev_id != p.id:\n\t\t\tprev_id = p.id\n\t\t\tsize += 1\n\treturn size", "def numel(self):\n return self.t.size", "def nb_triples(self) -> int:\n return 0", "def nAtoms(self, species = None, element = None):\n try:\n m = self.elementIndex(element)\n k = self.speciesIndex(species)\n na = _cantera.phase_natoms(self._phase_id, k, m)\n #if na < 0: return 0\n return na\n except CanteraError:\n return 0", "def atom_count(self):\n return len(self.repeated_elements())", "def getNumTiles(self):\n return (self.width) * (self.height)", "def count_all_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_all_atoms()\n return n", "def count(self):\n # TODO not implemented yet\n return 0", "def getNumTiles(self):\n\t\treturn self.numTiles", "def number_of_constituents(bc_class):\n num_trn = 0\n cn = bc_class.constituent_properties\n if cn.salinity:\n num_trn += 1\n if cn.temperature:\n num_trn += 1\n if cn.vorticity:\n num_trn += 1\n if not cn.general_constituents.empty:\n num_trn += len(cn.general_constituents.index)\n if not cn.sand.empty:\n num_trn += len(cn.sand.index)\n if not cn.clay.empty:\n num_trn += len(cn.clay.index)\n return num_trn", "def all_valid(self, tower) -> int:\r\n count = 0\r\n for layer in range(1, len(tower.tower)):\r\n for index in range(1, 4):\r\n if self.is_valid(layer, index, tower):\r\n count += 1\r\n \r\n return count", "def count(self):\n\n raise NotImplementedError", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def num_injectors(self):\n injectors = self.info_wells.groupby('well_type').get_group('inj')\n return injectors['well'].nunique()", "def get_number_of_ions(exp_builder, phase, system_id):\n # Read in output pdb file to read ionic strength.\n if phase == 'complex' or phase == 'solvent1':\n phase_id = 0\n else:\n phase_id = 1\n system_filepath = exp_builder._db.get_system_files_paths(system_id)[phase_id].position_path\n system_filepath = os.path.splitext(system_filepath)[0] + '.pdb'\n system_traj = mdtraj.load(system_filepath)\n\n # Count number of waters and ions.\n 
n_waters = 0\n n_pos_ions = 0\n n_neg_ions = 0\n for res in system_traj.topology.residues:\n if res.is_water:\n n_waters += 1\n elif '+' in res.name:\n n_pos_ions += 1\n elif '-' in res.name:\n n_neg_ions += 1\n\n # Verify that number of ions roughly models the expected ionic strength.\n try:\n solvent_id = exp_builder._db.systems[system_id]['solvent']\n except KeyError:\n solvent_id = exp_builder._db.systems[system_id][phase] # solvent1 or solvent2\n ionic_strength = exp_builder._db.solvents[solvent_id]['ionic_strength']\n n_ionic_strength_ions = int(np.round(n_waters * ionic_strength / (55.41*unit.molar)))\n\n return n_pos_ions, n_neg_ions, n_ionic_strength_ions", "def numAtoms(self):\n return self.nAtoms", "def get_numberOfProjections(self):\n self._nproj = len(self._projIndices)\n return self._nproj", "def count(self, trace):\n return len(trace)", "def natoms(self):\n return len(self.atoms)", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def ir_count(self) -> int:\n return int(self.graph_tuple_stats.ir_count or 0)", "def count_rotation_numels(model):\n total = 0\n for m in model.modules():\n if isinstance(m, (ConvLayerRotation, LinearLayerRotation)):\n total += m.rotation_matrix.numel()\n return total", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def getNrStations(self):\n return len(self.stationData)", "def get_num_petals(self):\n return self._num_petals", "def get_vehicle_count(self):\n return len(self.vehicles)", "def num_ellipsoids(self):\n return self._shape_count(_sff.ellipsoid)", "def get_number_of_ocean_levels(description):\n\n if description == \"none\":\n return \"0\"\n elif re.search(\"levels\", description):\n match = re.search(\"(?P<nlo>\\d+)\\s?(levels|vertical levels)\", description)\n return match.groupdict()[\"nlo\"]\n else:\n return \"60\"", "def numAtoms(self):\n\n\t\tnatoms = 0\n\t\tfor chain in self.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tnatoms += residue.numAtoms()\n\n\t\treturn natoms", "def total_num_atoms(self):\n return self.GetNumberOfAtoms()", "def getNumTiles(self):\n return self.height * self.width", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def get_num_of_baskets(self):\n return self.num_of_baskets", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def GetNumberOfVariables(self):\n\n # nvar = 0\n # for i in self.variables_order:\n # # DO NOT COUNT VARIABLES THAT GET CONDENSED 
OUT\n # if i!=0:\n # if mesh.element_type == \"tri\":\n # nvar += (i+1)*(i+2) // 2\n # elif mesh.element_type == \"tet\":\n # nvar += (i+1)*(i+2)*(i+3) // 6\n # elif mesh.element_type == \"quad\":\n # nvar += (i+1)**2\n # elif mesh.element_type == \"hex\":\n # nvar += (i+1)**3\n\n # nvar = sum(self.variables_order)\n if self.nvar == None:\n self.nvar = self.ndim\n return self.nvar", "def num_animals(self):\n return self._num_herbs + self._num_carns", "def getNumTiles(self):\n return self.w * self.h", "def n_atoms(self) -> int:\n return 0 if self.atoms is None else len(self.atoms)", "def num_nbrs_torus(A, r, j, c, k):\n\tnum = 0\n\tr = r - 1 # to account for off by one errors\n\tc = c - 1\n\tif j == 0:\n\t\tif k == 0:\n\t\t\t# top left corner edge case\n\t\t\tif A[r, c] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, c] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, c] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[r, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\tif k > 0 and k < c:\n\t\t\t# top row minus corners edge cases\n\t\t\tif A[r, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[r, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\n\t\tif k == c:\n\t\t\t# top right corner edge case\n\t\t\tif A[r, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[r, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\n\t\tif A[j+1,k] == ALIVE:\n\t\t\tnum += 1\n\t\tif A[r, k] == ALIVE:\n\t\t\tnum += 1\n\n\tif j > 0 and j < r:\n\t\tif k == 0:\n\t\t\t# left side minus corners edge cases\n\t\t\tif A[j-1, c] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, c] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, c] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[j-1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\tif k > 0 and k < c:\n\t\t\t# center\n\t\t\tif A[j-1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[j-1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\n\t\tif k == c:\n\t\t\t# right side minus corners edge cases\n\t\t\tif A[j-1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[j-1, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j+1, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\n\t\tif A[j+1,k] == ALIVE:\n\t\t\tnum += 1\n\t\tif A[j-1, k] == ALIVE:\n\t\t\tnum += 1\n\n\n\tif j == r:\n\t\tif k == 0:\n\t\t\t# bottom left corner edge cases\n\t\t\tif A[j-1, c] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, c] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[0, c] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[0, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j-1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\tif k > 0 and k < c:\n\t\t\t# bottom row minus corners edge cases\n\t\t\tif A[0, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k-1] == ALIVE:\n\t\t\t\tnum += 
1\n\t\t\tif A[j-1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[0, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j-1, k+1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\n\t\tif k == c:\n\t\t\t# bottom right corner edge cases\n\t\t\tif A[0, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j-1, k-1] == ALIVE:\n\t\t\t\tnum += 1\n\n\t\t\tif A[0, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\tif A[j-1, 0] == ALIVE:\n\t\t\t\tnum += 1\n\t\t\n\t\tif A[j-1,k] == ALIVE:\n\t\t\tnum += 1\n\t\tif A[0, k] == ALIVE:\n\t\t\tnum += 1\n\t\n\treturn num" ]
[ "0.7507539", "0.72100306", "0.617972", "0.6152469", "0.61030316", "0.603404", "0.6009309", "0.5947875", "0.59461266", "0.5906092", "0.5883676", "0.5869086", "0.58329415", "0.5806103", "0.5806103", "0.5806103", "0.5806103", "0.577206", "0.574075", "0.57232994", "0.57174265", "0.57174265", "0.56978667", "0.5690096", "0.56874657", "0.56598604", "0.56567085", "0.56392145", "0.5636239", "0.5634523", "0.5625791", "0.56198424", "0.5619639", "0.5619639", "0.5613094", "0.55897135", "0.5588809", "0.55753696", "0.55742574", "0.55730665", "0.5572822", "0.5562752", "0.55625284", "0.55289274", "0.5520585", "0.55070835", "0.55019325", "0.55006886", "0.5496928", "0.54943734", "0.5489772", "0.5481818", "0.5481797", "0.5468241", "0.5468083", "0.54626596", "0.5462629", "0.5457187", "0.54426366", "0.54416436", "0.5427329", "0.54249084", "0.54223293", "0.541635", "0.5407243", "0.5402466", "0.54001045", "0.5385608", "0.5384107", "0.53811646", "0.5376768", "0.53733367", "0.5364865", "0.5363121", "0.53520143", "0.53505963", "0.5349892", "0.5347887", "0.5347176", "0.5337795", "0.5336007", "0.5334661", "0.5334219", "0.5330905", "0.5324562", "0.53201324", "0.5316428", "0.5316428", "0.5316428", "0.5316428", "0.5311232", "0.5308357", "0.53081965", "0.53081965", "0.53042156", "0.5299123", "0.5294308", "0.5292634", "0.52873975", "0.52872354" ]
0.6075204
5
This function identifies on which orbits it is possible to meet Santa with the least number of jumps
def meet_santa(orbitDict):
    santa_count, santa_path = does_orbit('SAN', orbitDict)
    your_count, your_path = does_orbit('YOU', orbitDict)
    santa_planets = set(santa_path)
    your_planets = set(your_path)
    common = len(santa_planets.intersection(your_planets))
    dist = santa_count + your_count - 2*common
    return dist
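The helper does_orbit is not included in this snippet. A minimal sketch of what it presumably does, assuming orbitDict maps each object to the object it directly orbits, is given below; the name, signature, and mapping direction are assumptions, not part of the original document.

def does_orbit(body, orbitDict):
    # Hypothetical helper (not shown in the snippet above): walk from `body`
    # toward the root of the orbit map, counting hops and recording every
    # body passed along the way.
    count = 0
    path = []
    while body in orbitDict:      # assumed mapping: object -> object it orbits
        body = orbitDict[body]
        path.append(body)
        count += 1
    return count, path

Under this reading, meet_santa counts the ancestors of 'SAN' and 'YOU', subtracts the shared portion of both paths twice, and the remainder is the number of orbital transfers needed to meet.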
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spotlessroomba_first_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial admissible heuristic\n return len(state.dirty_locations)", "def challenge1(self):\n self.parse_input()\n\n # Find strongest nanobot\n strongest = max(self.nanobots, key=lambda n: n.r)\n\n # Find all in range of this\n num_in_range = 0\n for nanobot in self.nanobots:\n if manhattan_dist(nanobot.coord, strongest.coord) <= strongest.r:\n num_in_range += 1\n\n print(f\"{num_in_range} nanobots are in range of strongest\")", "def bridge_problem3(here):\r\n\r\n def all_over(state):\r\n here, _ = state\r\n return not here or here == set([\"light\"])\r\n\r\n start = (frozenset(here) | frozenset([\"light\"]), frozenset())\r\n return lowest_cost_search(start, bsuccessors2, all_over, bcost)", "def shortest_tips(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(endpoint_index + 1,\n branch_index + 1)\n selected_index = np.append(0, selected_index)", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))", "def minimum_spanning_arborescence(sol):", "def count_stair_ways(n):\n if n == 1:\n return 1\n if n == 2:\n return 2\n return count_stair_ways(n - 1) + count_stair_ways(n - 2)", "def large_neighborhood_search(facilities: list, towns: list, hazards: list, max_iteration: int = 50, max_improvement: int = 10) -> tuple:\r\n\r\n # counters\r\n max_attempts_per_swappable_facility = 10\r\n attempts_per_swappable_facility = 0\r\n improvements = 0\r\n iterations = 0\r\n\r\n # list of the opened facilities\r\n solutions = list(filter(lambda o_facility: o_facility.is_open, facilities.copy()))\r\n\r\n while True:\r\n\r\n # list of the closed facilities\r\n neighborhood = list(filter(lambda c_facility: not c_facility.is_open, facilities.copy()))\r\n\r\n # get the town with the maximum hazard perceived\r\n max_hazard_town, max_hazard = get_town_with_max_hazard(facilities, towns, hazards)\r\n\r\n # get a random opened facility to close\r\n swappable_facility = random.choice(solutions)\r\n\r\n while True:\r\n # get a random closed facility to test\r\n test_facility = random.choice(neighborhood)\r\n\r\n # check capacity constraint (ADMISSIBILITY)\r\n if test_facility.capacity >= swappable_facility.capacity:\r\n # destroy & repair the solution\r\n swappable_facility.is_open = False\r\n test_facility.is_open = True\r\n\r\n # calculate the new max hazard and the respective town\r\n new_max_hazard_town, new_max_hazard = get_town_with_max_hazard(facilities, towns, hazards)\r\n\r\n # check if the new max hazard is an improvement (SOLUTION)\r\n if new_max_hazard < max_hazard:\r\n # reassign the chosen facility to the towns whose were associated with the swappable_facility\r\n for town in towns:\r\n if town.facility == swappable_facility:\r\n town.facility = test_facility\r\n\r\n # reset the attempts per town counter because now there is a new solution to iterate on\r\n improvements += 1\r\n\r\n # rebuild the solution\r\n solutions = list(filter(lambda o_facility: o_facility.is_open, 
facilities.copy()))\r\n\r\n # exit the loop because a solution was found\r\n break\r\n\r\n else:\r\n # restore of the current solution's data structures\r\n swappable_facility.is_open = True\r\n test_facility.is_open = False\r\n\r\n # skip the chosen facility\r\n neighborhood.pop(neighborhood.index(test_facility))\r\n\r\n else:\r\n # skip the chosen facility\r\n neighborhood.pop(neighborhood.index(test_facility))\r\n\r\n # increment the counter for swappable facility attempts to swap, then check the limit and maybe break\r\n attempts_per_swappable_facility += 1\r\n # forbid the current swappable facility to be taken in a count for the next iteration\r\n if len(neighborhood) == 0 or attempts_per_swappable_facility == max_attempts_per_swappable_facility:\r\n solutions.pop(solutions.index(swappable_facility))\r\n break\r\n\r\n # if there are no more facilities in solution to be taken in account, exit the loop\r\n if len(solutions) == 0:\r\n break\r\n\r\n # check the number of improvement\r\n if improvements == max_improvement:\r\n break\r\n\r\n iterations += 1\r\n # check the number of iteration\r\n if iterations == max_iteration:\r\n break\r\n\r\n return iterations, improvements", "def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = ~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], 
nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, 
orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. 
[deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')", "def branchNBound2(nationtxt, bound, scheme):\n\n\n nation = nationLoader(nationtxt)\n transmitterCosts = scheme\n\n neighborCount = {}\n for province in nation:\n neighborCount.update({province:len(nation.get(province)[0])})\n\n\n neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__)\n\n #~ neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__, reverse=True)\n\n for key in neighborCountSorted:\n provinces.append(key)\n #~ print provinces\n\n upperbound = bound\n #~ print bound\n\n\n\n solution = []\n\n\n counter = 0\n\n\n\n\n while index >= 0:\n\n counter += 1\n if counter % 100000000 == 0:\n print counter\n print \"Now at:\", nation\n\n\n if index == -1:\n break\n\n # Assign transmitter\n if nation[provinces[index]][1] == numTransmitters:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n else:\n costs, index = updateTransmitter(nation, False, scheme, provinces, costs, index)\n\n # Check if costs are above upper bound\n if (costs + (len(provinces) - (index + 1)) * transmitterCosts[0]) > upperbound:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n # Check if a neighbor has the same transmitter\n conflict = False\n for neighbor in nation[provinces[index]][0]:\n if nation[neighbor][1] == nation[provinces[index]][1]:\n conflict = True\n break\n\n if conflict:\n continue\n\n # Check if a solution is found\n if index == len(provinces) - 1:\n #~ print \"\\nSOLUTION:\"\n if costs < upperbound:\n solution = []\n solution.append(json_deep_copy(nation))\n upperbound = costs\n #~ print \"Score:\", upperbound\n #~ print nation\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n index += 1\n\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n for nation in solution:\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in nation:\n\n if nation[province][1] == 1:\n one += 1\n if nation[province][1] == 2:\n two += 1\n if nation[province][1] == 3:\n three += 1\n if nation[province][1] == 4:\n four += 1\n if nation[province][1] == 5:\n five += 1\n if nation[province][1] == 6:\n six += 1\n if nation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if transmitterCosts[3] != transmitterCosts[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n return counter", "def heads_legs(heads, legs):\n for i in range(0, heads + 1):\n cows = heads - i\n if 4 * cows + 
2 * i == legs:\n chickens = i\n return chickens, cows\n return \"No solutions.\"", "def earliest_arrival(jump_distance, stones):\n #jump_distance of 3 means they skip 2 stones and land on 3rd\n stone = ''\n #based on jump_distance, what are all the stone nums within that distance\n #when jump_distance is 5, can jujp to stones[4]\n \n stone = min(stones[:(jump_distance - 1)]) #stone = 2\n\n t = max(0, stone) #t = 2\n print(f't starts at = {t}')\n i = stones.index(stone) # i = 1\n print(f'i starts at = {i}') \n #as long as \n while i + jump_distance <= len(stones) - 1: # 3 + 5 <= 7\n print(f'i + jump_distance = {i + jump_distance}')\n\n stone = min(stones[(i + 1):(i + jump_distance)]) #stone = 3\n print(f'stone: {stone}')\n if t < stone: \n t = stone #reassign to 3\n \n i = stones.index(stone) # i = 2\n print(f'i = {i}') \n stone = min(stones[(i + 1):(i + jump_distance)]) #stone \n t = max(t, stone)\n print(f'end of while loop: t = {t}') \n \n return t\n #what's the lowest num within that jump_distance \n #go to that stone, then look at next stones in new jump_distance\n #t becomes whatever stone we jumped to\n #if the next stone we jump to is a higher value, reassign t\n #get length of list of stones so we don't go past length\n #var assigned to len(list) and compare to index we're at \n #if index we're jumping to is higher than len of list, then we're done\n #control for indexerror with while loop?\n # ", "def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial consistent heuristic\n \n if not state.dirty_locations:\n return 0\n \n best_start = 0 # best dirty tile to start from\n best_cost = INF # cost of the path from the above start tile\n\n for i in range(len(state.dirty_locations)):\n estimate_cost = 0\n lowest_cost = INF\n closest_dirty = 0\n dirty_locations = list(state.dirty_locations)\n current_pos = dirty_locations.pop(i)\n\n # find the shortest cost solution path from this starting tile\n while dirty_locations:\n for j in range(len(dirty_locations)):\n manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col)\n if manhattan < lowest_cost:\n lowest_cost = manhattan\n closest_dirty = j\n estimate_cost += lowest_cost\n current_pos = dirty_locations.pop(closest_dirty)\n lowest_cost = INF\n # if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start\n if estimate_cost < best_cost:\n best_cost = estimate_cost\n best_start = i\n # if estimated path cost and best path cost so far are equal, tiebreak with proximity to start tile\n if estimate_cost == best_cost:\n current_pos = state.position\n dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)\n dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col)\n if dist_to_i < dist_to_prev_best:\n best_start = i\n \n\n current_pos = state.position\n # Calculate distance to the best start tile\n dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)\n # Returned heuristic is the sum of distance to the start tile and estimated cost from said tile\n return dist_to_start + best_cost", "def fn(i, s0, s1, c0, c1):\n if s0 > n or s1 > n: return 0 # impossible \n if i == len(balls): return int(c0 == c1)\n ans = 0 \n for x in range(balls[i]+1): \n ans += fn(i+1, s0+x, s1+balls[i]-x, c0+(x > 0), 
c1+(x < balls[i])) * comb(balls[i], x)\n return ans", "def solution(A):\n \n cars = 0\n ones = 0\n\n for i in range(len(A), 0, -1):\n\n if A[i-1] == 1:\n ones += 1\n else:\n cars += ones\n\n return (-1 if cars > 1000000000 else cars)", "def find_all_ORFs_both_strands(dna):\n \n # YOUR IMPLEMENTATION HERE", "def gameOfStones(n):\n winning_start = set([2, 3, 4, 5]) # 7 we don't know who would win yet\n losing_start = set([0, 1])\n\n def n_remain(n_):\n n_next = set([n_ - x for x in [2, 3, 5] if n_ - x >= 0])\n return n_next\n\n i = 0\n while i < n + 1:\n x = n_remain(i)\n # let's build lookup table to see what each value of up to n yields as\n # far as winner under optimal play\n if i in winning_start:\n # print('Player 1 wins')\n pass\n elif i in losing_start:\n # print('Player 2 wins')\n pass\n elif len(losing_start.intersection(x)) > 0:\n # if player 1 has any option that is a losing start, he win\n print(i, 'Adding to winning start', x)\n winning_start.add(i)\n elif len(winning_start.intersection(x)) == len(x):\n # if player 1 gives player 2 only winning options, p1 loses\n print(i, 'Adding to losing start', x)\n losing_start.add(i)\n else:\n print('this should never happen')\n i += 1\n\n if n in winning_start:\n print('First')\n return 'First'\n else:\n print('Second')\n return 'Second'", "def Solve(bases):\r\n n = 1\r\n while 1:\r\n n += 1\r\n done = True\r\n for b in bases:\r\n if not Happy(n, b):\r\n done = False\r\n break\r\n if done:\r\n return n", "def myHeuristic3(state, problem=None):\n #canto =[(1,1), (1,2), (2,1), (2,2), (36,36), (35,36), (35,35), (36,35), (1,36),(1,35),(2,36),(2,35),(36,1),(36,2),(35,1),(35,2)]\n canto = []\n for l in range(2):\n for c in range(2):\n canto.append((l,c))\n heru = abs(state[0] - 1) + abs(state[1] - 1)\n #heru=10\n if state in canto:\n #print(\"sim\")\n heru = heru * 0.5\n return heru", "def test_teleport_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.teleport_circuit()\n targets = ref_algorithms.teleport_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def branchNBound(nationtxt, bound, scheme):\n provinces = []\n index = 0\n costs = 0\n numTransmitters = 7\n\n transmitterCosts = scheme\n nation = nationLoader(nationtxt)\n\n\n neighborCount = {}\n for province in nation:\n neighborCount.update({province:len(nation.get(province)[0])})\n\n\n #~ neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__)\n\n neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__, reverse=True)\n\n for key in neighborCountSorted:\n provinces.append(key)\n #~ print provinces\n\n upperbound = bound\n #~ print bound\n #~ print bound\n\n\n\n solution = []\n\n\n counter = 0\n\n\n\n\n\n while index >= 0:\n\n\n counter += 1\n if counter % 100000000 == 0:\n print counter\n print \"Now at:\", nation\n\n\n if index == -1:\n break\n\n # Assign transmitter\n if nation[provinces[index]][1] == numTransmitters:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n else:\n costs, index = updateTransmitter(nation, False, scheme, provinces, costs, index)\n\n # Check if costs are above upper bound\n if (costs + (len(provinces) - (index + 1)) * transmitterCosts[0]) > upperbound:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n # Check if a neighbor has the same transmitter\n 
conflict = False\n for neighbor in nation[provinces[index]][0]:\n if nation[neighbor][1] == nation[provinces[index]][1]:\n conflict = True\n break\n\n if conflict:\n continue\n\n # Check if a solution is found\n if index == len(provinces) - 1:\n #~ print \"\\nSOLUTION:\"\n if costs < upperbound:\n solution = []\n solution.append(json_deep_copy(nation))\n upperbound = costs\n #~ print \"Score:\", upperbound\n #~ print nation\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n index += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n for nation in solution:\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in nation:\n\n if nation[province][1] == 1:\n one += 1\n if nation[province][1] == 2:\n two += 1\n if nation[province][1] == 3:\n three += 1\n if nation[province][1] == 4:\n four += 1\n if nation[province][1] == 5:\n five += 1\n if nation[province][1] == 6:\n six += 1\n if nation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if transmitterCosts[3] != transmitterCosts[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n return fivePlus, fivePlusNoDuplicate, usedTrans, upperbound, len(solution), counter\n #~ f.write(\"\\n Used Transmitters: \"+ str(one)+\" \"+ str(two)+\" \"+ str(three)+\" \"+ str(four)+\" \"+ str(five)+\" \"+ str(six)+\" \"+ str(seven)+\"\\n Cost: \"+str(upperbound)+\"\\n Number of solutions: \"+str(len(solution))+\"\\n Iterations: \"+str(counter)+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\")\n\n #~ print \"transmitter frequecies:\", one, two, three, four, five, six, seven\n #~ print \"Solutions:\", solution\n #~ print \"Cost:\", upperbound\n #~ print \"Number of solutions:\", len(solution)\n #~ print \"Iterations:\", counter", "def solve(k=[]):\n if len(k) < 2:\n return 1\n min_jumps = len(k)\n for i in reversed(range(k[0])):\n if i > len(k):\n return 1\n if k[i] != 0:\n sub_jumps = solve(k[i + 1 :])\n min_jumps = min(min_jumps, sub_jumps)\n return min_jumps", "def main():\n LIMIT = 600\n GOAL = 1000\n for i in range(1, LIMIT + 1):\n for j in range(1, LIMIT + 1):\n for k in range(1, LIMIT + 1):\n if i + j + k == GOAL:\n if i**2 == j**2 + k**2:\n print \"GOAL:\", (i, j, k)", "def occam_razor() -> None:\r\n print(\"WARNING! Mode three activated. 
Time to complete may be several minutes\")\r\n temp = [] # x-y-conflicts\r\n global example\r\n backup = example.copy() # Backup so it can backtrack through solutions\r\n for x in range(shape):\r\n for y in range(shape):\r\n conflict_counter = 0\r\n for z in range(shape):\r\n if conflict_space[x, y] != 0:\r\n if conflict_space[x, y] == conflict_space[x, z] and z != y:\r\n conflict_counter += 1\r\n if conflict_space[x, y] == conflict_space[z, y] and z != x:\r\n conflict_counter += 1\r\n if conflict_counter > 0 and no_neighbour(x, y):\r\n temp.append([x, y, conflict_counter])\r\n threshold = [0, 0, 0]\r\n \"\"\"Takes an educated guess on the node in most conflict in case it's one move away from being solved\"\"\"\r\n for x in range(len(temp)):\r\n if temp[x][2] > threshold[2]:\r\n threshold = temp[x]\r\n if threshold[2] > 0:\r\n example[threshold[0], threshold[1]] = 0\r\n shade_neighbours(threshold[0], threshold[1])\r\n if not victory_checker():\r\n \"\"\"code now begins guessing\"\"\"\r\n for x in range(len(temp)):\r\n example = backup.copy()\r\n if no_neighbour(temp[x][0], temp[x][1]):\r\n example[temp[x][0], temp[x][1]] = 0\r\n else:\r\n continue\r\n progress_handler(False, True)\r\n while progress_handler(True, False):\r\n print_debug(\"itteration\")\r\n progress_handler(False, False)\r\n mark_check()\r\n if victory_checker():\r\n completion(True)\r\n if not progress_handler(True, False):\r\n special_corner()\r\n if not progress_handler(True, False):\r\n separation_crawler(True)\r\n if not progress_handler(True, False):\r\n occam_razor() # Recursive\r\n if not progress_handler(True, False):\r\n if victory_checker():\r\n completion(True)\r\n else:\r\n print(\"Searching...\")\r\n continue\r\n conflict_check()", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n 
random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit", "def find_non_trivial_orbit(generators: [Permutation]) -> int:\n if not generators:\n return None\n n = generators[0].n\n for P in generators:\n for element in range(n):\n if P[element] != element:\n return element", "def brute_force(city_list):\n start = time.time()*1000\n shortest = exhaustive_search(city_list,6)\n stop = time.time()*1000\n print(\"Shortest tour for 6 first cities:\", tour_distance(shortest))\n print (\"Time spent on 6 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,7)\n stop = time.time()*1000\n print(\"Shortest tour for 7 first cities:\", tour_distance(shortest))\n print (\"Time spent on 7 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,8)\n stop = time.time()*1000\n print(\"Shortest tour for 8 first cities:\", tour_distance(shortest))\n print (\"Time spent on 8 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,9)\n stop = time.time()*1000\n print(\"Shortest tour for 9 first cities:\", tour_distance(shortest))\n print (\"Time spent on 9 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,10)\n stop = time.time()*1000\n print(\"Shortest tour for 10 first cities:\", tour_distance(shortest))\n print (\"Time spent on 10 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\" \")", "def __checkvictory__(self,playerchar):\n\t\tvictory = False\n\t\tboardx = deepcopy(self.board)\n\t\trow = 5\n\t\tcolumn = 6\n\t\tstarburst_bag = []\n\t\tcats_game = True\n\t\tfor a in range(row+1):\n\t\t\tfor b in range(column+1):\n\t\t\t\tstarburst = []\n\t\t\t\tstarburst.append((a,b))\n\t\t\t\t\n\t\t\t\tif self.__checkplace__(a,b) is True:\n\t\t\t\t\tcats_game = False\n\t\t\t\t\tcontinue\n\t\t\t\telif self.__checkplace__(a,b) == playerchar:\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a-starburst[1] < 0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a-starburst[1],b) == playerchar:\n\t\t\t\t\t\t\tstarburst[1] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a-starburst[2] < 0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b+starburst[2] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a-starburst[2],b+starburst[2])\\\n\t\t\t\t\t\t == playerchar:\n\t\t\t\t\t\t\tstarburst[2] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif b+starburst[3] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a,b+starburst[3]) == playerchar:\n\t\t\t\t\t\t\tstarburst[3] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a+starburst[4] > 5:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b+starburst[4] > 
6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a+starburst[4],b+starburst[4])\\\n\t\t\t\t\t\t== playerchar:\n\t\t\t\t\t\t\tstarburst[4] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tstarburst_bag.append(starburst)\n\t\t\n\t\tfor starburst in starburst_bag:\n\t\t\t\n\t\t\ta = starburst[0][0]\n\t\t\tb = starburst[0][1]\n\t\t\t\n\t\t\tif starburst[1] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[1]):\n\t\t\t\t\tboardx[a-i][b] = boardx[a-i][b].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[2] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[2]):\n\t\t\t\t\tboardx[a-i][b+i] = boardx[a-i][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[3] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[3]):\n\t\t\t\t\tboardx[a][b+i] = boardx[a][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[4] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[4]):\n\t\t\t\t\tboardx[a+i][b+i] = boardx[a+i][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\t\n\t\tif cats_game:\n\t\t\treturn None\n\t\tif victory:\n\t\t\treturn boardx\n\t\telse:\n\t\t\treturn False", "def find_santa(curr_pos=src, prev_pos='YOU', transfers=0):\n if curr_pos == dst: # base case: we found it!\n return transfers\n\n # let's try moving inward (unless that would be retracing our steps)\n parent = orbits[curr_pos]['parent']\n if parent is not None and parent != prev_pos:\n possible_route = find_santa(curr_pos=parent, prev_pos=curr_pos, transfers=transfers+1)\n if possible_route is not False:\n return possible_route\n\n # if we're still here, inward didn't work. let's move outward...\n children = [c for c in orbits[curr_pos]['children'] if c != prev_pos] # don't retrace steps\n for child in children:\n possible_route = find_santa(curr_pos=child, prev_pos=curr_pos, transfers=transfers+1)\n if possible_route is not False:\n return possible_route\n\n # at this point there are no possible routes. 
return false\n return False", "def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)", "def lowest_cost_search(start, successors, is_goal, action_cost):\r\n # your code here\r\n explored = set()\r\n frontier = [ [start] ]\r\n while frontier:\r\n path = frontier.pop(0)\r\n state1 = final_state(path)\r\n if is_goal(state1):\r\n return path\r\n explored.add(state1)\r\n pcost = path_cost(path)\r\n for (state, action) in successors(state1).items():\r\n if state not in explored:\r\n total_cost = pcost + action_cost(action)\r\n path2 = path [(action, total_cost), state]\r\n add_to_frontier(frontier, path2)\r\n return Fail", "def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False", "def _is_this_healthy_rDNA(self):\n if self.length < 3000:\n return 0\n mapping_state = []\n for item in self.sam_summary:\n if item[1] != '0':\n mapping_state.append(1)\n else:\n mapping_state.append(0)\n threshold = 0.8\n if sum(mapping_state)/len(mapping_state) > threshold:\n return 1\n else:\n for i in range(1, len(mapping_state) - 50):\n if sum(mapping_state[i:])/len(mapping_state[i:]) > threshold or \\\n sum(mapping_state[:-i])/len(mapping_state[:-i]) > threshold:\n healthy = 2\n return 0", "def brute_force(seats):\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None", "def goal_test(self, state):\n \"*** YOUR CODE HERE ***\"\n shoot_loc_arr = []\n for allowed_state in self.allowed:\n for goal_state in self.goals:\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Head East\n if state in shoot_loc_arr:\n return True\n else:\n return False", "def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])", "def getSuccessors(self, state):\n\n successors = []\n top, right = self.walls.height - 2, self.walls.width - 2\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n x, y = state[0]\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n La función sucesores funciona de la siguiente manera:\n * Si la acción no hace que choque con una pared, entonces...\n - Defino nextState como las coordenadas de lo que me da la acción\n - Creo una copia de la 
grid de true/false que tiene el estado, para así no modificar la original\n - A esta copia le actualizo la información, si el sucesor es una de las esquinas. Tengo que realizar\n esto manualmente dada la definición de mi grid de booleanos.\n - Creo una nueva variable que es una tupla en la que inserto las nuevas coordenadas y la grid actualizada\n - La añado a la lista de sucesores\n \"\"\"\n if not hitsWall:\n nextState = (nextx, nexty) # Defino la tupla que será la posición del sucesor\n nextFood = state[1].copy() # Hago una copia para así poder modificarla tranquilamente\n if nextState == (1, 1): # Manualmente miro si es alguna de las esquinas\n nextFood[1][0] = False # Si lo es, actualizo de true a false el elemento correspondiente\n if nextState == (1, top):\n nextFood[0][0] = False\n if nextState == (right, 1):\n nextFood[1][1] = False\n if nextState == (right, top):\n nextFood[0][1] = False\n nextStateFood = (nextState, nextFood) # Lo añado como tupla\n cost = 1 # Por orden del enunciado, el coste es siempre 1\n successors.append((nextStateFood, action, cost)) # Lo añado a la lista de sucesores\n self._expanded += 1\n return successors", "def greedy(self, state, timestep, epsilon=0):\n\n counts = np.bincount(self.call_locs, minlength=self.num_nodes)\n # print(self.lengths)\n # print(counts)\n score = self.lengths @ counts\n action = []\n for _ in range(self.num_ambulance):\n node = np.argmin(score)\n action.append(node)\n score[node] = 99999999\n return action", "def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def calculate_image_possibilities():\n\n # Reordering the color ramps in the palette yields 3! combinations\n palette_reorder_possibilities = 6\n\n return len(palettes) * palette_reorder_possibilities * len(grips) * len(pommels) * len(crossguards) * len(blades)", "def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h", "def nextMoveDecision(self):\n b = random.randint(1, 9) \n while (self.Occupied(b)):\n b = random.randint(1, 9) \n return b", "def efficient_sol(rt=\"O(n)\"):\n def bribe_count(L): # L = line\n bribes = 0\n # from back to front\n for i in range(len(L) - 1, -1, -1):\n\n expected_index = i + 1\n # Case 1: Expected Person already in the right spot.\n if L[i] == expected_index:\n continue\n\n # Case 2: Expected Person is 1 step away. Swap.\n elif L[i - 1] == expected_index:\n L[i], L[i - 1] = L[i - 1], L[i]\n bribes += 1\n\n # Case 3: Expected person is 2 steps away. 
Put into proper place and other 2 up.\n elif L[i - 2] == expected_index:\n L[i - 2], L[i - 1], L[i] = L[i - 1], L[i], L[i - 2]\n bribes += 2\n else:\n return \"Too chaotic\" # Expected person is more than 2 steps away.\n return bribes\n\n test_count = int(input())\n for _ in range(test_count):\n input()\n line = list(map(int, input().split()))\n print(bribe_count(line))", "def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)", "def solve(given: np.array) -> np.array:\n possible = np.full((9, 9, 9), True)\n mask = given > 0\n possible[mask, :] = False\n possible[mask, given[mask] - 1] = True\n\n # number of possibilities at each site, masking those already propagated\n # to avoid repetitive work. All masked == problem solved\n count = ma.array(possible.sum(axis=2), fill_value=1)\n\n # allocate upfront to as out parameter to np.equal\n # (ma.array because count is ma.array)\n where = ma.array(np.empty((9, 9), dtype=bool), fill_value=False)\n\n stack = [(possible, count)]\n while stack:\n node, count = stack.pop()\n unsolved = propagate(node, count, where)\n if unsolved == -1:\n continue\n if unsolved == 0:\n break\n # try all possibilities from cell with fewest > 1\n i, j = np.unravel_index(count.argmin(), count.shape)\n for k in np.flatnonzero(node[i, j, :]):\n node_copy, count_copy = node.copy(), count.copy()\n node_copy[i, j, :] = False\n node_copy[i, j, k] = True\n count_copy[i, j] = 1\n stack.append((node_copy, count_copy))\n\n i, j, k = node.nonzero()\n count[i, j] = k + 1\n return np.array(count)", "def McNuggets(n):\n \n '''if n == 0:\n return True\n for i in (6, 9, 20):\n if n >= i and McNuggets(n - i):\n return True\n return False\n '''\n \n for a in range(0,n):\n for b in range(0,n):\n for c in range(0,n):\n if 6*a+9*b+20*c == n:\n return True\n return False", "def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:\n\n @lru_cache(None)\n def dp(status, take, avaliable):\n if status == target: # all taken\n return 0\n bin_take = bin(take)[2:][::-1]\n for i,v in enumerate(bin_take):\n if v == '1':\n for j in edges[i]: # the indegree number changed during recursion\n indegree[j] -= 1\n if indegree[j] == 0:\n avaliable |= (1 << j)\n status |= (1 << i)\n # print('i, status', i, v, bin(status))\n # take -= (1 << i)\n\n lst = [i for i,v in enumerate(bin(avaliable)[2:][::-1]) if v == '1']\n # print(indegree)\n # print(lst)\n if not lst:\n res = 0\n # print('lst', lst, k)\n elif len(lst) <= k:\n res = dp(status, avaliable, 0)\n else:\n res = float('inf')\n for comb in combinations(lst, k):\n # print(comb)\n t, a = 0, avaliable\n for d in comb:\n t |= (1 << d)\n a -= (1 << d)\n res = min(res, 
dp(status, t, a))\n for i,v in enumerate(bin_take):\n if v == '1':\n for j in edges[i]: \n indegree[j] += 1\n return 1 + res\n\n self.counts = 0\n edges = defaultdict(list)\n indegree = Counter()\n for i,j in dependencies:\n edges[i].append(j)\n indegree[j] += 1\n\n courses = set(range(1, n+1))\n start = courses - indegree.keys()\n target = 2**(n+1) - 1\n avaliable = 0\n for i in start:\n avaliable |= (1 << i)\n\n return dp(1, 0, avaliable) - 1# first dp not take courses", "def test_teleport_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.teleport_circuit()\n targets = ref_algorithms.teleport_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def spotlessroomba_third_heuristic(state : SpotlessRoombaState) -> float:\n h = 0\n current_position = state.position\n dirty_locations = list(state.dirty_locations)\n partial_heuristic = INF\n closest_dirty = 0\n\n while dirty_locations:\n for i in range(len(dirty_locations)):\n manhattan = abs(current_position.row - dirty_locations[i].row) + abs(current_position.col - dirty_locations[i].col)\n if manhattan < partial_heuristic:\n partial_heuristic = manhattan\n closest_dirty = i\n h += partial_heuristic\n current_position = dirty_locations.pop(closest_dirty)\n partial_heuristic = INF\n \n return h", "def part1(problem_input: Iterable[str]) -> int:\n blueprints: dict[int, tuple[RobotCost, RobotCost, RobotCost, RobotCost]] = {}\n for s in problem_input:\n num = re.search(r\"Blueprint (\\d+):\", s)[1] # type: ignore\n r_r = re.search(r\"Each ore robot costs (\\d+) ore.\", s)[1] # type: ignore\n c_r = re.search(r\"Each clay robot costs (\\d+) ore.\", s)[1] # type: ignore\n _, b_r, b_c = re.search(r\"Each obsidian robot costs (\\d+) ore and (\\d+) clay.\", s) # type: ignore\n _, g_r, g_b = re.search(r\"Each geode robot costs (\\d+) ore and (\\d+) obsidian.\", s) # type: ignore\n blueprints[int(num)] = (\n RobotCost(int(r_r), 0, 0),\n RobotCost(int(c_r), 0, 0),\n RobotCost(int(b_r), int(b_c), 0),\n RobotCost(int(g_r), 0, int(g_b)),\n )\n\n def max_geodes_harvested(blueprint: int, time: int) -> int:\n orebot_cost, claybot_cost, obsidianbot_cost, geodebot_cost = blueprints[\n blueprint\n ]\n initial_state = EconomicState(0, 0, 0, 0, 1, 0, 0, 0)\n end_states = set([initial_state])\n for t in range(time):\n print(f\"{blueprint} {t}..\")\n future_states: set[EconomicState] = set()\n for s in end_states:\n future_states.add(\n EconomicState(\n ore=s.ore + s.ore_robots,\n clay=s.clay + s.clay_robots,\n obsdian=s.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n if (\n geodebot_cost.ore <= s.ore\n and geodebot_cost.clay <= s.clay\n and geodebot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - geodebot_cost.ore + s.ore_robots,\n clay=s.clay - geodebot_cost.clay + s.clay_robots,\n obsdian=s.obsdian\n - geodebot_cost.obsdian\n + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots + 1,\n )\n )\n if (\n obsidianbot_cost.ore <= s.ore\n and obsidianbot_cost.clay <= s.clay\n and obsidianbot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - 
obsidianbot_cost.ore + s.ore_robots,\n clay=s.clay - obsidianbot_cost.clay + s.clay_robots,\n obsdian=s.obsdian\n - obsidianbot_cost.obsdian\n + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots + 1,\n geode_robots=s.geode_robots,\n )\n )\n if (\n claybot_cost.ore <= s.ore\n and claybot_cost.clay <= s.clay\n and claybot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - claybot_cost.ore + s.ore_robots,\n clay=s.clay - claybot_cost.clay + s.clay_robots,\n obsdian=s.obsdian - claybot_cost.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots + 1,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n if (\n orebot_cost.ore <= s.ore\n and orebot_cost.clay <= s.clay\n and orebot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - orebot_cost.ore + s.ore_robots,\n clay=s.clay - orebot_cost.clay + s.clay_robots,\n obsdian=s.obsdian - orebot_cost.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots + 1,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n end_states = future_states\n if t > 20:\n high_score = max(s.geodes for s in end_states)\n if high_score > 0:\n end_states = set(s for s in end_states if s.geodes > high_score * 0.67)\n return max(s.geodes for s in end_states)\n\n return sum(n * max_geodes_harvested(n, 24) for n in blueprints.keys())", "def isSolvable(state):\n\n invCount = 0\n size = len(state)\n for i in range(0, size-1):\n for j in range(i+1, size):\n if (int(state[j]) and int(state[i]) and state[i] > state[j]):\n invCount += 1\n # return (invCount%2 == 0)\n return 1", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def final_hop(count, final_set, last_iteration, distances):\n res = list()\n\n for j in range(1, count):\n cj0 = distances[j][0] # cost of final hop of tour\n # min-cost from 0 to j, visiting everybody once +\n index_j = bits.generate_index(final_set, count, j)\n res.append(last_iteration[index_j] + cj0)\n return min(res)", "def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n #sort state.actions in increasing or decreasing based on max or min (alpha or beta)\r\n #use heuristics fn to get a value for each move (move is in format (x,y) where x and y are ints\r\n \r\n d = depthset[0] #this is the cutoff test depth value. 
if we exceed this value, stop\r\n cutoff_test=None\r\n sort_fn = [vitalpoint, eyeHeur]\r\n eval_fn = survivalheur \r\n #randnumheuristics \r\n player = state.to_move()\r\n prune = 0\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0]\r\n visited = {}\r\n heuristicInd = 0\r\n \r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #max wants decreasing\r\n #sorted(state.actions(), key = eval_sort, reverse = True)\r\n \r\n #sort by favorites first, returns a list of actions\r\n # for sorts in sort_fn:\r\n tempher = heuristicInd\r\n\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n ''''''\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) #+ vitscore.count(a)\r\n if v >= beta: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n \r\n return v\r\n\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n #sorted(state.actions(), key = eval_sort)\r\n #Shayne\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n for a in sortedactions: #state.actions():\r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n beta = min(beta, v)\r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn or (lambda state: state.utility(player))\r\n #by default, utility score is used\r\n \r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n #print state.actions()\r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i 
in range(1, len(visited)):\r\n if len(pruned) < i:\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove", "def day_03_b() -> int:\n instructions = read_instructions('aoc/aoc2015/input/03A.txt')\n santa_instructions = ''.join(w for i, w in enumerate(instructions) if is_odd(i))\n robo_instructions = ''.join(w for i, w in enumerate(instructions) if is_even(i))\n\n santa_houses = get_visited_houses(santa_instructions)\n robo_houses = get_visited_houses(robo_instructions)\n\n return len(merge_dicts(santa_houses, robo_houses))", "def safeJourney(Alist,s,d):\n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin\n \n #for n,w in G.adj[nmin].items():\n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #dcomp = dmin + w\n dcomp = max(w,dmin) #take largest value to record most dangerous segment\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n #path[n].extend(path[nmin])\n #path[n] = path[nmin]\n \n #path[n].append(n) #n not nmin\n print(path)\n # else:\n #dcomp = dmin + w\n # dcomp = max(w,dmin)\n # Udict[n] = dcomp\n #path[n].extend(path[nmin])\n #path[n].append(nmin) \n \n if nmin == d: #if current node is destination\n return path[d],Edict[d]\n return [] #no path", "def genRandTeam(nPos, totPlayers):\n # 0 1 2 3 4 5 6\n # nPos = [nFirst, nSecond, nThird, nShort, nCatcher, nOf, nDh]\n chromosome = []\n sum = 0\n count = 0\n\n\n for i in nPos: # general loop\n if count == 6: # when loop enters the nDh players it instead chooses from ALL positions five times\n for j in range(5): # to represent the 2 util positions and the 3 benches\n rNum = random.randint(0, totPlayers - 1) # random number of ANY player\n chromosome.append(rNum) # picks a random pos\n break # no more work needs to be done\n if count == 5: # this will occur before the previous loop; nOF must be iterated 3 times for 3 outfield spots\n for j in range(2):\n rNum2 = random.randint(0, i - 1)\n chromosome.append(rNum2 + sum) # nOF must be iterated 3 times for 3 outfield spots; i is on oF\n rNum3 = random.randint(0, i - 1)\n chromosome.append(rNum3 + sum)\n sum += i\n count += 1\n # first = random.randint(0,nPos[0])\n # second = random.randint(0,nPos[1])\n # third = random.randint(0,nPos[2])\n # short = random.randint(0,nPos[3])\n # catcher = random.randint(0,nPos[4])\n # of = [random.randint(0,nPos[5]), random.randint(0,nPos[5]), random.randint(0,nPos[5])] #THREE outfielders\n # rNum = [random.randint(0,6) for i in range(5)] #random numbers representing one of the nPos rosters\n # util = [random.randint(0,nPos[rNum[0]]), random.randint(0,nPos[rNum[1]])] #picks 2 random players from ANY roster\n # ben = [random.randint(0,nPos[rNum[2]]), random.randint(0,nPos[rNum[3]]), random.randint(0,nPos[rNum[4]])] # 
picks 3 random players form any roster\n # print first,second,third,short,catcher,of,util,ben\n # temp = Team()\n return chromosome", "def spreadOutAndFindDot(self, gameState):\n # Here are some useful elements of the startState\n currentPosition = gameState.getPacmanPosition(self.index)\n foodList = gameState.getFood().asList()\n walls = gameState.getWalls()\n randomFood = []\n problem = []\n\n #problem = AnyFoodSearchProblem(gameState, self.index)\n\n # if min(manhattan(currentPosition, foodPosition) for foodPosition in food.asList()) > 10:\n # return [Directions.STOP]\n #print(\"self.targets = \", self.targets)\n if self.index == 0:\n TargetFood = ClosestFood(currentPosition, foodList)\n #self.targets.append(TargetFood)\n problem = PositionSearchProblem(gameState, 0, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 1:\n TargetFood = ClosestFood(currentPosition, foodList)\n \"\"\"\n want to find a way to avoid both agents coming up with the same target. But the below doesn't work because\n each agent has their own self.targets. How to keep a common list of targets?\n \"\"\"\n # if TargetFood in self.targets:\n # tempFoodList = foodList.copy()\n # tempFoodList.pop(tempFoodList.index(TargetFood))\n # TargetFood = ClosestFood(currentPosition, tempFoodList)\n # self.targets.append(TargetFood)\n # else:\n # self.targets.append(TargetFood)\n problem = PositionSearchProblem(gameState, 1, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 2:\n TargetFood = RandomFood(currentPosition, foodList)\n problem = PositionSearchProblem(gameState, 2, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 3:\n TargetFood = RandomFood(currentPosition, foodList)\n problem = PositionSearchProblem(gameState, 3, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n #return search.bfs(problem)\n\n #util.raiseNotDefined()", "def sea_execution(board, position, role):\n quitt = False\n if position == 'comp':\n #print(1)\n #temporary for dumb AI\n #create and print a list of coastal, friendly regions where norse is not the ONLY one\n \n possible_region_list = []\n \n #loops through list of friendly, coastal, not just Norse regions to append to a possible_region_list\n for region in board.get_controlled_regions(role):\n #print(2)\n coastal = False\n just_norse = False\n if region.coast:\n coastal = True\n if len(region.blocks_present) == 1 and region.blocks_present[0].name.upper() == 'NORSE':\n just_norse = True\n \n if coastal and not just_norse:\n possible_region_list.append(region)\n \n \n #loops through list of friendly, coastal regions to append to a possible_final_region_list\n possible_final_region_list = []\n for region in board.get_controlled_regions(role): \n #print(3) \n if region.coast:\n possible_final_region_list.append(region)\n \n \n \n if len(possible_final_region_list) >= 2:\n #if you want to add in last-min strategy, do it here\n #random region from possible list\n england = board.regions[22]\n if england in possible_region_list:\n original_region = england\n else:\n original_region = possible_region_list[random.randint(0, len(possible_region_list) - 1)]\n #remove the original region from the possible end regions\n 
possible_final_region_list.remove(original_region)\n \n #possible_block_list\n #list of possible blocks to move (present in region) and not norse\n possible_block_list = []\n for block in original_region.blocks_present:\n if block.name != 'NORSE':\n possible_block_list.append(block)\n \n move_block_list = []\n blocks_moved = 0\n #print(4)\n\n while blocks_moved < 2:\n #print(5)\n block = possible_block_list[random.randint(0, len(possible_block_list)-1)]\n #if it's not already on the list,append to move_block_list\n if block not in move_block_list:\n move_block_list.append(block)\n blocks_moved+=1\n elif block in move_block_list and len(possible_block_list) == 1:\n blocks_moved+=1\n else:\n print('neither condition was met so this is an infinite loop')\n \n \n #print(6) \n new_region = possible_final_region_list[random.randint(0, len(possible_final_region_list) - 1)]\n \n for block in move_block_list:\n \n board.add_to_location(block, new_region)\n print(block.name + ' moved from ' + original_region.name + ' to ' + new_region.name)\n \n else:\n print('There are not enough friendly regions with which to play this card.')\n \n \n #add in if it's not possible\n elif position == 'opp':\n \n \n possible_region_list = []\n \n #loops through list of friendly, coastal, not just Norse regions to append to a possible_region_list\n for region in board.get_controlled_regions(role):\n coastal = False\n just_norse = False\n if region.coast:\n coastal = True\n if len(region.blocks_present) == 1 and region.blocks_present[0].name.upper() == 'NORSE':\n just_norse = True\n \n if coastal and not just_norse:\n possible_region_list.append(region)\n \n \n #loops through list of friendly, coastal regions to append to a possible_final_region_list\n possible_final_region_list = []\n for region in board.get_controlled_regions(role): \n if region.coast:\n possible_final_region_list.append(region)\n \n \n \n if len(possible_final_region_list) >= 2:\n \n print('Possible origin regions:')\n for region in possible_region_list:\n print(region.name)\n \n #user input region, check if in possible list\n valid_region = False\n while not valid_region:\n \n original_region_name = input('What region would you like to move block(s) from? Enter a name or \\'none\\'.\\n>').upper()\n \n if original_region_name != 'NONE':\n \n original_region = search.region_name_to_object(board, original_region_name)\n \n if original_region and original_region in possible_region_list:\n valid_region = True\n else:\n print('Invalid region.')\n else:\n quitt = True\n \n if not quitt:\n #remove the original region from the possible end regions\n possible_final_region_list.remove(original_region)\n \n #possible_block_list\n #list of possible blocks to move (present in region) and not norse\n possible_block_list = []\n for block in original_region.blocks_present:\n if block.name != 'NORSE':\n possible_block_list.append(block)\n \n print('Possible blocks:')\n for block in possible_block_list:\n print(block.name)\n \n \n move_block_list = []\n blocks_moved = 0\n quittt = False\n block_name = ''\n while blocks_moved < 2 and not quittt:\n if block_name != 'NONE':\n valid_block = False\n while not valid_block:\n \n \n block_name = input('Which block would you like to move? 
Enter a name or \\'none\\'.\\n>').upper()\n \n if block_name != 'NONE':\n \n block_to_move = search.block_name_to_object(possible_block_list, block_name)\n \n if block_to_move and block_to_move not in move_block_list:\n valid_block = True\n move_block_list.append(block_to_move)\n blocks_moved+=1\n \n elif block in move_block_list and len(possible_block_list) == 1:\n blocks_moved=1\n \n else:\n print('Invalid block.')\n continue\n else:\n valid_block = True\n if len(move_block_list) == 1:\n quittt = True\n quitt = False\n if len(move_block_list) > 0: \n print('Possible final regions:')\n for region in possible_final_region_list:\n print(region.name)\n \n #user input region, check if in possible list\n valid_region = False\n while not valid_region:\n \n new_region_name = input('What region would you like to move block(s) to? Enter a name or \\'none\\'.\\n>').upper()\n \n if new_region_name != 'NONE':\n \n new_region = search.region_name_to_object(board, new_region_name)\n \n if new_region and new_region in possible_final_region_list:\n valid_region = True\n else:\n print('Invalid region.')\n continue\n else:\n valid_region = True\n quitt = True\n \n if not quitt:\n \n for block in move_block_list:\n \n board.add_to_location(block, new_region)\n print(block.name + ' moved from ' + original_region.name + ' to ' + new_region.name)\n \n else:\n print('There are not enough friendly coastal regions with which to play this card.')", "def get_high_one(self, state):\n non_zero_idxs = [-2, -3, -4]\n idx_idxs = np.random.randint(low=0, high=3, size=10)\n for idx_idx in idx_idxs:\n non_zero_idx = non_zero_idxs[idx_idx]\n if self.potential(state) + self.weights[non_zero_idx] <= self.initial_potential:\n state[non_zero_idx] += 1\n break\n return state", "def find_initial_betas(self):\n if self.verbose >= 2:\n print(\"\\r{}\\rFinding initial betas\".format(' ' * 80), end='', file=sys.stderr)\n # Find ranges of states for each CRE\n Tranges = numpy.zeros((self.rna.shape[0], 2), dtype=numpy.int32)\n for i in range(self.rna_indices.shape[0] - 1):\n s = self.rna_indices[i]\n e = self.rna_indices[i + 1]\n if e - s == 0:\n continue\n s1 = self.state_indices[i]\n e1 = self.state_indices[i + 1]\n if e1 - s1 == 0:\n continue\n starts = numpy.searchsorted(self.state['end'][s1:e1],\n self.rna['TSS'][s:e] - self.initialization_dist,\n #* numpy.logical_not(self.rna['strand'][s:e]),\n side='right') + s1\n stops = numpy.searchsorted(self.state['start'][s1:e1],\n self.rna['TSS'][s:e] + self.initialization_dist,\n #* self.rna['strand'][s:e],\n side='left') + s1\n Tranges[s:e, 0] = starts\n Tranges[s:e, 1] = stops\n # Divide list across multiple processes\n tss_queue = multiprocessing.JoinableQueue()\n results_queue = multiprocessing.JoinableQueue()\n processes = []\n for i in range(self.threads):\n processes.append(multiprocessing.Process(\n target=self._assign_promoter_state, args=(tss_queue, results_queue, Tranges,\n self.initialization_dist,\n self.rng.randint(99999), True)))\n processes[-1].daemon = True\n processes[-1].start()\n step = int(self.rna_indices[-1] / max(self.threads, 1) / 4.)\n for i in range(self.rna_indices.shape[0] - 1):\n for j in range(self.rna_indices[i], self.rna_indices[i + 1], step):\n stop = min(self.rna_indices[i + 1], j + step)\n tss_queue.put((j, stop))\n for i in range(self.threads):\n tss_queue.put(None)\n # Even though there may be multiple reps for a celltype, we only find the average state proportion across reps\n Tstates = numpy.zeros((self.rna.shape[0], self.cellN, self.stateN), 
dtype=numpy.float32)\n finished = 0\n while finished < self.threads:\n results = results_queue.get(True)\n if results is None:\n finished += 1\n continue\n start, stop = results[:2]\n Tstates[start:stop, :, :] = results[2]\n Tstates2 = numpy.copy(Tstates)\n Tstates = Tstates[:, self.lmask, :]\n Tstates /= numpy.sum(Tstates, axis=2, keepdims=True)\n betas = numpy.linalg.lstsq(Tstates.reshape(-1, Tstates.shape[2], order='C'),\n self.rna['rna'][:, self.lmask].reshape(-1, order='C'),\n rcond=None)[0]\n self.initial_betas = betas\n if self.verbose >= 2:\n print(\"\\r{}\\r\".format(' ' * 80), end='', file=sys.stderr)", "def __find_all_moves(self, tower) -> list:\r\n choice = []\r\n for height in range(1,len(tower.tower)-2):\r\n for index in range(1,4):\r\n if self.stat_brain.is_valid(height, index, tower):\r\n choice.append((height, index))\r\n \r\n r.shuffle(choice)\r\n return choice", "def calculateOptimal(self) -> (list, int):\n\t\tcombinations = list(itertools.product(*self.clusters))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)", "def test_teleport_default_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.teleport_circuit()\n targets = ref_algorithms.teleport_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def __find_random_moves(self, tower) -> int:\r\n while True:\r\n height, index = r.randint(1, len(tower.tower)-1), r.randint(1,3)\r\n if tower.tower[height-1].blocks[index-1] == 1:\r\n return height, index", "def split_candy(case):\n\n s_best_sum = 0\n\n# print case\n if len(case) <= 1:\n return \"NO\"\n\n # annotate values to protect against duplicates\n annotated_case = [ (i, x) for i,x in enumerate(case) ]\n# print annotated_case\n\n for length in range(1, len(annotated_case)):\n for s_pile in itertools.combinations(annotated_case, length):\n p_pile = set(annotated_case).difference(s_pile)\n\n # unzip\n s_pile = zip(*s_pile)[1]\n p_pile = zip(*p_pile)[1]\n\n# print s_pile, p_pile\n\n s_sum = reduce(lambda x, y: x+y, s_pile)\n s_xor = reduce(lambda x, y: x^y, s_pile)\n p_xor = reduce(lambda x, y: x^y, p_pile)\n\n# print s_pile, s_sum, s_xor, p_pile, p_xor\n if s_xor == p_xor:\n# print \"match\"\n s_best_sum = max(s_sum, s_best_sum)\n\n return s_best_sum or \"NO\"", "def checkNumNeighbors():", "def buyable(n: int) -> bool:\n if n in [4, 6, 25]:\n return True\n elif n < 4:\n return False\n else:\n buyability = False\n for size in [4, 6, 25]:\n buyability |= buyable(n - size)\n return buyability", "def get_heurisitc_cost_stacks(element, stack_index, goal_state, unused_stacks, unused_index):\n for k,v in goal_state.items():\n if element in v:\n return (abs(stack_index - k), v.index(element))\n if stack_index in unused_stacks:\n return (0, -1)\n else:\n return (abs(unused_index - stack_index), -1)", "def gene_finder(dna):\n orfs = find_all_ORFs_both_strands(dna)\n print(orfs)\n threshold = longest_ORF_noncoding(dna, 1000)\n print('threshold is', threshold)\n print('number of orfs:', len(orfs))\n aa_sequences = []\n i = 0\n while i < len(orfs):\n print(len(orfs[i]))\n if len(orfs[i]) > threshold:\n print('if')\n aa_sequences += [coding_strand_to_AA(orfs[i])]\n i += 1\n print(aa_sequences)", "def four_stools_hanoi(n, 
first_stool, second_stool, third_stool, fourth_stool):\n if n == 1:\n return [(first_stool, fourth_stool)]\n else:\n i = find_i(n)\n a = four_stools_hanoi(n - i, first_stool, third_stool, fourth_stool,\n second_stool)\n b = three_stools_hanoi(i, first_stool, third_stool, fourth_stool)\n c = four_stools_hanoi(n - i, second_stool, third_stool, first_stool,\n fourth_stool)\n return a + b + c", "def find_all_ORFs_oneframe(dna):", "def _check_sonar_obstacles(self):\n # TODO: what's a good number?\n BLOCKED_THRESHOLD = 0.7\n\n rate = rospy.Rate(10) # 10 hz\n count = 10\n left = 0\n center = 0\n right = 0\n\n for i in range(count):\n obstacle = self.swarmie.get_obstacle_condition()\n\n if obstacle & Obstacle.SONAR_LEFT == Obstacle.SONAR_LEFT:\n left += 1\n if (obstacle & Obstacle.SONAR_CENTER ==\n Obstacle.SONAR_CENTER):\n center += 1\n if obstacle & Obstacle.SONAR_RIGHT == Obstacle.SONAR_RIGHT:\n right += 1\n\n rate.sleep()\n\n left_blocked = left / count > BLOCKED_THRESHOLD\n center_blocked = center / count > BLOCKED_THRESHOLD\n right_blocked = right / count > BLOCKED_THRESHOLD\n\n return left_blocked, center_blocked, right_blocked", "def findAstromCorrs(self):\n self.logfile.write(\"Entered findAstromCorrs - will run: \"+\\\n \"makeGSCcat(), makeObjcats(), doMatching().\")\n\n if self.makeGSCcat() != 0:\n return -1\n if self.makeObjcats()!= 0:\n return -1\n if self.doMatching() != 0:\n # here we want to remake the GSCcat using a \"chopSpur\" flag,\n # if the cat has a goodly number of objects\n if(self.Ncull >= 10):\n print \"Retrying matchup with only GSC objs detected in 2 bands...\"\n self.logfile.write(\"Retrying matchup with only GSC objs detected in 2 bands...\")\n if self.makeGSCcat(chopSpur=1) != 0:\n return -1\n if self.makeObjcats()!= 0:\n return -1\n if self.doMatching() != 0:\n return -1\n\n return 0", "def find_best_k(data, anots, neibhours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neibhours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n else:\n total = 0\n for i in range(1, min(n, 3) + 1):\n total += count_ways(n - i)\n return total", "def spawn_ok(game):\n me = game.me\n shipyard_cell = game.game_map[me.shipyard]\n\n # % turns above mining rate to dropoff the halite, will typically be about 2?\n mining_over_head = 2\n ship_count = len(me.get_ships())\n\n #\n # absolute constraints (order can be important)\n #\n\n if ship_count >= MAX_SHIPS:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. MAX ships reached\".format())\n return False\n\n if me.halite_amount < constants.SHIP_COST:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. 
Insufficient halite\".format())\n return False\n\n #\n # conditional constraints\n #\n\n logging.debug(\"shipyard_cell.is_occupied: {}\".format(shipyard_cell.is_occupied))\n if shipyard_cell.is_occupied:\n logging.debug(\"shipyard_cell.ship.owner == me.id: {}\".format(shipyard_cell.ship.owner == me.id))\n\n # watch for collisions with owner only, note this will be 1 turn behind\n occupied_cells = []\n if shipyard_cell.is_occupied and shipyard_cell.ship.owner == me.id:\n occupied_cells.append(shipyard_cell.position)\n\n logging.debug(\"oc1: {}\".format(occupied_cells))\n\n # entry lane are N/S\n n_cell = shipyard_cell.position.directional_offset(Direction.North)\n s_cell = shipyard_cell.position.directional_offset(Direction.South)\n e_cell = shipyard_cell.position.directional_offset(Direction.East)\n w_cell = shipyard_cell.position.directional_offset(Direction.West)\n for pos in [n_cell, s_cell, e_cell, w_cell]:\n if game.game_map[pos].is_occupied:\n occupied_cells.append(pos)\n\n logging.debug(\"oc2: {}\".format(occupied_cells))\n\n # need to keep track of ships docking instead, a ship in an adjacent cell could be leaving\n if occupied_cells:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. Occupied cells: {}\".format(occupied_cells))\n return False\n\n return True", "def solution(self) -> State:", "def in_bomb_range(self, game_state: dict):\n is_in_bomb_range = False\n agent_position = game_state['self'][3]\n agent_position = list(agent_position)\n\n for bomb in game_state['bombs']:\n if agent_position == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[0] - i - 1 >= 0:\n agent_search[0] = agent_position[0] - i - 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[0] + i + 1 <= 16:\n agent_search[0] = agent_position[0] + i + 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[1] - i - 1 >= 0:\n agent_search[1] = agent_position[1] - i - 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[1] + i + 1 <= 16:\n agent_search[1] = agent_position[1] + i + 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n # check if a stone wall is between the agent an the bomb\n if is_in_bomb_range:\n if agent_position[0] == list(bomb[0])[0]:\n if agent_position[1] < list(bomb[0])[1]:\n for i in range(agent_position[1], list(bomb[0])[1]):\n if game_state['field'][agent_position[0]][i] == -1:\n is_in_bomb_range = False\n else:\n for i in range(list(bomb[0])[1], agent_position[1]):\n if game_state['field'][agent_position[0]][i] == -1:\n is_in_bomb_range = False\n elif agent_position[1] == list(bomb[0])[1]:\n if agent_position[0] < list(bomb[0])[0]:\n for i in range(agent_position[0], list(bomb[0])[0]):\n if game_state['field'][i][agent_position[1]] == -1:\n is_in_bomb_range = False\n else:\n for i in range(list(bomb[0])[0], agent_position[0]):\n if game_state['field'][i][agent_position[1]] == -1:\n is_in_bomb_range = False\n\n return is_in_bomb_range", "def part2(problem_input: Iterable[str]) -> int:\n\n blueprints: dict[int, tuple[RobotCost, RobotCost, RobotCost, RobotCost]] = {}\n for s in problem_input:\n num = re.search(r\"Blueprint (\\d+):\", s)[1] # type: ignore\n r_r = re.search(r\"Each ore robot costs (\\d+) 
ore.\", s)[1] # type: ignore\n c_r = re.search(r\"Each clay robot costs (\\d+) ore.\", s)[1] # type: ignore\n _, b_r, b_c = re.search(r\"Each obsidian robot costs (\\d+) ore and (\\d+) clay.\", s) # type: ignore\n _, g_r, g_b = re.search(r\"Each geode robot costs (\\d+) ore and (\\d+) obsidian.\", s) # type: ignore\n blueprints[int(num)] = (\n RobotCost(int(r_r), 0, 0),\n RobotCost(int(c_r), 0, 0),\n RobotCost(int(b_r), int(b_c), 0),\n RobotCost(int(g_r), 0, int(g_b)),\n )\n\n def max_geodes_harvested(blueprint: int, time: int) -> int:\n orebot_cost, claybot_cost, obsidianbot_cost, geodebot_cost = blueprints[\n blueprint\n ]\n initial_state = EconomicState(0, 0, 0, 0, 1, 0, 0, 0)\n end_states = set([initial_state])\n for t in range(time):\n print(f\"{blueprint} {t}..\")\n future_states: set[EconomicState] = set()\n for s in end_states:\n future_states.add(\n EconomicState(\n ore=s.ore + s.ore_robots,\n clay=s.clay + s.clay_robots,\n obsdian=s.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n if (\n geodebot_cost.ore <= s.ore\n and geodebot_cost.clay <= s.clay\n and geodebot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - geodebot_cost.ore + s.ore_robots,\n clay=s.clay - geodebot_cost.clay + s.clay_robots,\n obsdian=s.obsdian\n - geodebot_cost.obsdian\n + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots + 1,\n )\n )\n if (\n obsidianbot_cost.ore <= s.ore\n and obsidianbot_cost.clay <= s.clay\n and obsidianbot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - obsidianbot_cost.ore + s.ore_robots,\n clay=s.clay - obsidianbot_cost.clay + s.clay_robots,\n obsdian=s.obsdian\n - obsidianbot_cost.obsdian\n + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots + 1,\n geode_robots=s.geode_robots,\n )\n )\n if (\n claybot_cost.ore <= s.ore\n and claybot_cost.clay <= s.clay\n and claybot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - claybot_cost.ore + s.ore_robots,\n clay=s.clay - claybot_cost.clay + s.clay_robots,\n obsdian=s.obsdian - claybot_cost.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots,\n clay_robots=s.clay_robots + 1,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n if (\n orebot_cost.ore <= s.ore\n and orebot_cost.clay <= s.clay\n and orebot_cost.obsdian <= s.obsdian\n ):\n future_states.add(\n EconomicState(\n ore=s.ore - orebot_cost.ore + s.ore_robots,\n clay=s.clay - orebot_cost.clay + s.clay_robots,\n obsdian=s.obsdian - orebot_cost.obsdian + s.obsdian_robots,\n geodes=s.geodes + s.geode_robots,\n ore_robots=s.ore_robots + 1,\n clay_robots=s.clay_robots,\n obsdian_robots=s.obsdian_robots,\n geode_robots=s.geode_robots,\n )\n )\n end_states = future_states\n if t >= 25:\n high_score = max(s.geodes for s in end_states)\n if high_score > 0:\n end_states = set(s for s in end_states if s.geodes > high_score * 0.67)\n g = max(s.geodes for s in end_states)\n print(f\"{blueprint} >>>{g}<<<\")\n return g\n\n return prod(max_geodes_harvested(n, 32) for n in [1,2,3])", "def get_n_best(self):\n pass", "def find_all_ORFs(dna):\n \n # YOUR IMPLEMENTATION HERE", "def 
all_valid(self, tower) -> int:\r\n count = 0\r\n for layer in range(1, len(tower.tower)):\r\n for index in range(1, 4):\r\n if self.is_valid(layer, index, tower):\r\n count += 1\r\n \r\n return count", "def findIslands(self):\n\n # First lets find the shores.\n shoreList = self.findShores()\n\n # Initialize Blank Values.\n N, S, E, W = (None for i in range(4))\n\n # Next, we find all the furthest extremities among all shore lists.\n # In theory, the only extremities that can occur for shorelines that\n # Don't belong to the main pond body are along the map edge.\n for index, shore in enumerate(shoreList):\n extremityHash = shore.findExtremities()\n if index == 0:\n N, S, E, W = ([shore] for i in range(4))\n continue\n if extremityHash['N'][0].x < N[0].findExtremities()['N'][0].x:\n N = [shore]\n elif extremityHash['N'][0].x == N[0].findExtremities()['N'][0].x:\n N.append(shore)\n if extremityHash['S'][0].x > S[0].findExtremities()['S'][0].x:\n S = [shore]\n elif extremityHash['S'][0].x == S[0].findExtremities()['S'][0].x:\n S.append(shore)\n if extremityHash['E'][0].y > E[0].findExtremities()['E'][0].y:\n E = [shore]\n elif extremityHash['E'][0].y == E[0].findExtremities()['E'][0].y:\n E.append(shore)\n if extremityHash['W'][0].y < W[0].findExtremities()['W'][0].y:\n W = [shore]\n elif extremityHash['W'][0].y == W[0].findExtremities()['W'][0].y:\n W.append(shore)\n\n # Now, lets flatten the list of cardinal extremities\n flatList = [val for sublist in [N, S, E, W] for val in sublist]\n counter = Counter(flatList)\n\n # In theory, the main pond shore should have the most extremities\n probablyPond = counter.most_common(1)\n\n # Wow, what a piece of crap. I feel ashamed of the next 6 lines.\n if probablyPond[0][0] < 4:\n raise Exception(\"Largest Pond does not have 4 max points.\"\n \" Something is horribly Wrong.\")\n if len(probablyPond) != 1:\n raise Exception(\"Equal number of extremities in pond?\"\n \" How can that be?\")\n\n probablyPond = probablyPond[0][0]\n\n # Find any map edges and add them to the Plain Blob Object mapEdge.\n self.mapEdge = self.findMapEdge()\n\n # Well, this probably isn't an island, so drop it from the list.\n shoreList.remove(probablyPond)\n\n # Find any map edges for the island, and create Island Objects.\n islands = list()\n for island in shoreList:\n islands.append(Island(island.points,\n self.analyzeData,\n self.elevation))\n return islands", "def branching_factor(data, loc):\n\n return 20", "def test_specific_case(self):\n fasta_file = os.path.normpath(get_fasta_file('T00759-tiny.fa'))\n seed = 'AAAACCCA'\n W = len(seed)\n num_sites = 4\n self.options.max_num_sites = num_sites\n self.options.min_num_sites = num_sites\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n\n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_sites)\n best_w_mer_finder()\n if len(best_w_mer_finder.best_w_mers) < num_sites:\n if len(best_w_mer_finder.best_w_mers) != model.num_W_mers:\n raise ValueError('Did not find enough W-mers')\n \n # We want to get these W-mers\n # \n # 2011-08-09 10:11:32,846 - INFO - Z=8.00e-02; pos= 313 +; AAAACCCA; AAAACCCA\n # 2011-08-09 10:11:32,846 - INFO - Z=4.37e-02; pos= 668 -; TGAGTTTT; AAAACTCA\n # 2011-08-09 10:11:32,846 - INFO - Z=1.37e-02; pos= 710 -; 
TGGTTTTC; GAAAACCA\n # 2011-08-09 10:11:32,846 - INFO - Z=1.37e-02; pos= 681 -; TGGTTCTT; AAGAACCA\n # \n for wmer, (global_pos, rev_comp) in zip(best_w_mer_finder.best_w_mers, [(313, False), (668, True), (710, True), (681, True)]):\n if wmer.global_pos != global_pos and wmer.Z < model.calculate_Z(global_pos, rev_comp):\n raise ValueError('Got wrong W-mers')", "def _max_bits_used_in_function_in_round():\n word_size = 16\n bits_occupied = [word_size] * len(cost_functions)\n for (pa, pb) in all_possible_pairs:\n for i in range(len(cost_functions)):\n max_sum_of_cost = num_pairings_in_round * \\\n cost_functions[i](pa, pb)\n while (max_sum_of_cost >= 2**bits_occupied[i]):\n bits_occupied[i] *= 2\n bits_occupied = [2*b for b in bits_occupied] # Paranoia\n for b in bits_occupied:\n assert(b % word_size == 0)\n return max(bits_occupied)", "def lowest_cost_search(start, successors, is_goal, action_cost):\r\n Fail = []\r\n explored = set() # set of states we have visited\r\n frontier = [[start]] # ordered list of paths we have blazed\r\n while frontier:\r\n path = frontier.pop(0)\r\n state1 = final_state(path)\r\n if is_goal(state1):\r\n return path\r\n explored.add(state1)\r\n pcost = path_cost(path)\r\n for (state, action) in successors(state1).items():\r\n if state not in explored:\r\n total_cost = pcost + action_cost(action)\r\n path2 = path + [(action, total_cost), state]\r\n add_to_frontier(frontier, path2)\r\n return Fail", "def possible_rolls(D1,n):\n possibilities = []\n for D2 in range(1,7):\n for D3 in range(1,7):\n if D1+D2+D3 == n:\n possibilities.append((D1,D2,D3))\n return possibilities", "def correct_barcode_bitwise(query_seq, seq_possibilities,\r\n nt_to_bits=DEFAULT_GOLAY_NT_TO_BITS):\r\n if nt_to_bits is None:\r\n nt_to_bits = DEFAULT_NT_TO_BITS\r\n dists = []\r\n query_seq_bits = seq_to_bits(query_seq, nt_to_bits)\r\n for seq in seq_possibilities:\r\n possible_seq_bits = seq_to_bits(seq, nt_to_bits)\r\n dists.append(hamming_dist(query_seq_bits, possible_seq_bits))\r\n min_dist = min(dists)\r\n number_mins = dists.count(min_dist)\r\n if number_mins > 1:\r\n return None, min_dist\r\n else:\r\n best_hit = seq_possibilities[dists.index(min_dist)]\r\n return best_hit, min_dist", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n 
fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def gene_finder(dna, threshold):\n finder = []\n twoStrands = find_all_ORFs_both_strands(dna) #this calls the function that finds the compliment of dna and finds all ORFs \n print twoStrands \n for k in range(len(twoStrands)): #go through the list \"twoStrands\"\n if twoStrands[k]>threshold: #if the length of \n print twoStrands[k]\n print len(twoStrands[k])\n finder.append(twoStrands[k])\n return finder", "def test_possibilites(self):\n self.assertEqual(self.RNA(\"\").possibilities(), 1)\n self.assertEqual(self.RNA(\"ACGUgcaucagUCGuGCAU\").possibilities(), 1)\n self.assertEqual(self.RNA(\"N\").possibilities(), 4)\n self.assertEqual(self.RNA(\"R\").possibilities(), 2)\n self.assertEqual(self.RNA(\"H\").possibilities(), 3)\n self.assertEqual(self.RNA(\"nRh\").possibilities(), 24)\n self.assertEqual(\n self.RNA(\"AUGCnGUCAg-aurGauc--gauhcgauacgws\").possibilities(), 96\n )", "def lowest_cost_search(start, successors, is_goal, action_cost):\n explored = set()\n frontier = [ [start] ]\n if is_goal(start):\n return frontier[0]\n while frontier:\n path = frontier.pop(0)\n state1 = final_state(path)\n if is_goal(state1):\n return path\n explored.add(state1)\n pcost = path_cost(path)\n for (state, action) in successors(state1).items():\n if state not in explored:\n total_cost = pcost +action_cost(action)\n path2 = path + [(action, total_cost), state]\n frontier.append(path2)\n add_to_frontier(frontier, path2)\n return Fail", "def test_solvation_with_additional_ions(self):\n waters = np.random.randint(1000, 10000)\n cations = ['LI', 'Na+', 'K+', 'RB', 'CS']\n anions = ['F', 'Cl-', 'BR', 'IOD']\n n_cations = np.random.randint(1, 10)\n n_anions = np.random.randint(1, 10)\n random_cation = random.choice(cations)\n random_anion = random.choice(anions)\n log.debug('Trying {} waters with additional ions...'.format(waters))\n solvate(tleapfile='./cb6-but/tleap.in', pdbfile='cb6-but.pdb',\n bufferwater=waters, neutralize=0,\n addions=[random_cation, n_cations, random_anion, n_anions])\n # These should come in the RESIDUE_LABEL region of the prmtop and be before all the water.\n cation_number = sp.check_output([\"grep -A 99 RESIDUE_LABEL ./cb6-but/solvated.prmtop | \" +\n \"grep -oh '{} ' | wc -w\".format(random_cation)],\n shell=True)\n anion_number = sp.check_output([\"grep -A 99 RESIDUE_LABEL ./cb6-but/solvated.prmtop | \" +\n \"grep -oh '{} ' | wc -w\".format(random_anion)],\n shell=True)\n # Have to think about what to do here...\n # log.debug('Expecting...')\n # log.debug('cation = {}\\tn_cations={}'.format(random_cation, n_cations))\n # log.debug('anion = {}\\t n_anions={}'.format(random_anion, n_anions))\n # log.debug('Found...')\n # log.debug(' n_cations={}'.format(cation_number))\n # log.debug(' n_anions={}'.format(anion_number))\n\n self.assertTrue(int(cation_number) == n_cations and int(anion_number) == n_anions)", "def numberOfBoomerangsSlow(self, points):\n\n def is_boomerang(i, j, k):\n dist_a = pow(j[0] - i[0], 2) + pow(j[1] - i[1], 2)\n dist_b = pow(k[0] - i[0], 2) + pow(k[1] - i[1], 2)\n return dist_a == dist_b\n\n total = 0\n for i in points:\n for j in points:\n for k in points:\n if i != j and j != k and is_boomerang(i, j, k):\n total += 1\n 
return total", "def canto_oposto_6(tab, jog):\r\n jog*=-1\r\n if obter_linha(tab,1)[0]==jog and eh_posicao_livre(tab,9):\r\n return 9\r\n if obter_linha(tab,1)[2]==jog and eh_posicao_livre(tab,7):\r\n return 7\r\n if obter_linha(tab,3)[0]==jog and eh_posicao_livre(tab,3):\r\n return 3\r\n if obter_linha(tab,3)[2]==jog and eh_posicao_livre(tab,1):\r\n return 1", "def _strength5(self, tournoi, best=5):\n L = self.sorted_team_list()\n s = 0\n for i,e in enumerate(L):\n if tournoi in e._positions:\n s += 1\n if s == best:\n return i+1" ]
[ "0.6110882", "0.60308844", "0.5878015", "0.5863746", "0.5748723", "0.5709508", "0.56566536", "0.56528664", "0.5624614", "0.5575636", "0.5567551", "0.55536634", "0.5527198", "0.55191416", "0.5517331", "0.5514932", "0.5513137", "0.5498134", "0.5493011", "0.54866076", "0.5477608", "0.5471907", "0.54659927", "0.5455559", "0.5444958", "0.5419081", "0.5418015", "0.54089713", "0.53922737", "0.5386362", "0.53789437", "0.5374194", "0.53682625", "0.5359618", "0.53584284", "0.53495204", "0.5347124", "0.5346038", "0.5337049", "0.5336462", "0.5309436", "0.5309209", "0.5301492", "0.53006816", "0.5296525", "0.52879256", "0.528769", "0.5286726", "0.5285839", "0.52803755", "0.52803737", "0.5280032", "0.5274526", "0.5271573", "0.52688456", "0.5262489", "0.5252058", "0.52460986", "0.524433", "0.5242488", "0.52321696", "0.5229745", "0.52252674", "0.5222987", "0.52209073", "0.52206063", "0.52203536", "0.52184343", "0.5216354", "0.52153623", "0.52120864", "0.5211499", "0.52112097", "0.5209768", "0.52096236", "0.52041936", "0.52013737", "0.52008986", "0.5196617", "0.5194507", "0.5188689", "0.51825035", "0.5179124", "0.51790565", "0.51770186", "0.5172212", "0.51634425", "0.5159106", "0.51571864", "0.51549774", "0.51541835", "0.5153991", "0.5153843", "0.514682", "0.51420647", "0.5141114", "0.5136459", "0.5129706", "0.5126848", "0.5126787" ]
0.5761488
4
Returns a Python normalized String object from unicode.
import unicodedata

def normalize_unicode_data(data):
    # NFKD-decompose the unicode input, then drop anything that cannot be encoded as ASCII.
    normalized_data = unicodedata.normalize('NFKD', data).encode('ascii', 'ignore')
    return normalized_data
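A minimal usage sketch for the document field above (the sample string is illustrative and not part of the dataset row): NFKD decomposition splits each accented character into a base letter plus a combining mark, and the ASCII encode with 'ignore' then discards the marks, leaving plain ASCII bytes.

# Illustrative only, assuming the function above is in scope.
print(normalize_unicode_data(u"Café Münster"))  # -> b'Cafe Munster' (a bytes object on Python 3)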
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, string):\r\n if isinstance(string, unicode):\r\n u = string\r\n else:\r\n u = unicode(string, self.encoding)\r\n return u", "def as_unicode(string):\n return same_string_type_as(\"\", string)", "def _as_unicode(s):\n if isinstance(s, str):\n return s\n # Assume it is a bytes string\n # Note ISO-8859-1 aka Latin-1 preserves first 256 chars\n return codecs.latin_1_decode(s)[0]", "def u(s):\n if _IS_PYTHON_3 or type(s) == unicode:\n return s\n else:\n return codecs.unicode_escape_decode(s)[0]", "def make_unicode(string):\n if sys.version < '3' and isinstance(string, str):\n return unicode(string.decode('utf-8'))\n\n return string", "def force_unicode(s):\n return (s.decode('utf8')\n if isinstance(s, str)\n else unicode(s))", "def asunicode(s):\n if isinstance(s, bytes):\n return s.decode('utf-8', 'replace')\n else:\n return s", "def to_unicode(s, encoding=\"utf-8\"):\n if isinstance(s, six.text_type):\n return s\n elif isinstance(s, bytes):\n return s.decode(encoding)\n # TODO: warning? Exception?\n return s", "def to_unicode(s):\n if isinstance(s, basestring):\n return force_unicode(s)\n return s", "def force_unicode(s, encoding=encoding, errors='strict'):\n if isinstance(s, unicode):\n return s\n elif hasattr(s, '__unicode__'):\n return unicode(s)\n elif isinstance(s, str):\n return s.decode(encoding, errors)\n else:\n return str(s).decode(encoding, errors)", "def FromUnicode(val):\n if sys.version_info[0] >= 3:\n return val\n return val if isinstance(val, str) else val.encode('utf-8')", "def to_unicode(s):\n\n def brute_enc(s2):\n \"\"\"Trying to decode via simple brute forcing.\"\"\"\n encodings = (\"ascii\", \"utf8\", \"latin1\")\n for enc in encodings:\n try:\n return unicode(s2, enc)\n except UnicodeDecodeError:\n pass\n return None\n\n def chardet_enc(s2):\n \"\"\"Guess encoding via chardet.\"\"\"\n enc = chardet.detect(s2)[\"encoding\"]\n\n try:\n return unicode(s2, enc)\n except UnicodeDecodeError:\n pass\n return None\n\n # If already in unicode, skip.\n if isinstance(s, unicode):\n return s\n\n # First try to decode against a little set of common encodings.\n result = brute_enc(s)\n\n # Try via chardet.\n if not result:\n result = chardet_enc(s)\n\n # If not possible to convert the input string, try again with\n # a replace strategy.\n if not result:\n result = unicode(s, errors=\"replace\")\n\n return result", "def to_unicode(string):\n assert isinstance(string, basestring)\n if sys.version_info[0] >= 3:\n if isinstance(string, bytes):\n return string.decode('utf-8')\n else:\n return string\n else:\n if isinstance(string, str):\n return string.decode('utf-8')\n else:\n return string", "def to_unicode(self, _string):\n if not isinstance(_string, unicode):\n try:\n _string = unicode(_string)\n except:\n try:\n _string = _string.decode(\"utf-8\")\n except:\n _string = _string.decode(\"iso-8859-1\")\n return _string", "def pystr(s):\n if six.PY2 and isinstance(s, six.text_type):\n return s.encode('ascii', 'ignore')\n elif six.PY3 and isinstance(s, six.binary_type):\n return s.decode('utf-8')\n else:\n return s", "def string_unicode(text, encoding='utf-8'):\n try:\n if sys.version_info[0] >= 3:\n text = str(text)\n else:\n text = unicode(text, encoding) # pylint: disable=undefined-variable\n except: # pylint: disable=bare-except\n pass\n return text", "def to_unicode(string):\n\n if isinstance(string, str):\n return string.decode('utf-8')\n else:\n return unicode(string)", "def to_unicode(string):\n if not isinstance(string, unicode):\n for codec in CODECS:\r\n 
try:\r\n unic = unicode(string, codec)\r\n except UnicodeError:\r\n pass\r\n except TypeError:\r\n break\r\n else:\r\n return unic\r\n return string", "def test_unicodeToString(self):\n self.assertNativeString(u\"Good day\", \"Good day\")", "def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n # if isinstance(s, Promise):\r\n # # The input is the result of a gettext_lazy() call.\r\n # return s\r\n return force_unicode(s, encoding, strings_only, errors)", "def cast_unicode(s, encoding='utf-8'):\n if isinstance(s, bytes) and not PY3:\n return s.decode(encoding, \"replace\")\n return s", "def to_unicode (s, enc=ENCODING):\n\n if isinstance(s, basestring):\n if not isinstance(s, unicode):\n s = unicode(s, enc)\n return s", "def u(obj):\n return obj if isinstance(obj, unicode) else unicode(obj) # noqa: F821 pylint: disable=undefined-variable", "def ustr(obj):\n if IS_PY2:\n # If we are getting a string, then do an explicit decode\n # else, just call the unicode method of the object\n if type(obj) in [str, basestring]: # pragma: no cover # noqa\n return unicode(obj, DEFAULT_ENCODING) # pragma: no cover # noqa\n else:\n return unicode(obj) # pragma: no cover # noqa\n else:\n if type(obj) in [bytes]:\n return obj.decode(DEFAULT_ENCODING)\n else:\n return str(obj)", "def utf8(unicode_str):\n if six.PY2 and isinstance(unicode_str, __unicode__):\n return unicode_str.encode('utf-8')\n\n return unicode_str", "def smart_unicode(s, strings_only=False, errors='strict', encoding=None):\n\n return django_smart_unicode(\n s, encoding if encoding is not None else get_site_encoding(), strings_only, errors)", "def try_unicode(string):\n if isinstance(string, str):\n return string.decode(\"utf-8\")\n else:\n return string", "def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\n return force_unicode(s, encoding, strings_only, errors)", "def to_unicode(text, encoding='utf8', errors='strict'):\n if isinstance(text, str):\n return text\n return str(text, encoding, errors=errors)", "def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\n \"Unsupported string type: %s\" % (type(text))\n ) # pragma: no cover", "def Astr(string):\n\n return unicode(string, encoding='utf-8')", "def str_to_unicode(encode_str, py_version=3):\n if (encode_str is None or encode_str == \"\" or encode_str == 'Null' or\n encode_str == 'null'):\n encode_str = \"\"\n elif ((py_version == 2 and isinstance(encode_str, str)) or (\n py_version == 3 and isinstance(encode_str, str))):\n pass\n else:\n code = get_encode(encode_str)\n encode_str = encode_str.decode(code, 'ignore')\n return encode_str", "def unicode2utf8(s):\n return s.encode(encoding='utf-8', errors='ignore')", "def _to_unicode(text):\n # both str and unicode inherit from basestring\n if not isinstance(text, basestring):\n tmpl = 'expected UTF-8 encoded string or unicode, got %s value %s'\n raise TypeError(tmpl % (type(text), text))\n # return unicode strings unchanged\n if isinstance(text, unicode):\n return text\n # otherwise assume UTF-8 encoding, which also works for ASCII\n return unicode(text, 'utf-8')", "def to_native_str(s):\n if not isinstance(s, str):\n return s.decode('ascii', 'strict')\n return s", "def to_unicode(text, charset=None):\n if not isinstance(text, str):\n if isinstance(text, Exception):\n # two possibilities for storing unicode strings in exception data:\n try:\n # custom 
__str__ method on the exception (e.g. PermissionError)\n return unicode(text)\n except UnicodeError:\n # unicode arguments given to the exception (e.g. parse_date)\n return ' '.join([to_unicode(arg) for arg in text.args])\n return unicode(text)\n if charset:\n return unicode(text, charset, 'replace')\n else:\n try:\n return unicode(text, 'utf-8')\n except UnicodeError:\n return unicode(text, locale.getpreferredencoding(), 'replace')", "def to_unicode(text, charset=None):\n if not isinstance(text, str):\n if isinstance(text, Exception):\n # two possibilities for storing unicode strings in exception data:\n try:\n # custom __str__ method on the exception (e.g. PermissionError)\n return unicode(text)\n except UnicodeError:\n # unicode arguments given to the exception (e.g. parse_date)\n return ' '.join([to_unicode(arg) for arg in text.args])\n return unicode(text)\n if charset:\n return unicode(text, charset, 'replace')\n else:\n try:\n return unicode(text, 'utf-8')\n except UnicodeError:\n return unicode(text, locale.getpreferredencoding(), 'replace')", "def _sanitize_string_for_python(self, s):\n s = repr(s)\n\n if s.startswith('u'):\n s = s[1:]\n\n return s", "def stringToUnicode(x):\n if sys.version < '3':\n import codecs\n return codecs.unicode_escape_decode(x)[0]\n return x", "def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")", "def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")", "def to_unicode(text, charset=None):\n if isinstance(text, str):\n try:\n return unicode(text, charset or 'utf-8')\n except UnicodeDecodeError:\n return unicode(text, 'latin1')\n elif isinstance(text, Exception):\n # two possibilities for storing unicode strings in exception data:\n try:\n # custom __str__ method on the exception (e.g. PermissionError)\n return unicode(text)\n except UnicodeError:\n # unicode arguments given to the exception (e.g. 
parse_date)\n return ' '.join([to_unicode(arg) for arg in text.args])\n return unicode(text)", "def utf82unicode(s):\n return s.decode(encoding='utf-8', errors='ignore')", "def native_(s, encoding='latin-1', errors='strict'):\n if isinstance(s, text_type):\n return s\n return str(s, encoding, errors)", "def force_str(s):\n return (s.encode('utf8')\n if isinstance(s, unicode)\n else str(s))", "def _string_convert(str):\n if isinstance(str, unicode):\n return str\n try:\n return str.decode(locale.getpreferredencoding(), 'strict')\n except UnicodeError:\n try:\n return str.decode(locale.getpreferredencoding(), 'replace')\n except UnicodeError:\n # unrepresentable string\n return u'????'", "def str_to_unicode(text, encoding='utf-8'):\n if isinstance(text, str):\n return text.decode(encoding)\n elif isinstance(text, unicode):\n return text\n else:\n raise TypeError('str_to_unicode must receive a str or unicode object, got %s' % type(text).__name__)", "def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s, %s\" % (type(text), text))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")", "def to_unicode(text, encoding='utf8', errors='strict'):\r\n if isinstance(text, unicode):\r\n return text\r\n return unicode(text, encoding, errors=errors)", "def translateString(s):\n\n if HAS_UTF8:\n return s\n\n return str(s.encode(), encoding = \"ASCII\", errors=\"backslashreplace\")", "def safe_unicode(obj):\n try:\n return str(obj)\n except UnicodeDecodeError:\n return obj.decode(\"utf-8\")", "def to_unicode(text, encoding='utf8', errors='strict'):\n if isinstance(text, unicode):\n return text\n return unicode(text, encoding, errors=errors)", "def asunicode_win(s):\n if isinstance(s, bytes):\n return s.decode(locale.getpreferredencoding())\n else:\n return s", "def TO_UNICODE(string):\n try:\n # wroks if string is unicode already\n return unicode(string, \"utf-8\")\n except:\n pass\n try:\n # works if string is iso8859-1\n return unicode(string, \"iso-8859-1\")\n except:\n pass\n # try simple coding\n return unicode(string)", "def to_unicode(text, encoding='utf8', errors='strict'):\n if isinstance(text, unicode):\n return text\n else:\n return unicode(text, encoding=encoding, errors=errors)", "def unicode_to_str(str1, logger=None, str_encode='system', py_version=3):\n if str1 is None or str1 == \"\" or str1 == 'Null' or str1 == 'null':\n str1 = \"\"\n elif ((py_version == 2 and isinstance(str1, str)) or (\n py_version == 3 and isinstance(str1, str))):\n try:\n if str_encode.lower() == 'system':\n str1 = str1.encode(sys.getfilesystemencoding(), 'ignore')\n elif str_encode.lower() == 'utf-8':\n str1 = str1.encode('utf-8', 'ignore')\n elif str_encode.lower() == 'gbk':\n str1 = str1.encode('gbk', 'ignore')\n else:\n str1 = str1.encode(str_encode, 'ignore')\n except UnicodeDecodeError as err_message:\n if logger:\n logger.info('[err]unicode_to_str:encode %s to %s failed',\n str1, str_encode)\n logger.info(str(err_message))\n elif ((py_version == 2 and isinstance(str1, str)) or (\n py_version == 3 and isinstance(str1, bytes))):\n pass\n else:\n if logger:\n logger.info('%s is not unicode ', str1)\n return str(str1)", "def 
to_unicode(x):\n try: # This may never fail, but let's be safe\n encoding = locale.getpreferredencoding()\n except:\n encoding = 'utf-8'\n ret = x.decode(encoding, 'replace').encode('utf-8')\n return ret", "def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n if strings_only and isinstance(s, (types.NoneType, int)):\r\n return s\r\n if not isinstance(s, basestring,):\r\n if hasattr(s, '__unicode__'):\r\n s = unicode(s)\r\n else:\r\n s = unicode(str(s), encoding, errors)\r\n elif not isinstance(s, unicode):\r\n s = unicode(s, encoding, errors)\r\n return s", "def unicodise(string, encoding = None, errors = \"replace\"):\n global preferred_encoding\n \n if not encoding:\n encoding = preferred_encoding\n\n if type(string) == unicode:\n return string\n try:\n return string.decode(encoding, errors)\n except UnicodeDecodeError:\n raise UnicodeDecodeError(\"Conversion to unicode failed: %r\" % string)", "def _convert_string_to_unicode(string):\n result = string\n\n try:\n if string is not None and not isinstance(string, six.text_type):\n result = string.decode(\"utf-8\")\n except (TypeError, UnicodeDecodeError, AttributeError):\n # Sometimes the string actually is binary or StringIO object,\n # so if you can't decode it, just give up.\n pass\n\n return result", "def as_unicode(obj):\n if sys.version_info.major < 3 and isinstance(obj, str):\n obj = obj.decode('utf-8')\n return unicode(obj)", "def force_unicode(s, strings_only=False, errors='strict'):\n if sys.version_info[0] > 2:\n return django.utils.encoding.force_str(s, get_site_encoding(), strings_only, errors)\n else:\n return django.utils.encoding.force_unicode(s, get_site_encoding(), strings_only, errors)", "def force_unicode(value):\n if IS_PY3:\n # Python 3.X\n if isinstance(value, bytes):\n value = value.decode(\"utf-8\", errors=\"replace\")\n elif not isinstance(value, str):\n value = str(value)\n else:\n # Python 2.X\n if isinstance(value, str):\n value = value.decode(\"utf-8\", \"replace\")\n elif not isinstance(value, basestring): # NOQA: F821\n value = unicode(value) # NOQA: F821\n\n return value", "def ensure_unicode_string(value):\n if not isinstance(value, six.string_types):\n raise TypeError(u'Expected string value, got: {}'.format(value))\n return six.text_type(value)", "def su(value):\n return safe_unicode(value, encoding=get_charset())", "def normalize_unicode(str_data):\n try:\n normalized = unicodedata.normalize('NFKD', unicode(str_data)).encode('ascii', 'ignore')\n except UnicodeError:\n normalized = unicode(str_data, 'ascii', 'ignore')\n return normalized", "def touni(x, enc='utf8', err='strict'):\r\n return x if isinstance(x, unicode) else unicode(str(x), enc, err)", "def safe_unicode(e):\n try:\n return str(e)\n except UnicodeError:\n pass\n\n try:\n return repr(e)\n except UnicodeError:\n pass\n\n return u'Unrecoverably corrupt evalue'", "def str_to_python(self, value):\r\n return unicode_safe(value)", "def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\n if strings_only and isinstance(s, (types.NoneType, int, long, datetime.datetime, datetime.date, datetime.time, float)):\n return s\n try:\n if not isinstance(s, basestring,):\n if hasattr(s, '__unicode__'):\n s = unicode(s)\n else:\n try:\n s = unicode(str(s), encoding, errors)\n except UnicodeEncodeError:\n if not isinstance(s, Exception):\n raise\n # If we get to here, the caller has passed in an Exception\n # subclass populated with non-ASCII data without special\n # handling to display as a string. 
We need to handle this\n # without raising a further exception. We do an\n # approximation to what the Exception's standard str()\n # output should be.\n s = ' '.join([force_unicode(arg, encoding, strings_only,\n errors) for arg in s])\n elif not isinstance(s, unicode):\n # Note: We use .decode() here, instead of unicode(s, encoding,\n # errors), so that if s is a SafeString, it ends up being a\n # SafeUnicode at the end.\n s = s.decode(encoding, errors)\n except UnicodeDecodeError, e:\n raise CartolaUnicodeDecodeError(s, *e.args)\n return s", "def display_unicode(self, string):\n if string is None:\n return ''\n return string.decode(\"utf16\", \"ignore\").encode(\"ascii\", 'backslashreplace')", "def native_(s, encoding='latin-1', errors='strict'):\n if isinstance(s, text_type):\n return s.encode(encoding, errors)\n return str(s)", "def safeunicode(obj, encoding='utf-8'):\n t = type(obj)\n if t is unicode:\n return obj\n elif t is str:\n return obj.decode(encoding, 'ignore')\n elif t in [int, float, bool]:\n return unicode(obj)\n elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):\n try:\n return unicode(obj)\n except Exception as e:\n return u\"\"\n else:\n return str(obj).decode(encoding, 'ignore')", "def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n # Handle the common case first, saves 30-40% in performance when s\r\n # is an instance of unicode. This function gets called often in that\r\n # setting.\r\n if isinstance(s, unicode):\r\n return s\r\n if strings_only and is_protected_type(s):\r\n return s\r\n try:\r\n if not isinstance(s, basestring,):\r\n if hasattr(s, '__unicode__'):\r\n s = unicode(s)\r\n else:\r\n try:\r\n s = unicode(str(s), encoding, errors)\r\n except UnicodeEncodeError:\r\n if not isinstance(s, Exception):\r\n raise\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII data without special\r\n # handling to display as a string. We need to handle this\r\n # without raising a further exception. We do an\r\n # approximation to what the Exception's standard str()\r\n # output should be.\r\n s = u' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n elif not isinstance(s, unicode):\r\n # Note: We use .decode() here, instead of unicode(s, encoding,\r\n # errors), so that if s is a SafeString, it ends up being a\r\n # SafeUnicode at the end.\r\n s = s.decode(encoding, errors)\r\n except UnicodeDecodeError, e:\r\n if not isinstance(s, Exception):\r\n raise DjangoUnicodeDecodeError(s, *e.args)\r\n else:\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII bytestring data without a\r\n # working unicode method. 
Try to handle this without raising a\r\n # further exception by individually forcing the exception args\r\n # to unicode.\r\n s = u' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n return s", "def as_unicode(value):\n assert value is None or isinstance(value,types.StringTypes)\n if isinstance(value,types.StringType):\n return value.decode('utf-8')\n else:\n return value", "def _to_unicode(obj, encoding=\"UTF-8\"):\n if isinstance(obj, basestring):\n if not isinstance(obj, unicode):\n obj = unicode(obj, encoding)\n return obj", "def to_unicode(text, encoding='utf-8', errors='strict'):\r\n if isinstance(text, bytes):\r\n return text.decode(encoding, errors=errors)\r\n return text", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info", "def read_unicode_string(stream, size):\n\t\n\tvalue = u''\n\tif size > 0:\n\t\tdata = stream.read(size)\n\t\tdata = data.partition(chr(0))[0]\n\t\tvalue = unicode(data, 'utf_8')\n\treturn value", "def utf8tounicode(arg):\n\n try:\n if isinstance(arg, unicode):\n return arg.decode('utf-8')\n except NameError:\n pass # Python 3\n return arg", "def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n if strings_only and is_protected_type(s):\r\n return s\r\n try:\r\n if not isinstance(s, basestring,):\r\n if hasattr(s, '__unicode__'):\r\n s = unicode(s)\r\n else:\r\n try:\r\n s = unicode(str(s), encoding, errors)\r\n except UnicodeEncodeError:\r\n if not isinstance(s, Exception):\r\n raise\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII data without special\r\n # handling to display as a string. We need to handle this\r\n # without raising a further exception. 
We do an\r\n # approximation to what the Exception's standard str()\r\n # output should be.\r\n s = ' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n elif not isinstance(s, unicode):\r\n # Note: We use .decode() here, instead of unicode(s, encoding,\r\n # errors), so that if s is a SafeString, it ends up being a\r\n # SafeUnicode at the end.\r\n s = s.decode(encoding, errors)\r\n except UnicodeDecodeError, e:\r\n if not isinstance(s, Exception):\r\n raise TwitterTextUnicodeDecodeError(s, *e.args)\r\n else:\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII bytestring data without a\r\n # working unicode method. Try to handle this without raising a\r\n # further exception by individually forcing the exception args\r\n # to unicode.\r\n s = ' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n return s", "def qstringToString( text ):\n\treturn unicode( text.toUtf8(), \"utf-8\" )\n\t# try:\n\t\t# return str( text.toUtf8() )\n\t# except UnicodeDecodeError:\n\t\t# return unicode( text.toUtf8(), \"utf-8\" )", "def decode_string(string):\n return unicode(string, 'utf-8')", "def _unicodeify(self, value, encoding=\"utf8\"):\n if isinstance(value, str):\n return value\n return str(value, encoding)", "def _to_str(s, encoding=\"utf8\", errors=\"ignore\"):\n if isinstance(s, bytes):\n return s.decode(encoding=encoding, errors=errors)\n return str(s)", "def to_ascii(ustr):\n return ustr.encode('utf8')", "def to_unicode(value):\r\n if isinstance(value, _TO_UNICODE_TYPES):\r\n return value\r\n if not isinstance(value, bytes_type):\r\n raise TypeError(\r\n \"Expected bytes, unicode, or None; got %r\" % type(value)\r\n )\r\n return value.decode(\"utf-8\")", "def safeunicode(obj, encoding='utf-8'):\n t = type(obj)\n if t is unicode:\n return obj\n elif t is str:\n return obj.decode(encoding)\n elif t in [int, float, bool]:\n return unicode(obj)\n elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):\n return unicode(obj)\n else:\n return str(obj).decode(encoding)", "def toString(s):\n if type(s) == type(\"\"):\n return s\n else:\n return s.decode()", "def _to_native_string(string, encoding='ascii'):\n if isinstance(string, str):\n out = string\n else:\n out = string.decode(encoding)\n\n return out", "def to_unicode(data):\n if isinstance(data, bytes):\n return data.decode('utf-8')\n else:\n return data", "def deunicodise(string, encoding = None, errors = \"replace\"):\n\n\tif not encoding:\n\t\tencoding = Config.Config().encoding\n\n\tif type(string) != unicode:\n\t\treturn str(string)\n\tdebug(\"DeUnicodising %r using %s\" % (string, encoding))\n\ttry:\n\t\treturn string.encode(encoding, errors)\n\texcept UnicodeEncodeError:\n\t\traise UnicodeEncodeError(\"Conversion from unicode failed: %r\" % string)", "def remove_unicode(str):\n return unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')", "def _unicode_encode(self, s):\n if isinstance(s, unicode):\n return s.encode('utf-8')\n else:\n return s", "def test_string_conversion():\n ob = ConversionTest()\n\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = \"eggs\"\n assert ob.StringField == \"eggs\"\n assert ob.StringField == u\"eggs\"\n\n ob.StringField = u\"spam\"\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = u'\\uffff\\uffff'\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = System.String(\"spam\")\n assert 
ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = System.String(u'\\uffff\\uffff')\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = None\n assert ob.StringField is None\n\n with pytest.raises(TypeError):\n ConversionTest().StringField = 1\n\n world = UnicodeString()\n test_unicode_str = u\"안녕\"\n assert test_unicode_str == str(world.value)\n assert test_unicode_str == str(world.GetString())\n assert test_unicode_str == str(world)" ]
[ "0.7233125", "0.71349865", "0.7019128", "0.70103765", "0.6991322", "0.6916738", "0.6904967", "0.68421304", "0.6790128", "0.67792904", "0.671786", "0.6711641", "0.67108285", "0.66316116", "0.6597703", "0.6570314", "0.6567923", "0.65449303", "0.6489974", "0.645341", "0.64313215", "0.64283127", "0.64277136", "0.6379462", "0.63575", "0.6354504", "0.63370895", "0.6333765", "0.6331147", "0.63203037", "0.6301418", "0.6294504", "0.6293388", "0.629289", "0.6289198", "0.62811357", "0.62811357", "0.6238869", "0.6232138", "0.6221927", "0.6221927", "0.61935693", "0.61902726", "0.6185412", "0.61813194", "0.6169025", "0.6160391", "0.6156622", "0.6137323", "0.6126727", "0.6113291", "0.6108429", "0.61025006", "0.6096889", "0.6085063", "0.6084808", "0.60712713", "0.60656816", "0.6059236", "0.605465", "0.604833", "0.6042607", "0.6016483", "0.59988004", "0.59960717", "0.5995823", "0.5976453", "0.5976264", "0.5967838", "0.5962056", "0.59535813", "0.5945487", "0.5924718", "0.59099644", "0.59070736", "0.58992547", "0.5895074", "0.5848422", "0.5848422", "0.5848422", "0.5848422", "0.5848422", "0.5848422", "0.5841469", "0.58357006", "0.58246833", "0.5811975", "0.58106863", "0.5807386", "0.57971436", "0.57926047", "0.57898843", "0.5784179", "0.57753944", "0.5771162", "0.57689476", "0.5757701", "0.57539934", "0.5752122", "0.5751116", "0.5742965" ]
0.0
-1
Material saver. Saves a material and its properties to the JSON file for type building elements. If the Project parent is set, the material is automatically saved to the file given in Project.data. Alternatively, you can specify a path to a file with Materials. If this file does not exist, a new file is created.
def save_material(material, data_class):
    data_class.material_bind["version"] = "0.7"
    add_to_json = True
    warning_text = ("Material with same name and same properties already "
                    "exists in JSON, consider this material or revising your "
                    "properties")
    for id, check in data_class.material_bind.items():
        if id != "version":
            if check["name"] == material.name and \
                    check["density"] == material.density and \
                    check["thermal_conduc"] == material.thermal_conduc and \
                    check["heat_capac"] == material.heat_capac and \
                    check["thickness_default"] == material.thickness_default and \
                    check["thickness_list"] == material.thickness_list:
                warnings.warn(warning_text)
                print(material.name)
                add_to_json = False
                break
    if add_to_json is True:
        data_class.material_bind[material.material_id] = collections.OrderedDict()
        data_class.material_bind[material.material_id]["name"] = material.name
        data_class.material_bind[material.material_id]["density"] = material.density
        data_class.material_bind[material.material_id]["thermal_conduc"] = material.thermal_conduc
        data_class.material_bind[material.material_id]["heat_capac"] = material.heat_capac
        data_class.material_bind[material.material_id]["thickness_default"] = material.thickness_default
        data_class.material_bind[material.material_id]["thickness_list"] = material.thickness_list
        data_class.material_bind[material.material_id]["solar_absorp"] = material.solar_absorp
    with open(utilities.get_full_path(data_class.path_mat), 'w') as file:
        file.write(json.dumps(
            data_class.material_bind,
            indent=4,
            separators=(',', ': ')))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_material(filename, mat):\n out = np.array([mat.wav, mat.eps.real, mat.eps.imag,\n mat.mu.real, mat.mu.imag]).T\n header = \"Wavelength\\teps_real\\teps_imag\\tmu_real\\tmu_imag\"\n miepy.array_io.save(filename, out, header=header)", "def WriteStructuralMaterialsjson(save_path,dic_in_json_format):\n complete_name=os.path.join(save_path,\"StructuralMaterials.json\") \n with open(complete_name, \"w\") as save_file:\n save_file.write(dic_in_json_format)\n if(DEBUG):\n print(\"StructuralMaterials.json written\")", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)", "def export_material(self, bo, bm):\n\n # Sometimes, a material might need to be single-use. Right now, the most apparent example\n # of that situation is when a lightmap image is baked. Wavesets are in the same boat, but\n # that's a special case as of the writing of this code.\n single_user = self._requires_single_user_material(bo, bm)\n if single_user:\n mat_name = \"{}_AutoSingle\".format(bm.name) if bo.name == bm.name else \"{}_{}\".format(bo.name, bm.name)\n self._report.msg(\"Exporting Material '{}' as single user '{}'\", bm.name, mat_name, indent=1)\n hgmat = None\n else:\n mat_name = bm.name\n self._report.msg(\"Exporting Material '{}'\", mat_name, indent=1)\n hsgmat = self._mgr.find_key(hsGMaterial, name=mat_name, bl=bo)\n if hsgmat is not None:\n return hsgmat\n\n hsgmat = self._mgr.add_object(hsGMaterial, name=mat_name, bl=bo)\n slots = [(idx, slot) for idx, slot in enumerate(bm.texture_slots) if self._can_export_texslot(slot)]\n\n # There is a major difference in how Blender and Plasma handle stencils.\n # In Blender, the stencil is on top and applies to every layer below is. In Plasma, the stencil\n # is below the SINGLE layer it affects. The main texture is marked BindNext and RestartPassHere.\n # The pipeline indicates that we can render 8 layers simultaneously, so we will collect all\n # stencils and apply this arrangement. We're going to limit to 6 stencils however. 1 layer for\n # main texture and 1 piggyback.\n num_stencils = sum((1 for i in slots if i[1].use_stencil))\n if num_stencils > _MAX_STENCILS:\n raise ExportError(\"Material '{}' uses too many stencils. 
The maximum is {}\".format(bm.name, _MAX_STENCILS))\n stencils = []\n restart_pass_next = False\n\n # Loop over layers\n for idx, slot in slots:\n # Prepend any BumpMapping magic layers\n if slot.use_map_normal:\n if bo in self._bump_mats:\n raise ExportError(\"Material '{}' has more than one bumpmap layer\".format(bm.name))\n du, dw, dv = self.export_bumpmap_slot(bo, bm, hsgmat, slot, idx)\n hsgmat.addLayer(du.key) # Du\n hsgmat.addLayer(dw.key) # Dw\n hsgmat.addLayer(dv.key) # Dv\n\n if slot.use_stencil:\n stencils.append((idx, slot))\n else:\n tex_layer = self.export_texture_slot(bo, bm, hsgmat, slot, idx)\n if restart_pass_next:\n tex_layer.state.miscFlags |= hsGMatState.kMiscRestartPassHere\n restart_pass_next = False\n hsgmat.addLayer(tex_layer.key)\n if slot.use_map_normal:\n self._bump_mats[bo] = (tex_layer.UVWSrc, tex_layer.transform)\n # After a bumpmap layer(s), the next layer *must* be in a\n # new pass, otherwise it gets added in non-intuitive ways\n restart_pass_next = True\n if stencils:\n tex_state = tex_layer.state\n if not tex_state.blendFlags & hsGMatState.kBlendMask:\n tex_state.blendFlags |= hsGMatState.kBlendAlpha\n tex_state.miscFlags |= hsGMatState.kMiscRestartPassHere | hsGMatState.kMiscBindNext\n curr_stencils = len(stencils)\n for i in range(curr_stencils):\n stencil_idx, stencil = stencils[i]\n stencil_name = \"STENCILGEN_{}@{}_{}\".format(stencil.name, bm.name, slot.name)\n stencil_layer = self.export_texture_slot(bo, bm, hsgmat, stencil, stencil_idx, name=stencil_name)\n if i+1 < curr_stencils:\n stencil_layer.state.miscFlags |= hsGMatState.kMiscBindNext\n hsgmat.addLayer(stencil_layer.key)\n\n # Plasma makes several assumptions that every hsGMaterial has at least one layer. If this\n # material had no Textures, we will need to initialize a default layer\n if not hsgmat.layers:\n layer = self._mgr.find_create_object(plLayer, name=\"{}_AutoLayer\".format(bm.name), bl=bo)\n self._propagate_material_settings(bm, layer)\n hsgmat.addLayer(layer.key)\n\n # Cache this material for later\n mat_list = self._obj2mat.setdefault(bo, [])\n mat_list.append(hsgmat.key)\n\n # Looks like we're done...\n return hsgmat.key", "def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File Updated\"\n except:\n pass", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def save(self, filename=None):\n if filename is None:\n filename = \"morse_smale_complex.json\"\n with open(filename, \"w\") as fp:\n fp.write(self.to_json())", "def savemat(self, file_name, mdict=None, appendmat=True, 
**kwargs):\n # Set mdict default value to empty dictionary\n if mdict is None:\n mdict = {}\n\n # Merge mdict with attributes dictionary, giving mdict the upper-hand\n # in case of inconsistency\n dsavemat = {**vars(self), **mdict}\n\n # Save the merged dictionary to a .mat file\n scipy.io.savemat(file_name, dsavemat, appendmat, **kwargs)", "def AssembleStructuralMaterialsJson(KratosWindowManager):\n for key in KratosWindowManager.MatSave.keys():\n if(DEBUG):\n print(key)\n print(type(KratosWindowManager.MatSave[key]))\n sm.structuralmaterials_dict[\"properties\"][0][\"Material\"][\"Variables\"][key]=KratosWindowManager.MatSave[key]\n for bclistobject in KratosWindowManager.boundaryConditionEditor:\n if(DEBUG):\n print(bclistobject.name)\n if bclistobject.entityType=='Element':\n sm.structuralmaterials_dict[\"properties\"][0][\"model_part_name\"]=bclistobject.name\n\n\n if KratosWindowManager.is2D:\n sm.structuralmaterials_dict[\"properties\"][0][\"Material\"][\"constitutive_law\"][\"name\"]=\"KratosMultiphysics.StructuralMechanicsApplication.LinearElasticPlaneStrain2DLaw\"\n else:\n sm.structuralmaterials_dict[\"properties\"][0][\"Material\"][\"constitutive_law\"][\"name\"]=\"KratosMultiphysics.StructuralMechanicsApplication.LinearElastic3DLaw\"\n \n\n if(DEBUG):\n print(sm.structuralmaterials_dict)\n return sm.WriteMaterialToJson(sm.structuralmaterials_dict)", "def test_save_materials(temp_dir):\n image1 = [[[0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255]]]\n image2 = [[[0, 0, 0], [255, 255, 255]], [[255, 255, 255], [0, 0, 0]]]\n image3 = [[[255, 255, 255], [255, 255, 255]], [[0, 0, 0], [0, 0, 0]]]\n\n data = [\n (\"image1.png\", Image.fromarray(np.array(image1, dtype=np.uint8))),\n (\"image2.png\", Image.fromarray(np.array(image2, dtype=np.uint8))),\n (\"image3.png\", Image.fromarray(np.array(image3, dtype=np.uint8))),\n ]\n save_materials(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image1.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image2.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image3.png\"))", "def read_material_data(self, material):\n material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))\n\n inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)\n self.name = inputs['Name']\n self.materialName = material\n self.elements = inputs['Elements']\n self.zaids = inputs['Elemental ZAIDs']\n self.weightFraction = inputs['Elemental Weight Fractions'] if 'Elemental Weight Fractions' in inputs else []\n self.enrichmentZaids = inputs['Elemental Adjustment ZAIDs'] if 'Elemental Adjustment ZAIDs' in inputs else []\n self.enrichmentIsotopes = inputs['Isotopic Adjustment ZAIDs'] if 'Isotopic Adjustment ZAIDs' in inputs else []\n self.enrichmentVector = inputs['Isotopic Weight Percents'] if 'Isotopic Weight Percents' in inputs else []\n self.isotopicAtomPercents = inputs['Isotopic Atom Percents'] if 'Isotopic Atom Percents' in inputs else []\n self.density = inputs['Density']\n self.linearCoeffExpansion = inputs['Linear Coefficient of Expansion']", "def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)", "def write_savefile(state: PhysicsState, file: Path):\n if file.suffix.lower() != '.json':\n # Ensure a .json suffix.\n file = file.parent / (file.name + '.json')\n log.info(f'Saving to savefile {file.resolve()}')\n\n savefile_json_dict = 
google.protobuf.json_format.MessageToDict(\n state.as_proto(),\n including_default_value_fields=False,\n preserving_proto_field_name=True,\n use_integers_for_enums=False,\n )\n\n for i, component in enumerate(savefile_json_dict['engineering']['components']):\n component['name'] = strings.COMPONENT_NAMES[i]\n\n with open(file, 'w') as outfile:\n json.dump(savefile_json_dict, outfile, indent=2)\n\n return file", "def set_material(properties,object,finish,normal):\n if object not in properties:\n properties[object.getName()]={}\n properties[object.getName()][\"finish\"]=finish\n properties[object.getName()][\"normal\"]=normal", "def write_saver_defs(self):\n assert self.savers_constructed\n full_saver_def = self.full_saver.as_saver_def()\n full_file = self.params.save_dir+self.params.model_name+\"_v\"+self.params.version+\".def\"\n with open(full_file, \"wb\") as f:\n f.write(full_saver_def.SerializeToString())\n self.logger.log_info(\"Full saver def saved in file %s\"%full_file)", "def save(self):\n for name, param in self.components.items():\n param_path = os.path.join(self.model_path, \"%s.mat\" % name)\n if hasattr(param, 'params'):\n param_values = {p.name: p.get_value() for p in param.params}\n else:\n param_values = {name: param.get_value()}\n scipy.io.savemat(param_path, param_values)", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def make_settings():\n settings = {}\n num_of_rocks = 1\n\n obj = json.load(open('assets/add_mesh_rocks.json'))\n presets = [obj[\"settings\"][\"default\"]] + obj[\"settings\"][\"preset\"]\n\n for preset in presets:\n title = preset[\"title\"]\n # SIZE\n size = preset[\"size\"]\n\n x, y, z = size[\"scale\"]\n if title == \"Default\":\n scale = uniform(float(x[\"lower\"]), float(x[\"upper\"]))\n scale_X = [scale, scale]\n scale_Y = [scale, scale]\n scale_Z = [scale, scale]\n else:\n scale_X = [float(x[\"lower\"]), float(x[\"upper\"])]\n scale_Y = [float(y[\"lower\"]), float(y[\"upper\"])]\n scale_Z = [float(z[\"lower\"]), float(z[\"upper\"])]\n\n x, y, z = size[\"skew\"]\n skew_X = float(x[\"value\"])\n skew_Y = float(y[\"value\"])\n skew_Z = float(z[\"value\"])\n\n scale_fac = ast.literal_eval(size[\"scale_fac\"])\n use_scale_dis = bool(size[\"use_scale_dis\"])\n\n # SHAPE\n shape = preset[\"shape\"]\n\n deform = float(shape[\"deform\"])\n rough = float(shape[\"rough\"])\n detail = float(shape[\"detail\"])\n display_detail = float(shape[\"display_detail\"])\n smooth_fac = float(shape[\"smooth_fac\"])\n smooth_it = float(shape[\"smooth_it\"])\n\n\n # MATERIAL\n material = preset[\"material\"]\n \n mat_enable = bool(material[\"mat_enable\"])\n mat_color = ast.literal_eval(material[\"mat_color\"])\n mat_bright = float(material[\"mat_bright\"])\n mat_rough = float(material[\"mat_rough\"])\n mat_spec = float(material[\"mat_spec\"])\n mat_hard = float(material[\"mat_hard\"])\n mat_use_trans = bool(material[\"mat_use_trans\"])\n mat_alpha = float(material[\"mat_alpha\"])\n mat_cloudy = float(material[\"mat_cloudy\"])\n mat_IOR = float(material[\"mat_IOR\"])\n mat_mossy = float(material[\"mat_mossy\"])\n\n # RANDOM\n 
random = preset[\"random\"]\n\n use_generate = bool(random[\"use_generate\"])\n use_random_seed = bool(random[\"use_random_seed\"])\n user_seed = float(random[\"user_seed\"])\n\n\n settings[title] = [\n context,\n scale_X,\n skew_X,\n scale_Y,\n skew_Y,\n scale_Z,\n skew_Z,\n scale_fac,\n detail,\n display_detail,\n deform,\n rough,\n smooth_fac,\n smooth_it,\n mat_enable,\n mat_color,\n mat_bright,\n mat_rough,\n mat_spec,\n mat_hard,\n mat_use_trans,\n mat_alpha,\n mat_cloudy,\n mat_IOR,\n mat_mossy,\n num_of_rocks,\n user_seed,\n False,\n use_random_seed\n ]\n\n return settings", "def _save(self):\n\n out_dict = {}\n out_dict[\"version\"] = pyfx.__version__\n out_dict[\"name\"] = self._name\n out_dict[\"src\"] = self._src\n\n # Write out the background file as an image\n bg_file = os.path.join(self._name,\"master_bg_image.png\")\n pyfx.util.to_file(self._bg_frame,bg_file)\n out_dict[\"bg_frame\"] = bg_file\n\n f = open(os.path.join(self._name,\"pyfx.json\"),\"w\")\n json.dump(out_dict,f)\n f.close()", "def append_material(self, material):\n # First check if asset attribute exists; if not, define the asset attribute\n if not hasattr(self, \"asset\"):\n self.asset = ET.Element(\"asset\")\n # If the material name is not in shared materials, add this to our assets\n if material.name not in self.shared_materials:\n self.asset.append(ET.Element(\"texture\", attrib=material.tex_attrib))\n self.asset.append(ET.Element(\"material\", attrib=material.mat_attrib))\n # Add this material name to shared materials if it should be shared\n if material.shared:\n self.shared_materials.add(material.name)\n self.shared_textures.add(material.tex_attrib[\"name\"])\n # Update prefix for assets\n add_prefix(root=self.asset, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def build_storage(self, mine, planet=None):\n self.send_build_post(\"resources\", planet, codes.storage[mine])", "def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def save(self, path=''):\n if not self.__isBuilt:\n self._rebuild()\n if not path:\n self.w.save(self.path)\n else:\n if not path.endswith('.shp'):\n path = os.path.splitext(path)[0] + '.shp'\n self.w.save(path)", "def serialize(file):\n global root_dir\n global wells_list\n global tops_list\n global project_file\n project_file = file\n\n current_project = Project(root_dir, wells_list, tops_list, file)\n f = open(file, 'wb')\n pickle.dump(current_project, f)", "def persist(self) -> None:\n logger.info('Generating or Updating meta data file {}'.format(self.file_path))\n with open(self.file_path, 'w', encoding='utf-8') as meta_file:\n meta_file.write(json.dumps(self, default=lambda value: value.__dict__))", "def create_blender_material(self, ogremat, mat, meshId, matIdx):\n logger.debug(\"create_blender_material\")\n textures = ogremat.textures\n bmat = None\n idx = 0\n mat_name = mat[\"name\"].split(\"/\")[0]\n try:\n bmat = bpy.data.materials[mat_name]\n 
if bversion == 3:\n bmat.name = \"tobedeleted\"\n bmat = bpy.data.materials.new(mat_name)\n except:\n bmat = bpy.data.materials.new(mat_name)\n self.set_uuid(bmat, ogremat.uuid)\n # material base properties\n if ogremat.doambient:\n if bversion == 2:\n bmat.setAmb(ogremat.ambient)\n else:\n bmat.ambient = ogremat.ambient\n if ogremat.specular:\n if bversion == 2:\n bmat.setSpec(1.0)\n bmat.setSpecCol(ogremat.specular[:3])\n bmat.setHardness(int(ogremat.specular[3]*4.0))\n else:\n bmat.specular_intensity = 1.0\n ogremat.specular[:3]\n bmat.specular_color = ogremat.specular[:3]\n bmat.specular_hardness = int(ogremat.specular[3]*4.0)\n if ogremat.alpha < 1.0:\n bmat.alpha = ogremat.alpha\n # specular\n for layerName, textureId in ogremat.layers.items():\n if layerName == 'shadowMap':\n if bversion == 2:\n bmat.setMode(Blender.Material.Modes['SHADOWBUF'] & bmat.getMode())\n else:\n bmat.use_cast_buffer_shadows = True\n if textureId:\n textureId = textureId\n pars = (bmat, layerName, mat[\"name\"], ogremat, idx, meshId,\n matIdx)\n if textureId in self._imported_assets:\n btex = self._imported_assets[textureId]\n self.layer_ready(btex, *pars)\n elif self.simrt:\n pars = (textureId,) + pars\n if not self.Asset.downloadAsset(textureId, 0,\n self.texture_downloaded, \n pars,\n main=self.doTextureDownloadTranscode):\n self.add_texture_callback(textureId, self.layer_ready, pars[1:])\n idx += 1\n self._imported_materials[mat[\"name\"]] = bmat\n return bmat", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def __render_material_preview(self, scene):\n\n # Don't render material thumbnails.\n (width, height) = util.get_render_resolution(scene)\n if width <= 96:\n return\n\n # Collect objects and their materials in a object -> [materials] dictionary.\n objects_materials = {}\n for obj in (obj for obj in scene.objects if obj.is_visible(scene) and not obj.hide_render):\n for mat in util.get_instance_materials(obj):\n if mat is not None:\n if obj.name not in objects_materials.keys():\n objects_materials[obj] = []\n objects_materials[obj].append(mat)\n\n # Find objects that are likely to be the preview objects.\n preview_objects = [o for o in objects_materials.keys() if o.name.startswith('preview')]\n if not preview_objects:\n return\n\n # Find the materials attached to the likely preview object.\n likely_materials = objects_materials[preview_objects[0]]\n if not likely_materials:\n return\n\n # Build the path to the output preview project.\n preview_output_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"material_preview\")\n preview_project_filepath = os.path.join(preview_output_dir, \"material_preview.appleseed\")\n\n # Create target directories if necessary.\n if not os.path.exists(preview_output_dir):\n try:\n os.makedirs(preview_output_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. 
Check directory permissions.\".format(preview_output_dir))\n return\n\n # Copy assets from template project to output directory.\n preview_template_dir = os.path.join(os.sep.join(util.realpath(__file__).split(os.sep)[:-1]), \"mat_preview\")\n existing_files = os.listdir(preview_output_dir)\n for item in os.listdir(preview_template_dir):\n if item not in existing_files:\n copyfile(os.path.join(preview_template_dir, item), os.path.join(preview_output_dir, item))\n\n prev_mat = likely_materials[0]\n prev_type = prev_mat.preview_render_type.lower()\n\n # Export the project.\n writer = projectwriter.Writer()\n file_written = writer.export_preview(scene,\n preview_project_filepath,\n prev_mat,\n prev_type,\n width,\n height)\n if not file_written:\n print('Error while exporting. Check the console for details.')\n return\n\n # Render the project.\n self.__render_project_file(scene, preview_project_filepath)", "def SaveJSON(self, filename):\n data = {\n 'files': self._files,\n 'ebuilds': self._ebuilds,\n }\n json.dump(data, open(filename, 'w'))", "def save(self, path_or_file, strict=True, fmt='auto'):\n\n self.validate(strict=strict)\n\n with _open(path_or_file, mode='w', fmt=fmt) as fdesc:\n json.dump(self.__json__, fdesc, indent=2)", "def create_main_saver_node(self, version):\n fps = 25\n if version:\n project = version.task.project\n fps = project.fps\n\n random_ref_id = uuid.uuid4().hex\n\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n 'connected_to': {\n # 'ref_id': random_ref_id\n 'Input': {\n 'type': 'ColorCurves',\n 'ref_id': random_ref_id,\n 'input_list': {\n 'EditAlpha': 0.0,\n },\n 'connected_to': {\n 'Input': {\n 'type': 'CineonLog',\n 'input_list': {\n 'Mode': 1,\n # 'RedBlackLevel': 0.0,\n # 'RedWhiteLevel': 1023.0,\n 'RedFilmStockGamma': 1.0\n },\n }\n }\n }\n }\n },\n },\n {\n 'name': 'tga',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('tga'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'tga'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'TGAFormat',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 1,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 1,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 
'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mov',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mov'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mov'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'Apple ProRes 422 HQ_apch',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n\n 'QuickTimeMovies.LimitDataRate': 0.0,\n 'QuickTimeMovies.DataRateK': 1000.0,\n 'QuickTimeMovies.Advanced': 1.0,\n 'QuickTimeMovies.Primaries': 0.0,\n 'QuickTimeMovies.Transfer': 0.0,\n 'QuickTimeMovies.Matrix': 0.0,\n 'QuickTimeMovies.PixelAspectRatio': 0.0,\n 'QuickTimeMovies.ErrorDiffusion': 1.0,\n 'QuickTimeMovies.SaveAlphaChannel': 1.0,\n\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n\n\n\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n ]\n\n if version.task.type and version.task.type.name == 'Plate':\n # create a different type of outputs\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 0,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 
'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n },\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n },\n },\n ]\n\n # selectively generate output format\n saver_nodes = self.get_main_saver_node()\n\n for data in output_format_data:\n format_name = data['name']\n node_tree = data['node_tree']\n\n # now check if a node with the same name exists\n format_node = None\n format_node_name = self.output_node_name_generator(format_name)\n for node in saver_nodes:\n node_name = node.GetAttrs('TOOLS_Name')\n if node_name.startswith(format_node_name):\n format_node = node\n break\n\n # create the saver node for this format if missing\n if not format_node:\n self.create_node_tree(node_tree)\n else:\n # just update the input_lists\n if 'input_list' in node_tree:\n input_list = node_tree['input_list']\n for key in input_list:\n node_input_list = format_node.GetInputList()\n for input_entry_key in node_input_list.keys():\n input_entry = node_input_list[input_entry_key]\n input_id = input_entry.GetAttrs()['INPS_ID']\n if input_id == key:\n value = input_list[key]\n input_entry[0] = value\n break\n\n try:\n os.makedirs(\n os.path.dirname(\n self.output_path_generator(version, format_name)\n )\n )\n except OSError:\n # path already exists\n pass", "def _saveProjects(self, data):\n logger.debug(\"Func: _saveProjects %s\")\n\n self._dumpJson(data, self._pathsDict[\"projectsFile\"])", "def dump_mat(filename, obj, **kwargs):\n return sio.savemat(filename, obj, **kwargs)", "def save(self, path):\n\n with open(path, \"w\") as f:\n json.dump(self.definition, f, separators=(\",\", \":\"), cls=JSONEncoder)", "def save(self, path):\n\n with open(path, \"w\") as f:\n json.dump(self.definition, f, separators=(\",\", \":\"), cls=JSONEncoder)", "def save_settings(self):\n settings = {'camera': self.comboCamera.currentIndex(),\n 'rotation': self.comboRotation.currentIndex(),\n 'colors': {\n 'min_hue': self.spinMinHue.value(),\n 'max_hue': self.spinMaxHue.value(),\n 'min_saturation': self.spinMinSaturation.value(),\n 'max_saturation': self.spinMaxSaturation.value(),\n 'min_value': self.spinMinValue.value(),\n 'max_value': self.spinMaxValue.value(),\n }, 'diameter': self.spinDiameter.value(),\n 'lifter': self.lineEditLifter.text(),\n 'save_video': self.checkSaveVideo.isChecked()\n }\n settings_file = open('./resources/settings.json', 'w')\n json.dump(settings, settings_file, indent=4)\n settings_file.close()\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Settings saved.', 5000)", "def save_(self):\n if not self._edited:\n return\n data = 
{'history': self.dump()}\n with open(os.path.join(os.path.dirname(self.arch_handler.dicomdir_path), self.SAVE_NAME), \"w\") as outfile:\n json.dump(data, outfile)\n self._edited = False", "def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)", "def save(self):\n if self.loaded:\n full_file_name = self.resource_manager.get_dataset(self.corpus, self.embeddings.vsm_name)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def save(self):\n with open(str(self._path.resolve()), 'w+') as f:\n json.dump(self._database, f, indent=4)\n return self", "def AddMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_AddMaterial(self, *args)", "def loadMaterial(self):\n\t\tif not os.path.exists(down_dir):\n\t\t\traise TypeError(\"Directory 'downloads' with application materials must exist\")\n\n\t\tfor filename in glob.glob(down_dir + self.id+ \"*\"):\n\t\t\tdestination = self.dir + \"/\"+ filename[(len(self.id)+11)::]\n\t\t\tos.rename(filename, destination)", "def save_scene(force=True, **kwargs):\n\n pass", "def save(self):\n with open(FileStorage.__file_path, 'w') as saves:\n copy_dict = {key: self.__objects[key].to_dict()\n for key in self.__objects}\n json.dump(copy_dict, saves)", "def save(self):\n a_copy = FileStorage.__objects\n obj_dict = {obj: a_copy[obj].to_dict() for obj in a_copy.keys()}\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(obj_dict, f)", "def save(self, path):\n if not is_dry():\n with open(path, 'w') as f:\n json.dump(self.to_dict(), f, indent=4)\n return path", "def materials_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n thumbnails_path = get_directory('icons')\r\n library_path = get_library_path()\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n if AM.as_mat_scene:\r\n thumb_list = thumb + [\"AM_Cloth\", \"AM_Sphere\"]\r\n else: \r\n thumb_list = thumb\r\n\r\n cam_is_valid = False\r\n obj_is_valid = False\r\n \r\n \r\n if not AM.as_mat_scene and not bpy.context.object:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"No active_object in the scene\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n elif not AM.as_mat_scene and not bpy.context.active_object.active_material:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"The object have no material\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.as_mat_scene and not isdir(join(library_path, 'materials', \"Render Scenes\")):\r\n box.operator(\"object.create_rder_scn_lib\", text = \"Create render scene library\", icon = 'FILESEL')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n \r\n if AM.as_mat_scene:\r\n asset_name = AM.scene_name\r\n else:\r\n active_mat = context.active_object.active_material\r\n asset_name = active_mat.name\r\n \r\n if len(bpy.context.active_object.material_slots) == 1:\r\n AM.multi_materials = False\r\n \r\n if AM.as_mat_scene and (not asset_name in thumb_list or asset_name in thumb_list and 
AM.replace_rename == 'replace') or\\\r\n not AM.as_mat_scene and (AM.multi_materials and get_valid_materials() or not AM.multi_materials and asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace'): \r\n if not AM.multi_materials:\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2 and AM.replace_rename == 'rename':\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n if not AM.as_mat_scene and len(bpy.context.active_object.material_slots) >= 2:\r\n if len(get_valid_materials()) != len(bpy.context.active_object.material_slots) and AM.multi_materials:\r\n box.label(\"Some materials wont be added\", icon = 'ERROR')\r\n box.label(\" because there already exist\")\r\n row = box.row()\r\n row.prop(AM, \"multi_materials\", text = \"All materials\")\r\n if AM.as_mat_scene:\r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n if AM.as_mat_scene:\r\n for obj in context.scene.objects:\r\n if obj.type == 'CAMERA':\r\n cam_is_valid = True\r\n \r\n if len([obj for obj in context.selected_objects if obj.type != 'CAMERA' and bpy.context.active_object == obj]) == 1:\r\n obj_is_valid = True\r\n \r\n row = box.row()\r\n row.label(\"Selected object rendering\", icon = 'FILE_TICK' if obj_is_valid else 'CANCEL')\r\n row = box.row()\r\n row.label(\"Camera in the scene\", icon = 'FILE_TICK' if cam_is_valid else 'CANCEL')\r\n if not cam_is_valid:\r\n row = box.row()\r\n row.operator(\"object.camera_add\", text = \"Add camera\", icon = 'OUTLINER_OB_CAMERA')\r\n \r\n if not AM.as_mat_scene:\r\n # --------------------- # \r\n # RENDER THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'render':\r\n row = box.row(align = True)\r\n row.label(\"Thumbnail:\")\r\n row.prop(AM, \"mat_thumb_type\", text = \"\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n\r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", 
text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if (AM.as_mat_scene and AM.scene_name and cam_is_valid and obj_is_valid or not AM.as_mat_scene) and (AM.render_type == 'render' or (asset_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n if AM.as_mat_scene:\r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK')\r\n else:\r\n row.operator(\"object.add_material_in_library\", text=\"OK\", icon='FILE_TICK')\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.multi_materials and not get_valid_materials():\r\n box.label(\"All materials already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n else:\r\n AM.multi_materials = False\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def set_save_building(self, positions, building, pick_up_vertical, side_number):\n current = self.get_save_buildings()\n exist = False\n for pos in positions:\n print([pos[0], pos[1]])\n\n # Check if there is a building with the same number saved already\n if len(current) > 0:\n for saved_building in current:\n print(saved_building.number)\n if saved_building.number == building:\n if saved_building.side_number == side_number:\n print(\"[ERROR] Side already exists!\")\n return\n\n # If it is a new building, create it and add it to the array\n if not exist:\n current.append(BuildingSide(positions, pick_up_vertical, building, side_number))\n print(\"[INFO] Saved building\")\n\n saved_file = open(self.file_name_building, \"w\")\n json.dump(json.dumps(current, default=lambda o: o.__dict__,\n sort_keys=True), saved_file)\n saved_file.close()", "def write_mat_file(self, geom_filename):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat(geom_filename,mat_dict)", "def save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def save(filename, mat):\n if not 
isinstance(mat, np.ndarray):\n raise ValueError('for now, we can only save numpy arrays')\n return sio.savemat(filename, {'data': mat}, appendmat=False)", "def _build_writer(self):\n # Create savers (one for current, one for best)\n self.saver_cur = tf.train.Saver()\n self.saver_best = tf.train.Saver()", "def save_waveform(self, chan=None):\n t, y, pre = self.device.retrieve_current_waveform()\n meta = self.create_meta()\n if chan != None:\n meta['Channel'] = chan\n for name in pre:\n meta[name] = pre[name]\n data = {'meta' : meta,\n 't' : t,\n 'y' : y}\n rsp = daq.Rsp('save', data, meta)\n self.shot += 1\n self.r_queue.put(rsp)", "def write_material_data(ka_red=255.0 / 255, ka_green=255.0 / 255, ka_blue=255.0 / 255,\n ka_texture_ID=9223372036854775807, # ambient\n ks_red=255.0 / 255, ks_green=255.0 / 255, ks_blue=255.0 / 255,\n ks_texture_ID=9223372036854775807, # specular\n kd_red=255.0 / 255, kd_green=255.0 / 255, kd_blue=255.0 / 255,\n kd_texture_ID=9223372036854775807, # diffuse\n ns=0.1, # specular exponent\n alpha=1 # opacity\n ):\n\n input_ = [(ka_red, 'float32'), (ka_green, 'float32'), (ka_blue, 'float32'),\n (ka_texture_ID, 'uint64'),\n (ks_red, 'float32'), (ks_green, 'float32'), (ks_blue, 'float32'),\n (ks_texture_ID, 'uint64'),\n (kd_red, 'float32'), (kd_green, 'float32'), (kd_blue, 'float32'),\n (kd_texture_ID, 'uint64'),\n (ns, 'float32'), (alpha, 'float32')]\n\n block_bytes = encode(input_)\n return block_bytes", "def build_saver(self):\n #this is used to restore and save the graph default_saver.restore\n\n default_saver = tf.train.Saver(max_to_keep=3, allow_empty=True)\n self.savers = {self.name: default_saver}", "def create_material_data(self):\n for num, zaid in enumerate(self.enrichmentZaids):\n enriched_isotope_dict = {}\n for isoNum, isotopes in enumerate(self.enrichmentIsotopes[num]):\n enriched_isotope_dict[isotopes] = self.enrichmentVector[num][isoNum]\n self.enrichmentDict[zaid] = enriched_isotope_dict\n for num, element in enumerate(self.elements):\n self.elementDict[self.zaids[num]] = Element.Element(element)\n\n if self.isotopicAtomPercents:\n self.atomDensity = self.density\n self.set_atom_fractions()\n else:\n self.set_elemental_enrichment()\n self.set_weight_percent()\n self.atomDensity, self.atomPercent = set_atom_percent(self.weightPercent, self.density,\n self.elementDict)", "def save(self, settings=None):\n json_string = json.dumps(self.variables)\n with open(self.filepath, 'w', encoding='utf-8') as fh:\n fh.write(json_string)", "def save_game(partie):\n fichier= open(\"save_game.json\",\"w\")\n json.dump(partie,fichier)\n fichier.close()", "def get_save_buildings(self):\n saved_building = []\n print(\"Get saved buildings\")\n try:\n saved_file = open(self.file_name_building)\n try:\n data = json.loads(json.load(saved_file))\n for p in data:\n saved_building.append(\n BuildingSide(p.get(\"side\"), p.get(\"pick_up_vertical\"), p.get(\"number\"), p.get(\"side_number\")))\n\n except json.decoder.JSONDecodeError:\n saved_building = []\n\n saved_file.close()\n except FileNotFoundError:\n saved_building = []\n\n return saved_building", "def save(self):\n with open(self.file_path, 'w', encoding=Config.ENCODING) as file:\n json.dump(self.data, file, indent=2, ensure_ascii=False)", "def save(self):\r\n os.makedirs(self.settings.save_path, exist_ok=True)\r\n current_file = os.path.join(self.settings.save_path, 'current.json')\r\n if os.path.exists(current_file):\r\n raise FileExistsError()\r\n current_folder = os.path.join(self.settings.save_path, 'current')\r\n 
os.makedirs(current_folder, exist_ok=True)\r\n\r\n tosave = {\r\n 'generation': self.generation,\r\n 'approach_ind': self.approach[0],\r\n 'approach_params': Evolver._clean_params(self.approach_params),\r\n 'sensitive_params': Evolver._clean_params(self.sensitive_params)\r\n }\r\n\r\n with open(current_file, 'w') as outfile:\r\n json.dump(tosave, outfile)", "def generate_file(material_id):\n apr=get_doc_from_MP(material_id)\n mat_list=generate_matrix(apr)\n formu=POSCAR_title(apr)\n cell_for=generate_cell_formula(apr)\n needed_dos=generate_dos_str(material_id)\n revise_dos=dos_into_string(needed_dos)\n ordered_list=generate_ordered_list(revise_dos)\n my_ordered_elements=generate_ordered_elements(revise_dos,ordered_list)\n my_ordered_numbers=generate_ordered_numbers(revise_dos,ordered_list,cell_for)\n generate_POSCAR(formu,mat_list,my_ordered_elements,my_ordered_numbers,revise_dos)", "def saveBaseScene(self, categoryName, baseName, subProjectIndex=0, makeReference=True, versionNotes=\"\", sceneFormat=\"nk\", *args, **kwargs):\n logger.debug(\"Func: saveBaseScene\")\n\n now = datetime.datetime.now().strftime(\"%d/%m/%Y-%H:%M\")\n completeNote = \"[%s] on %s\\n%s\\n\" % (self.currentUser, now, versionNotes)\n\n # Check if the base name is unique\n scenesToCheck = self.scanBaseScenes(categoryAs=categoryName, subProjectAs=subProjectIndex)\n for key in scenesToCheck.keys():\n if baseName.lower() == key.lower():\n msg = (\"Base Scene Name is not unique!\\nABORTING\")\n self._exception(360, msg)\n return -1, msg\n\n projectPath = self.projectDir\n databaseDir = self._pathsDict[\"databaseDir\"]\n scenesPath = self._pathsDict[\"scenesDir\"]\n categoryPath = os.path.normpath(os.path.join(scenesPath, categoryName))\n self._folderCheck(categoryPath)\n\n ## if its going to be saved as a subproject\n if not subProjectIndex == 0:\n subProjectPath = os.path.normpath(os.path.join(categoryPath, self._subProjectsList[subProjectIndex]))\n self._folderCheck(subProjectPath)\n shotPath = os.path.normpath(os.path.join(subProjectPath, baseName))\n self._folderCheck(shotPath)\n\n jsonCategoryPath = os.path.normpath(os.path.join(databaseDir, categoryName))\n self._folderCheck(jsonCategoryPath)\n jsonCategorySubPath = os.path.normpath(os.path.join(jsonCategoryPath, self._subProjectsList[subProjectIndex]))\n self._folderCheck(jsonCategorySubPath)\n jsonFile = os.path.join(jsonCategorySubPath, \"{}.json\".format(baseName))\n\n else:\n shotPath = os.path.normpath(os.path.join(categoryPath, baseName))\n self._folderCheck(shotPath)\n\n jsonCategoryPath = os.path.normpath(os.path.join(databaseDir, categoryName))\n self._folderCheck(jsonCategoryPath)\n jsonFile = os.path.join(jsonCategoryPath, \"{}.json\".format(baseName))\n\n\n version = 1\n sceneName = \"{0}_{1}_{2}_v{3}\".format(baseName, categoryName, self._usersDict[self.currentUser], str(version).zfill(3))\n sceneFile = os.path.join(shotPath, \"{0}.{1}\".format(sceneName, sceneFormat))\n ## relativity update\n relSceneFile = os.path.relpath(sceneFile, start=projectPath)\n # killTurtle()\n # TODO // cmds may be used instead\n # pm.saveAs(sceneFile)\n nuke.scriptSaveAs(sceneFile)\n\n thumbPath = self.createThumbnail(dbPath=jsonFile, versionInt=version)\n\n jsonInfo = {}\n\n if makeReference:\n # TODO // Find an elegant solution and add MA compatibility. 
Can be merged with makeReference function in derived class\n referenceName = \"{0}_{1}_forReference\".format(baseName, categoryName)\n referenceFile = os.path.join(shotPath, \"{0}.{1}\".format(referenceName, sceneFormat))\n ## relativity update\n relReferenceFile = os.path.relpath(referenceFile, start=projectPath)\n shutil.copyfile(sceneFile, referenceFile)\n jsonInfo[\"ReferenceFile\"] = relReferenceFile\n jsonInfo[\"ReferencedVersion\"] = version\n else:\n jsonInfo[\"ReferenceFile\"] = None\n jsonInfo[\"ReferencedVersion\"] = None\n\n jsonInfo[\"ID\"] = \"SmNukeV02_sceneFile\"\n jsonInfo[\"NukeVersion\"] = [nuke.NUKE_VERSION_MAJOR, nuke.NUKE_VERSION_MINOR]\n jsonInfo[\"Name\"] = baseName\n jsonInfo[\"Path\"] = os.path.relpath(shotPath, start=projectPath)\n jsonInfo[\"Category\"] = categoryName\n jsonInfo[\"Creator\"] = self.currentUser\n jsonInfo[\"CreatorHost\"] = (socket.gethostname())\n jsonInfo[\"Versions\"] = [ # PATH => Notes => User Initials => Machine ID => Playblast => Thumbnail\n {\"RelativePath\": relSceneFile,\n \"Note\": completeNote,\n \"User\": self._usersDict[self.currentUser],\n \"Workstation\": socket.gethostname(),\n \"Preview\": {},\n \"Thumb\": thumbPath,\n \"Ranges\": self._getTimelineRanges()\n }\n ]\n\n jsonInfo[\"SubProject\"] = self._subProjectsList[subProjectIndex]\n self._dumpJson(jsonInfo, jsonFile)\n return [0, \"\"]", "def save(self):\n with open(self.__file_path, \"w\", encoding=\"UTF-8\") as file:\n parsed_dict = {\n key: value.to_dict()\n for key, value in self.__objects.items()\n }\n save_data(parsed_dict, file)", "def save(aircraft, settings):\n\n filepath = settings.paths('f_aircraft')\n logger.info(f\"Writing aircraft model to file '{truncate_filepath(filepath)}'...\")\n\n # ====== Aircraft top level =====\n output = {}\n output['uid'] = aircraft.uid\n\n output['refs'] = {}\n for key, value in aircraft.refs.items():\n output['refs'][key] = value\n\n # ====== Wings =====\n output['wings'] = []\n for wing in aircraft.wings.values():\n wing_entry = {}\n wing_entry['uid'] = wing.uid\n wing_entry['symmetry'] = wing.symmetry\n\n # ====== Segments =====\n wing_entry['segments'] = []\n for segment in wing.segments.values():\n segment_entry = {}\n segment_entry['uid'] = segment.uid\n segment_entry['vertices'] = dict(segment.vertices)\n\n segment_entry['geometry'] = {}\n for key, value in segment.geometry.items():\n segment_entry['geometry'][key] = value\n\n segment_entry['airfoils'] = {}\n for key, value in segment.airfoils.items():\n # If airfoil is \"blade\" file, make sure to save as relative path\n # Note: Airfoil definition may also be for instance \"NACA1234\"\n if \"blade.\" in value:\n # Make path relative!\n value = os.path.join(PATHS.DIR.AIRFOILS, os.path.basename(value))\n segment_entry['airfoils'][key] = value\n\n segment_entry['panels'] = {}\n for key, value in segment.panels.items():\n segment_entry['panels'][key] = value\n\n wing_entry['segments'].append(segment_entry)\n\n # ====== Controls =====\n wing_entry['controls'] = []\n for control in wing.controls.values():\n control_entry = {}\n\n control_entry['uid'] = control.uid\n control_entry['device_type'] = control.device_type\n control_entry['deflection'] = control.deflection\n control_entry['deflection_mirror'] = control.deflection_mirror\n\n control_entry['segment_uid'] = {}\n for key, value in control.segment_uid.items():\n control_entry['segment_uid'][key] = value\n\n control_entry['rel_vertices'] = {}\n for key, value in control.rel_vertices.items():\n control_entry['rel_vertices'][key] = 
value\n\n control_entry['rel_hinge_vertices'] = {}\n for key, value in control.rel_hinge_vertices.items():\n control_entry['rel_hinge_vertices'][key] = value\n\n control_entry['panels'] = {}\n for key, value in control.panels.items():\n control_entry['panels'][key] = value\n\n wing_entry['controls'].append(control_entry)\n\n output['wings'].append(wing_entry)\n\n with open(filepath, 'w') as fp:\n dump_pretty_json(output, fp)", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent = 4)", "def save(self):\n # Create needed subfolder if not yet exist\n f = get_subfolder(f\"population{'_backup' if self.use_backup else ''}/storage/\", f'{self.folder_name}')\n if len(str(self).split(\"/\")) > 1: get_subfolder(f, f'{str(self).split(\"/\")[0]}')\n f = get_subfolder(f, f'{self}')\n f = get_subfolder(f, 'generations')\n \n # Save the population\n store_pickle(self, f'{f}gen_{self.generation:05d}')\n self.log(f\"Population '{self}' saved! Current generation: {self.generation}\")", "def setMaterial(self,massFraction,polymer):\n M = Materials()\n num = self.material['Detector']['mt']\n if polymer == 'PS':\n self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)\n elif polymer == 'PEN':\n self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)\n else:\n raise ValueError('Polymer {} is not in the material database'.format(polymer))", "def update_makes_file(target_make=None):\n persisted_makes = load_make_models_json()\n persisted_makes_by_slug = {make[\"make_slug\"]: make for make in persisted_makes}\n\n all_makes = fetch_all_makes()\n for make in tqdm(all_makes):\n if make[\"make_name\"] == \"FISKER AUTOMOTIVE\":\n # This is a dumb name that doesn't match canada's name.\n make[\"make_name\"] = \"Fisker\"\n make[\"make_slug\"] = \"fisker\"\n if make[\"make_id\"] == 1033:\n # Skip the generic \"FISKER\" make, which lists no cars for some reason.\n continue\n if target_make and target_make != make[\"make_slug\"]:\n continue\n\n if make_is_whitelisted(make, warn_if_unlisted=True):\n persisted_makes_by_slug[make[\"make_slug\"]] = make\n print(f\"{make['make_name']} produces passenger vehicles: {make}\")\n else:\n continue\n\n if not make_produces_passenger_vehicles(make[\"make_id\"]):\n # There are too many random car brands, so we focus on just those makings cars and/or trucks\n continue\n\n makes_and_models = list(sorted(persisted_makes_by_slug.values(), key=lambda make: make[\"make_slug\"]))\n persist_json_file(makes_and_models, \"data\", \"makes_and_models.json\")", "def saveObjectMaps(self):\n if self.objectMaps == None: return\n path = os.path.join(self.dir,settings['mosh.modInfos.objectMaps'])\n outDir = os.path.split(path)[0]\n if not os.path.exists(outDir): os.makedirs(outDir)\n cPickle.dump(self.objectMaps,open(path,'wb'),2)", "def save(self):\n\n\t\tdirectory = os.path.dirname(self.path)\n\n\t\tif not os.path.exists(directory):\n\t\t\tos.makedirs(directory)\n\n\t\twith open(self.path, \"w\") as f:\n\t\t\tf.write(\n\t\t\t\tjson.dumps(\n\t\t\t\t\tself.dump(),\n\t\t\t\t\tindent=4,\n\t\t\t\t\tsort_keys=True\n\t\t\t\t)\n\t\t\t)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "def save(self, 
json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)", "def save(self):\n\n with open(FileStorage.__file_path, \"w\") as file:\n dictionary = {}\n for a, b in FileStorage.__objects.items():\n dictionary[a] = b.to_dict()\n ink = json.dumps(dictionary)\n file.write(ink)", "def saver(filename = None):\n save(self, filename)", "def save(self, filename, path=None):\n import os\n\n if path is None:\n from art import DATA_PATH\n full_path = os.path.join(DATA_PATH, filename)\n else:\n full_path = os.path.join(path, filename)\n folder = os.path.split(full_path)[0]\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._model.save(str(full_path))\n logger.info('Model saved in path: %s.', full_path)", "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases]}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "def save(self, filename='mesh.json', verbose=False):\n\n f = os.path.abspath(filename) # make sure we are working with abs path\n with open(f, 'w') as outfile:\n json.dump(self.serialize(), outfile)\n\n if verbose is True:\n print('Saved {}'.format(f))\n\n return f", "def savemat(self, file_name):\n d = {'dG0_prime': self.dG0_prime,\n 'dG0': self.dG0,\n 'T': self.T,\n 'I': self.I,\n 'pH': self.pH,\n 'pMg': self.pMg,\n 'weight': self.weight,\n 'cids': self.cids,\n 'S': self.S.values}\n savemat(file_name, d, oned_as='row')", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def save(self, projectData, filename):\n f = open(filename, 'wb')\n pickle.dump(projectData, f, protocol=1)\n f.close()", "def save(self, uri):\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)), allow_override=True)\r\n pf[os.path.basename(uri)+\"_options.json\"] = {\r\n 'input_cells': self._input_cells,\r\n 'latent_space': self._latent_space,\r\n }\r\n\r\n save_model(self._autoencoder, uri+\"_lstm_autoencoder.hdf5\")\r\n save_model(self._encoder, uri+\"_lstm_encoder.hdf5\")", "def save(self):\n\n toStore = {\n key: obj.to_dict()\n for key, obj in FileStorage.__objects.items()\n }\n with open(FileStorage.__file_path, 'wt') as file:\n json.dump(toStore, file)", "def store(self, file=None):\n params = self.info()\n if not file:\n folder = os.path.dirname(self.file)\n assert os.path.isdir(folder)\n file = os.path.join(folder, self.generate_name() + \".json\")\n assert os.path.splitext(file)[1] == \".json\"\n self.file = file\n self.tf_checkpoint_path = os.path.splitext(file)[0]\n if os.path.isfile(file):\n print(\"Overwriting '{}' ...\".format(self.tf_checkpoint_path))\n else:\n print(\"Creating '{}' ...\".format(self.tf_checkpoint_path))\n with open(file, 'w') as open_file:\n json.dump(params, open_file, indent=2, sort_keys=True)", "def save(self):\n data = \"\"\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n data += self.blocks[y][x]\n data += '\\n'\n print data\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Save level'}\n # filename = tkFileDialog.asksaveasfile(**options)\n filename = asksaveasfilename(**options)\n if filename:\n with open(filename, \"w\") as level:\n level.write(data)", "def save(self, filename=\"matpipe.p\"):\n temp_backend = self.learner.backend\n self.learner._backend = self.learner.backend.fitted_pipeline_\n for obj in [self, self.learner, 
self.reducer, self.cleaner,\n self.autofeaturizer]:\n obj._logger = None\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n self.learner._backend = temp_backend", "def _write(self, preset_type, data):\n logger.debug('write presets for %s', self._device.name)\n with self._file_open_rlock(preset_type) as f:\n f.seek(0)\n yaml.dump(data, f, default_flow_style=False)\n f.truncate()", "def save(self, file=\"setup\", path=\"settings\"):\n\n # check if filename already contains file extension, if not, add it\n if file[-5:] != '.json':\n file += '.json'\n # save mappings data to file\n with open(os.path.join(path, file), 'w') as file:\n json.dump(self.data, file)", "def save(self):\n # Ensure store path exists\n store_path = self.manager.store_path\n if not os.path.exists(store_path):\n os.makedirs(store_path)\n \n # Get filepath\n filename = self._filename\n \n # Write into file\n raw = self.to_json()\n self.service.log.store('Saving %s' % filename)\n f = open(filename, 'w')\n f.write(raw)\n f.close()", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def save_root(self, filename=None):\n if not filename:\n filename = tkFileDialog.asksaveasfilename()\n if filename:\n maketree.populate(self.initial.itervalues(),\n self.detectors.itervalues())\n maketree.write(filename)", "def save_as_mat(data, name=\"model\"):\n import scipy.io as sio\n\n # sio.savemat(name'.mat',{name:b})\n sio.savemat(name + \".mat\", {name: data})", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def save(self, dir_name=None):\n root_dir = os.getcwd()\n cur_datetime = str(datetime.datetime.now()).split(\".\")[0] # remove fractional seconds\n if not dir_name:\n dir_name = \"game_\" + cur_datetime + \"/\"\n save_dir = root_dir + \"/saved_games/\" + dir_name + \"_\" + cur_datetime + \"/\"\n\n # Check if the filepath already exists\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # Game\n game_dir = save_dir + \"game/\"\n os.makedirs(game_dir)\n with open(game_dir + \"game.json\", \"w\") as file_handle:\n game_dict = self.to_json_dict()\n json.dump(game_dict, file_handle)\n\n # Player\n player_dir = save_dir + \"player/\"\n os.makedirs(player_dir)\n with open(player_dir + \"player.json\", \"w\") as file_handle:\n player_dict = self.player.to_json_dict()\n json.dump(player_dict, file_handle)\n\n # Items\n items_dir = save_dir + \"items/\"\n os.makedirs(items_dir)\n for i in self.items:\n with open(items_dir + i.get_name() + \"_\" + str(i.get_id()) + \".json\", \"w\") as file_handle:\n item_dict = i.to_json_dict()\n json.dump(item_dict, file_handle)\n\n # Characters\n characters_dir = save_dir + \"characters/\"\n os.makedirs(characters_dir)\n for c in self.characters:\n with open(characters_dir + c.get_name() + \"_\" + str(c.get_id()) + \".json\", \"w\") as file_handle:\n character_dict = c.to_json_dict()\n json.dump(character_dict, file_handle)\n\n # Spaces\n spaces_dir = save_dir + \"spaces/\"\n os.makedirs(spaces_dir)\n for s in self.spaces:\n with open(spaces_dir + s.get_name() + \"_\" + str(s.get_id()) + \".json\", \"w\") as file_handle:\n spaces_dict = 
s.to_json_dict()\n json.dump(spaces_dict, file_handle)\n\n # Exits\n exits_dir = save_dir + \"exits/\"\n os.makedirs(exits_dir)\n for e in self.exits:\n with open(exits_dir + e.get_name() + \"_\" + str(e.get_id()) + \".json\", \"w\") as file_handle:\n exits_dict = e.to_json_dict()\n json.dump(exits_dict, file_handle)", "def save(self):\n self.lock.acquire()\n try:\n self.xml.set(\"name\",self.name)\n self.xml.set(\"room\",self.room)\n self.xml.set(\"type\",self.type)\n self.xml.find(\"address\").text = \":\".join([str(x) for x in self.address])\n if self.pos is not None:\n self.xml.find(\"pos\").text = \" \".join([str(x) for x in self.pos])\n self.xml.find(\"icon\").text = self.icon\n \n finally:\n self.lock.release()\n \n self.house.save_devices()", "def save(self, path):\n if path.endswith(\".gz\"):\n file = gzip.open(path, \"w\", 9)\n else:\n file = open(path, \"wb\")\n\n # update the settings in the data to the latest value\n data = json.loads(self.value)\n data[\"settings\"] = self.settings\n\n file.write(json.dumps(data).encode(\"utf8\"))\n file.close()" ]
[ "0.57051945", "0.55286336", "0.54615587", "0.53826904", "0.5350502", "0.53496337", "0.5319556", "0.52859074", "0.5285472", "0.52815086", "0.5229431", "0.5202984", "0.5185666", "0.5141686", "0.5140259", "0.513041", "0.51060146", "0.5105606", "0.50921017", "0.5083165", "0.5072597", "0.50634295", "0.50603575", "0.50601643", "0.5031712", "0.5016729", "0.4997566", "0.4988507", "0.49884725", "0.4957583", "0.4953412", "0.4932087", "0.49268728", "0.49133462", "0.48966336", "0.48966336", "0.48881665", "0.48849356", "0.48841685", "0.4878622", "0.48684347", "0.48544347", "0.48509106", "0.48427403", "0.4842499", "0.48400858", "0.4835588", "0.48325646", "0.48279443", "0.4827573", "0.48093584", "0.48024896", "0.47998542", "0.47979054", "0.47962317", "0.4770425", "0.47696048", "0.47669035", "0.47666064", "0.4764619", "0.47591543", "0.47540972", "0.4746854", "0.47439", "0.47400868", "0.4737125", "0.47353977", "0.47351545", "0.47294658", "0.47260055", "0.4721709", "0.4721595", "0.4718116", "0.4716243", "0.47155356", "0.47155356", "0.47155356", "0.47152436", "0.47151843", "0.47083977", "0.46986645", "0.46939287", "0.46922138", "0.4686666", "0.46823832", "0.4676022", "0.46758276", "0.46746403", "0.46741337", "0.46737993", "0.46649662", "0.46649566", "0.46642447", "0.4663662", "0.46615082", "0.46613178", "0.46603063", "0.46584737", "0.46551445", "0.4647729" ]
0.65841603
0
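Before the next row begins, a short illustrative sketch (not part of the dump itself) of how rows carrying the fields shown above — query, document, negatives, negative_scores — are commonly consumed as contrastive training triplets. It assumes the dump is stored as JSON Lines with one row object per line, and that negative_scores are query–negative similarity scores; the field names come from the dump, while the file layout and function names are placeholders.

import json

def iter_triplets(path):
    # Yield (query, positive document, negatives) from a JSONL dump
    # whose rows carry the fields shown above.
    with open(path, encoding='utf-8') as fp:
        for line in fp:
            row = json.loads(line)
            yield row['query'], row['document'], row['negatives']

def hardest_negatives(row, k=4):
    # negative_scores are stored as strings; assuming they are query-negative
    # similarities, a higher score means a harder negative.
    scored = sorted(zip(row['negatives'], map(float, row['negative_scores'])),
                    key=lambda pair: pair[1], reverse=True)
    return [text for text, _score in scored[:k]]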
Create a new Settings, reading from a default location for the given domain (~/Library/Preferences/%s.plist).
def __init__(self, domain='com.markfickett.gors'):
    settingsDir = os.path.expanduser(self.__SETTINGS_DIR)
    if not os.path.isdir(settingsDir):
        os.makedirs(settingsDir)
    self.__settingsFileName = os.path.join(settingsDir, domain + '.plist')
    if os.path.isfile(self.__settingsFileName):
        self.__settings = plistlib.readPlist(self.__settingsFileName)
    else:
        self.clear()
    self.__currentGroupNames = []
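A minimal, self-contained sketch of the same read-or-create pattern, added only to illustrate how the documented __init__ behaves; it is not part of the dataset row. It assumes the modern plistlib API (plistlib.readPlist, used above, was deprecated in Python 3.4 and removed in 3.9 in favour of plistlib.load / plistlib.dump), and the settings directory and domain below are placeholder values, not values taken from the row.

import os
import plistlib

SETTINGS_DIR = '~/Library/Preferences'      # assumed default location
DOMAIN = 'com.example.demo'                 # hypothetical preference domain

settings_dir = os.path.expanduser(SETTINGS_DIR)
os.makedirs(settings_dir, exist_ok=True)    # create the directory on first run
settings_file = os.path.join(settings_dir, DOMAIN + '.plist')

if os.path.isfile(settings_file):
    with open(settings_file, 'rb') as fp:
        settings = plistlib.load(fp)        # read existing preferences
else:
    settings = {}                           # start empty (what self.clear() presumably does)

settings['launchCount'] = settings.get('launchCount', 0) + 1

with open(settings_file, 'wb') as fp:
    plistlib.dump(settings, fp)             # persist back to the .plist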
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settings_create(ctx):\n # Choose where and whether to save the configuration file.\n path = ctx.obj['load_path']\n if path:\n click.confirm(\n 'A settings file already exists. Continuing will override it. '\n 'Do you want to continue?',\n abort=True,\n )\n else:\n path = ctx.obj['save_path']\n\n # Get information about Pulp.\n pulp_config = {'pulp': _get_pulp_properties()}\n pulp_config['hosts'] = [\n _get_host_properties(pulp_config['pulp']['version'])\n ]\n pulp_config['pulp']['version'] = str(pulp_config['pulp']['version'])\n try:\n config.validate_config(pulp_config) # This should NEVER fail!\n except exceptions.ConfigValidationError:\n print(\n 'An internal error has occurred. Please report this to the Pulp '\n 'Smash developers at https://github.com/PulpQE/pulp-smash/issues',\n file=sys.stderr,\n )\n raise\n\n # Write the config to disk.\n with open(path, 'w') as handler:\n handler.write(json.dumps(pulp_config, indent=2, sort_keys=True))\n click.echo('Settings written to {}.'.format(path))", "def open_settings(location, show_defaults=False, settings_type=None, **kwargs):\n prefs = kwargs.get('prefs', settings.InternalSettings())\n if settings_type:\n settings_type = settings_type.lower()\n\n target_path = prefs.get_settings_path(location, settings_type)\n if not target_path.exists():\n dirname = target_path.parent\n makedirs(dirname, mode=0o775, exist_ok=True)\n with open(target_path, 'a', newline='\\n') as settings_file:\n settings_file.write('{}')\n\n if show_defaults:\n openable = [prefs.get_settings_path('default', settings_type, target_path.suffix), target_path]\n else:\n openable = [target_path]\n return result.Success(openable=openable)", "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def loadSettings(home_dir,pd_dir):\n\n settingsXML = os.path.join(pd_dir,\"settings.xml\")\n\n #print(\"Loading settings from {0}\".format(settingsXML))\n\n global installationTree\n global installationSettings\n global domainPath\n global userEmail\n global userToken\n\n if os.path.isfile(settingsXML):\n installationTree = etree.parse(settingsXML)\n installationSettings = installationTree.getroot()\n\n for child in installationSettings:\n if child.tag == \"domain_path\":\n domainPath = child.text\n\n if not os.path.isdir(domainPath):\n fetchPlanningDomains(domainPath)\n\n if child.tag == \"email\":\n userEmail = child.text\n\n if child.tag == \"token\":\n userToken = child.text\n\n return\n\n if installationSettings is None:\n installationSettings = etree.Element(\"{http://settings.planning.domains}settings\")\n installationTree = etree.ElementTree(installationSettings)\n\n domainPath = input(\"Enter path for installing files (or hit enter to use {0}): \".format(os.path.join(home_dir,\"planning.domains\")))\n\n domainPath = domainPath.lstrip()\n domainpath = domainPath.rstrip()\n\n if domainPath == \"\":\n domainPath = os.path.join(home_dir,\"planning.domains\")\n\n if os.path.isfile(domainPath):\n print(\"Fatal error: there is already a file called {0}\".format(domainPath))\n exit(1)\n\n if not os.path.isdir(domainPath):\n fetchPlanningDomains(domainPath)\n\n etree.SubElement(installationSettings,\"domain_path\").text = domainPath\n\n userEmail = input(\"Enter email for API updates: \")\n userToken = input(\"Enter token for API updates (leave blank if none provided): \")\n\n etree.SubElement(installationSettings,\"email\").text = userEmail\n etree.SubElement(installationSettings,\"token\").text = userToken\n\n saveSettings()", "def 
set_by_domain(domain):\r\n if not has_configuration_set() or not domain:\r\n return\r\n\r\n for key, value in settings.MICROSITE_CONFIGURATION.items():\r\n subdomain = value.get('domain_prefix')\r\n if subdomain and domain.startswith(subdomain):\r\n _set_current_microsite(key, subdomain, domain)\r\n return\r\n\r\n # if no match on subdomain then see if there is a 'default' microsite defined\r\n # if so, then use that\r\n if 'default' in settings.MICROSITE_CONFIGURATION:\r\n _set_current_microsite('default', subdomain, domain)", "def make_pref_file():\r\n pref_dict = {\"default_user\": None}\r\n\r\n with open(os.path.join(os.path.dirname(__file__), \"preferences.json\"), \"w\") as pref:\r\n pref.write(json.dumps(pref_dict, indent=4))\r\n\r\n return pref_dict", "def create_domain(self, domain: str) -> Session:\n uri = f\"{self.uri}/domains\"\n data = {\n \"hostname\": domain\n }\n response = self.request(uri=uri, method=\"POST\", data=data)\n\n return response", "def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)", "def make_settings(pypirc):\n default_pypirc = \"\"\"\n [pypi]\n username:foo\n password:bar\n \"\"\"\n\n def _settings(pypirc_text=default_pypirc, **settings_kwargs):\n pypirc.write(textwrap.dedent(pypirc_text))\n\n settings_kwargs.setdefault(\"sign_with\", None)\n settings_kwargs.setdefault(\"config_file\", str(pypirc))\n\n return settings.Settings(**settings_kwargs)\n\n return _settings", "def load_settings(self):\n\n self.domains = []\n self.clear_settings()\n api_keys = self.api_key_instance.get_api_keys()\n if api_keys:\n for domain, api_key in list(api_keys.items()):\n self.domains.append(domain)\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains))).setText(\n domain\n )\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains))).setText(\n api_key\n )\n\n # Hide un-populated domain rows\n for entry in range(len(self.domains) + 1, 11):\n getattr(self.dlg, \"uTextDomain{0}\".format(entry)).hide()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).hide()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(entry)).hide()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(entry)).hide()", "def create_domain(DomainName=None):\n pass", "def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def populate_domain_data(self, domain):\n self.domain_resolve(domain)\n domain_data = server.get_domain_data(domain)['data']['userdata']\n\n self.domain_data[domain] = self.domain_resolve(domain)\n\n if domain in self.domain_data.keys():\n try:\n self.domain_data[domain]['documentroot'] = domain_data['documentroot']\n self.domain_data[domain]['ip'] = domain_data['ip']\n except KeyError:\n self.domain_data[domain]['documentroot'] = \"No domain data found, admin should check\"\n self.domain_data[domain]['ip'] = \"No domain data found, admin should check\"", "def get_domain_config(self, defaultcfg, wireframe):\n\n dnsdata = dnslib.DNSRecord.parse(wireframe)\n dnsdomain = dnsdata.q.get_qname()\n\n for ruleset in globals.config.rules.match:\n if re.search(str(ruleset.domain), str(dnsdomain)):\n # domain config matches!\n return Box({**defaultcfg, **ruleset})\n\n return defaultcfg", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n 
pulumi.set(__self__, \"domain\", domain)", "def loadSettings():\r\n try:\r\n settingsFile = open(sys.argv[1], \"r\")\r\n except IOError:\r\n logging.exception(\"Error opening settings.\")\r\n exitApp()\r\n \r\n settingStr = settingsFile.read()\r\n settingsFile.close()\r\n \r\n try:\r\n settings = json.loads(settingStr)\r\n except ValueError:\r\n logging.exception(\"Error parsing settings.\")\r\n exitApp()\r\n \r\n # Check integrity\r\n if (len(settings[\"reddit_username\"]) == 0):\r\n logging.critical(\"Reddit username not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_password\"]) == 0):\r\n logging.critical(\"Reddit password not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_subreddit\"]) == 0):\r\n logging.critical(\"Subreddit not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_ua\"]) == 0):\r\n logging.critical(\"Reddit bot user agent not set.\")\r\n exitApp()\r\n \r\n settings[\"repost_protection\"] = bool(settings[\"repost_protection\"])\r\n \r\n return settings", "def default_user_settings(self) -> pulumi.Input['DomainUserSettingsArgs']:\n return pulumi.get(self, \"default_user_settings\")", "def load_from_defaults(self):\n default_settings = import_module('mindinsight.conf.defaults')\n for setting in dir(default_settings):\n if setting.isupper():\n setattr(self, setting, getattr(default_settings, setting))\n self._default_settings.add(setting)", "def create_default_config():\n import codecs\n config = ConfigParser.SafeConfigParser()\n config.readfp(StringIO(DEFAULT_CONFIG))\n\n # Load user settings\n filename = get_user_config_filename()\n if not os.path.exists(filename):\n from wizard import setup_wizard\n setup_wizard(config)\n else:\n try:\n fi = codecs.open(filename, 'r', encoding='utf-8')\n config.readfp(fi)\n finally:\n fi.close()\n return config", "def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? 
[{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output", "def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )", "def setup_domain(domain):\n bucket = BUCKET_MANAGER.get_bucket(domain)\n\n zone = DOMAIN_MANAGER.find_hosted_zone(domain) \\\n or DOMAIN_MANAGER.create_hosted_zone(domain)\n\n endpoint = util.get_endpoint(BUCKET_MANAGER.get_region_name(bucket))\n a_record = DOMAIN_MANAGER.create_s3_domain_record(zone, domain, endpoint)\n print(\"Domain configure: http://{}\".format(domain))\n print(\"A record created: {}\".format(a_record))", "def init_settings(self):\n if not os.path.exists(self.settingsFilePath):\n settings_dir = os.getenv(\"APPDATA\") + \"\\\\\" + qApp.applicationName()\n if not os.path.exists(settings_dir):\n os.makedirs(settings_dir)\n setting_path = \"\"\n if getattr(sys, 'frozen', False):\n setting_path = os.path.dirname(sys.executable)\n elif __file__:\n setting_path = os.path.dirname(__file__)\n shutil.copyfile(os.path.join(setting_path, \"resources\\eksettings.ini\"), self.settingsFilePath)\n return", "def findSettingsFile():\n settingsName = 'oct-fire-settings.json'\n userPath = os.path.expanduser('~')\n if os.path.exists(settingsName):\n return settingsName\n elif os.path.exists(os.path.join(userPath, settingsName)):\n return os.path.join(userPath, settingsName)\n elif os.path.exists(os.path.join(userPath, 'Desktop', settingsName)):\n return os.path.join(userPath, 'Desktop', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Documents', settingsName)):\n return os.path.join(userPath, 'Documents', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Downloads', settingsName)):\n return os.path.join(userPath, 'Downloads', settingsName)\n raise Exception('Could not locate settings file')", "def ini(filename, **defaults):\n filename = sh.path(filename)\n defaults.update(home=sh.path('~'))\n return ConfigObject(filename=filename, defaults=defaults)", "def __init__(self, settings, valid, defaults=None):\n\n try:\n with open(settings, 'r') as settings_file:\n self._settings = json.load(settings_file)\n except TypeError:\n self._settings = dict(settings)\n self._settings = Settings._inject_defaults(self._settings, defaults)\n Settings._validity_check(self._settings, valid)", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def set_domain_path(self):\n\n self.domain_path = os.path.join(self.docs_path, self.domain)\n if not os.path.exists(self.domain_path):\n os.makedirs(self.domain_path)", "def settings() -> Settings:\n return Settings()", "def load_settings(self):\n\n self.std = settings.settings", "def pref(pref_name):\n default_prefs = {\n 'ServerURL': 'http://sal',\n }\n pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)\n if pref_value == None:\n pref_value = default_prefs.get(pref_name)\n # we're using a default value. 
We'll write it out to\n # /Library/Preferences/<BUNDLE_ID>.plist for admin\n # discoverability\n set_pref(pref_name, pref_value)\n if isinstance(pref_value, NSDate):\n # convert NSDate/CFDates to strings\n pref_value = str(pref_value)\n return pref_value", "def from_settings(settings):", "def load_settings(self, outfile='settings.p'):\n settings = pickle.load(open(path,'rb'))\n self.__dict__.update(settings)", "def set_domain(self, var, domain) :\n if var not in self.variables :\n raise KeyError(str(var) + \" is not a variable in this problem.\")\n self.domains[var] = sorted(domain[:])\n return self", "def get_settings(hostname: Optional[str] = None, device_type: Optional[DeviceType] = None):\n with open('/etc/cnaas-nms/repository.yml', 'r') as repo_file:\n repo_config = yaml.safe_load(repo_file)\n\n local_repo_path = repo_config['settings_local']\n try:\n verify_dir_structure(local_repo_path, DIR_STRUCTURE)\n except VerifyPathException as e:\n logger.exception(\"Exception when verifying settings repository directory structure\")\n raise e\n\n # 1. Get CNaaS-NMS default settings\n data_dir = pkg_resources.resource_filename(__name__, 'data')\n with open(os.path.join(data_dir, 'default_settings.yml'), 'r') as f_default_settings:\n settings: dict = yaml.safe_load(f_default_settings)\n\n settings_origin = {}\n for k in settings.keys():\n settings_origin[k] = 'default'\n\n # 2. Get settings repo global settings\n settings, settings_origin = read_settings(\n local_repo_path, ['global', 'base_system.yml'], 'global', settings, settings_origin)\n # 3. Get settings from special fabric classification (dist + core)\n if device_type and (device_type == DeviceType.DIST or device_type == DeviceType.CORE):\n settings, settings_origin = read_settings(\n local_repo_path, ['fabric', 'base_system.yml'], 'fabric',\n settings, settings_origin)\n # 4. Get settings repo device type settings\n if device_type:\n settings, settings_origin = read_settings(\n local_repo_path, [device_type.name.lower(), 'base_system.yml'], 'devicetype',\n settings, settings_origin)\n # 5. 
Get settings repo device specific settings\n if hostname:\n if os.path.isdir(os.path.join(local_repo_path, 'devices', hostname)):\n settings, settings_origin = read_settings(\n local_repo_path, ['devices', hostname, 'base_system.yml'], 'device',\n settings, settings_origin)\n # Verify syntax\n check_settings_syntax(settings, settings_origin)\n return f_root(**settings).dict(), settings_origin", "def setup_settings():\n # pylint: disable=import-outside-toplevel\n from django.conf import settings\n import tiny_erp.settings as defaults\n\n for name in dir(defaults):\n if name.isupper() and not hasattr(settings, name):\n setattr(settings, name, getattr(defaults, name))", "def get_preferred_domain(project=None, default_to_appid=True):\n projects_prefs = settings.PREFERRED_DOMAIN_NAMES.get(settings.APP_ID, {})\n preferred_domain = projects_prefs.get(project)\n if not preferred_domain:\n preferred_domain = projects_prefs.get(None)\n if not preferred_domain:\n if default_to_appid:\n preferred_domain = '%s.appspot.com' % app_identity.get_application_id()\n return preferred_domain", "def defaultConf():\n from config import lwbdUrl, userAndPass\n baseUrl = lwbdUrl\n lucidAuth = userAndPass\n return LucidSdaConfiguration(baseUrl,\n lucidAuth)", "def setPreference(self, newPreference):\n\t\tif newPreference == \"d\":\n\t\t\tself.showDomains = 1\n\t\telif newPreference == \"w\":\n\t\t\tself.showDomains = 0\n\t\telif newPreference == \"l\":\n\t\t\tself.showFullTitles = 1\n\t\telif newPreference == \"o\":\n\t\t\tself.showFullTitles = 0\n\t\telif newPreference == \"c\":\n\t\t\tself.collapseOldStories = 1\n\t\telif newPreference == \"e\":\n\t\t\tself.collapseOldStories = 0\n\t\telif newPreference[0] == \"/\":\n\t\t\tself.hnUserName = newPreference[1:]\n\n\t\twriteWentWell = self.writePreferenceToFile(newPreference)\n\t\tif not writeWentWell:\n\t\t\tinput = raw_input(\"hnsh_prefs.txt not found. Preferences changed will only be kept until this program is closed. Press Return to continue. 
\")", "def find_settings():\n return Setting()", "def default_settings(self, settings):\n return {}", "def default_user_settings(self) -> pulumi.Output['outputs.DomainUserSettings']:\n return pulumi.get(self, \"default_user_settings\")", "def getDefaultSettings():\n return {}", "def __init__(self, bot, name, default_settings=None):\n if default_settings is None:\n default_settings = {}\n self.bot = bot\n self.name = name\n self.default_settings = default_settings\n\n # set up storage for settings and load from persistent file\n self.settings_path = pathlib.Path(\".settings\", f\"{self.name}.yml\")\n self.id_dict = load_persistent_settings(self.settings_path)", "def __init__(self, location=None, default_location=None):\n RawConfigParser.__init__(self)\n\n self.location = location\n self._saving = False\n self._dirty = False\n\n if default_location is not None:\n try:\n self.read(default_location)\n except:\n pass\n\n if location is not None:\n try:\n self.read(self.location) or \\\n self.read(self.location + \".new\") or \\\n self.read(self.location + \".old\")\n except:\n pass\n\n if self.get_option('settings/version', 0) is None:\n self.set_option('settings/version', self.__version__)", "def __init__(self):\n for name, default in self.defaults.items():\n value = getattr(django.conf.settings, name, default)\n setattr(self, name, value)", "def create_settings(root_directory, files, variables, main_file, output):\n\tfiles = [format_path(f) for f in files]\n\tvariables = format_variables(variables)\n\tsettings = {\n\t\tSETTINGS_ROOTDIR : root_directory if root_directory.endswith('/') else root_directory+'/',\n\t\tSETTINGS_FILES : files,\n\t\tSETTINGS_VARIABLES : variables,\n\t\tSETTINGS_MAIN : format_path(main_file),\n\t\tSETTINGS_OUTPUT : output + SETTINGS_OUTPUT_PATH\n\t}\n\treturn settings", "def save_domain(self):\n del_domain = 0\n save_domain = 0\n\n sending_btn = self.dlg.sender().objectName()\n if sending_btn[:-1] == \"uBtnRemoveDomain\":\n del_domain = sending_btn[-1]\n if sending_btn[:-1] == \"uBtnSaveDomain\":\n save_domain = sending_btn[-1]\n\n keys = {}\n for entry in range(1, len(self.domains) + 2):\n if int(del_domain) == entry:\n continue\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(entry)).text()\n key = getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).text().strip()\n if domain and key:\n keys[domain] = key\n self.api_key_instance.set_api_keys(keys)\n\n # remove store capability docs for the removed or add domain/key\n # if they already exits .i.e these will be reloaded\n if save_domain:\n ui_elem_num = save_domain\n else:\n ui_elem_num = del_domain\n\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(ui_elem_num)).text()\n self.local_store.del_domains_xml(domain)\n\n # load / Reload service data\n self.load_settings()\n self.dlg.uWarningSettings.hide()\n self.dlg.uLabelWarning.hide()\n if self.curr_list_wid_index is not None:\n self.dlg.uListOptions.setCurrentItem(self.curr_list_wid_index)\n else:\n self.dlg.uListOptions.setCurrentRow(0)\n\n self.dlg.uStackedWidget.setCurrentIndex(0)\n self.services_loaded = False # key change, load data again\n self.load_ui()", "def default(cls) -> 'Config':\n parser: configparser.ConfigParser = configparser.ConfigParser()\n parser.read_dict(dict(wpwatcher=Config.DEFAULT_CONFIG))\n return cls.fromparser(parser)", "def get_defaults():\r\n profile = settings.profile_manager.get(\"default\")\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig, storage_args=['Global'], read_only=True)\r\n return {\r\n 
\"video_directory\": config.videodir,\r\n \"oauth2_token\": os.path.join(settings.configdir, \"oauth2_token.json\"),\r\n \"client_secrets\": os.path.join(settings.configdir, \"client_secrets.json\")\r\n }", "def get_defaults():\n\n # get package defaults\n with open(os.path.join(iLoop_RNAseq_pipeline.__path__[0], 'defaults', 'RNAseq_pipeline_defaults.txt')) as rpd:\n defaults = {}\n for line in rpd.readlines():\n if line.strip():\n defaults[line.split(',')[0].strip()] = line.split(',')[1].strip()\n\n try:\n with open(os.path.join(os.path.expanduser(\"~\"), 'RNAseq_pipeline_defaults.txt')) as rpd:\n for line in rpd.readlines():\n if line.strip():\n defaults[line.split(',')[0].strip()] = line.split(',')[1].strip()\n except FileNotFoundError:\n logger.warning('\"RNAseq_pipeline_defaults.txt\" does not exist under home path. An email address and project ID should be should be define in that file.')\n\n # replace with user defaults\n try:\n with open('RNAseq_pipeline_defaults.txt') as rpd:\n for line in rpd.readlines():\n if line.strip():\n defaults[line.split(',')[0].strip()] = line.split(',')[1].strip()\n except FileNotFoundError:\n logger.info(\n '\"RNAseq_pipeline_defaults.txt\" does not exist under this folder. Defaults from the package and home path will be used.')\n\n if 'email' not in defaults:\n if not validate_email(defaults['email']):\n while True:\n email = input('Enter a valid email address for job status: \\n')\n if validate_email(email):\n defaults['email'] = email\n print('Writing email to \"RNAseq_pipeline_defaults.txt\" under home path.')\n f = open(os.path.join(os.path.expanduser(\"~\"), 'RNAseq_pipeline_defaults.txt'), 'w+')\n f.write('\\nemail,{}'.format(email))\n f.close()\n break\n else:\n print('{} is not valid, try again.'.format(email))\n\n if ('project' not in defaults) or (defaults['project'] == 'projectid'):\n project = input('Enter Computerome project ID for billing: \\n')\n # TODO It is possible to validate this by checking folder name under \"/home/projects\".\n defaults['project'] = project\n print('Writing project ID to \"RNAseq_pipeline_defaults.txt\" under home path.')\n f = open(os.path.join(os.path.expanduser(\"~\"), 'RNAseq_pipeline_defaults.txt'), 'w+')\n f.write('\\nproject,{}'.format(project))\n f.close()\n\n return defaults", "def initialize_settings(tool_name, source_path, dest_file_name=None):\n settings_dir = os.path.join(SETTINGS_DIRECTORY, tool_name)\n if not os.path.exists(settings_dir):\n os.mkdir(settings_dir)\n if not dest_file_name:\n dest_file_name = os.path.basename(source_path)\n settings_path = os.path.join(settings_dir, dest_file_name)\n if not os.path.exists(settings_path):\n shutil.copy(source_path, settings_path)\n else:\n try:\n SettingsMigrator(source_path, settings_path).migrate()\n except ConfigObjError, parsing_error:\n print 'WARNING! corrupted configuration file replaced with defaults'\n print parsing_error\n shutil.copy(source_path, settings_path)\n return os.path.abspath(settings_path)", "def load_settings(self):\n # Set the default settings. 
In case in a later version of this script the settings change, new default variables will be added automatically\n self.settings = {\n # Connection settings to OBS Studio websockets plugin\n \"host\": \"localhost\",\n \"port\": 4444,\n \"password\": \"\",\n \"update_frequency\": 1, # seconds, how often the script loads the SC2 UI location\n }\n if os.path.isfile(self.settings_path):\n with open(self.settings_path) as f:\n self.settings.update(json.load(f))", "def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()", "def set_nfs_domain(session, domain, return_type=None, **kwargs):\n body_values = {'domain': domain}\n\n path = '/api/settings/nfs_domain.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)", "def default_space_settings(self) -> Optional[pulumi.Input['DomainDefaultSpaceSettingsArgs']]:\n return pulumi.get(self, \"default_space_settings\")", "def domain_prompt(self):\n\n # Domain selection prompt\n domain_completer = FuzzyWordCompleter(\n list(map(str.title, DOMAINS))\n ) # Titlecase for aesthetics\n selected_domain = DOMAINS.index(\n prompt(\"Domain: \", completer=domain_completer).lower()\n )\n\n print(\"Selected Domain: {}\".format(DOMAINS[selected_domain]))\n self.domain = DOMAINS[selected_domain]\n logging.info(\"Domain Selected\")", "def __init__(self, name, defaults = {} ):\n self.defaults = defaults\n self.filename = os.path.expanduser(name)+\".ini\"\n self.conf = {}\n self.reset()\n if os.path.exists(self.filename):\n self.load()", "def test_defaults_overrides_with_settings(settings):\n\n settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (\n \"howard.documents.DocumentIssuerChoices\"\n )\n settings.MARION_DOCUMENTS_ROOT = Path(\"/tmp/documents/abc\")\n settings.MARION_DOCUMENTS_TEMPLATE_ROOT = Path(\"howard/documents/abc\")\n\n # Force module reload to take into account setting override as it is loaded\n # very early in the stack\n importlib.reload(defaults)\n\n assert (\n defaults.DOCUMENT_ISSUER_CHOICES_CLASS\n == \"howard.documents.DocumentIssuerChoices\"\n )\n assert defaults.DOCUMENTS_ROOT == Path(\"/tmp/documents/abc\")\n assert defaults.DOCUMENTS_TEMPLATE_ROOT == Path(\"howard/documents/abc\")", "def _define_settings(self):\n\n self.settings = {}\n\n ##### ORIGINALLY IN THE DOMAIN FILE #######\n\n # Maximum input in the C-Space : no constituent can be more than 100% present\n self.settings['maxInp'] = 1\n\n #### ORIGINALLY IN THE SETTINGS FILE #####\n self.settings[\"epochs\"] = 3 # Training epochs\n self.settings[\"tgtStd\"] = 12e-6\n self.settings['TInit'] = 1e-6\n self.settings[\"TMin\"] = 0\n self.settings[\"TDecayRate\"] = 0.05\n self.settings[\"lambdaInit\"] = 0.011387\n self.settings['lambdaMin'] = 0.0001\n self.settings[\"lambdaDecayRate\"] = 0.60\n self.settings[\"maxSteps\"] = 300000\n self.settings[\"emaSpeedTol\"] = 0.009\n self.settings[\"emaFactor\"] = .005\n self.settings[\"printInterval\"] = 3000\n self.settings[\"summary_file\"] = \"data/summary.txt\"\n mean = torch.ones(self.grammar.bind.nF,\n self.grammar.bind.nR)/self.grammar.bind.nF\n self.settings[\"initStateMean\"] = mean\n self.settings[\"initStateStdev\"] = .025\n self.settings['clamp'] = False\n\n if self.custom_settings is not None:\n for key, value in self.custom_settings.items():\n if key in self.settings:\n self.settings[key] = 
value", "def read_settings():\n \n settings = OrdDic()\n settings.update(json.load(open(\"resources/files/settings.txt\", \"r\")))\n\n ## OLD WAY BELOW\n\n #r = open(\"resources/files/settings.txt\", \"r\", newline=\"\\n\")\n # for option in r.read().split('\\n'):\n # try:\n # #option = option.split('\\\\')\n # #settings.update({option[0]: option[1]})\n # # settings.update(json.loads(option))\n # except IndexError:\n # pass\n return settings", "def _new():\n\treturn ConfigParser(\n\tdelimiters = ('=',),\n\tcomment_prefixes = ('#', ';'),\n\tdefault_section = 'default',\n\tallow_no_value = False,\n\tstrict = False,\n\tinterpolation = ExtendedInterpolation(),\n\tdefaults = {\n\t\t'debug': False,\n\t\t'datadir': path.join(path.expanduser('~'), '.local', 'rosshm'),\n\t\t'log.level': 'warn',\n\t\t'core.enable': True,\n\t\t'db.driver': 'sqlite',\n\t\t'db.name': 'rosshmdb',\n\t\t'db.config': '',\n\t\t'static.enable': True,\n\t\t'web.enable': True,\n\t},\n)", "def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . \" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()", "def store_domain(self, store_domain):\n self._store_domain = store_domain\n return self", "def configure(prompt_list):\n darwin_vers = int(os.uname()[2].split('.')[0])\n edited_prefs = {}\n for (key, prompt) in prompt_list:\n newvalue = get_input_with_default('%15s: ' % prompt, pref(key))\n if darwin_vers == 10:\n # old behavior in SL: hitting return gives you an empty string,\n # and means accept the default value.\n edited_prefs[key] = newvalue or pref(key) or ''\n else:\n # just use the edited value as-is\n edited_prefs[key] = newvalue\n\n if FOUNDATION_SUPPORT:\n for key, value in edited_prefs.items():\n try:\n CFPreferencesSetAppValue(key, value, BUNDLE_ID)\n except BaseException:\n print('Could not save configuration!', file=sys.stderr)\n raise ConfigurationSaveError\n # remove repo_path if it exists since we don't use that\n # any longer (except for backwards compatibility) and we don't\n # want it getting out of sync with the repo_url\n CFPreferencesSetAppValue('repo_path', None, BUNDLE_ID)\n CFPreferencesAppSynchronize(BUNDLE_ID)\n\n else:\n try:\n existing_prefs = readPlist(PREFSPATH)\n existing_prefs.update(edited_prefs)\n # remove repo_path if it exists since we don't use that\n # any longer (except for backwards compatibility) and we don't\n # want it getting out of sync with the repo_url\n if 'repo_path' in existing_prefs:\n del existing_prefs['repo_path']\n writePlist(existing_prefs, PREFSPATH)\n except (IOError, OSError, ExpatError):\n print('Could not save configuration to %s' % PREFSPATH,\n file=sys.stderr)\n raise ConfigurationSaveError", "def get(cls):\n try:\n return cls.objects.get(id=1)\n except 
cls.DoesNotExist:\n default_settings = SiteSettings(id=1)\n default_settings.save()\n return default_settings", "def init(args):\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n cfgfile.close()", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def create_default_data():\n db = sh.open(the_phone_book_name, flag='n', writeback=True)\n phone_list = {'Bob': '6145617554',\n 'Leslie': '3095551212',\n 'Carol': '61455551212',\n 'Rick': '8955551212'}\n\n for name_key in phone_list:\n db[name_key] = phone_list[name_key]\n db.sync()\n db.close()", "def create_settings_file():\n with open('./cfg/settings.cfg'.replace(\"/\", os.path.sep), 'w') as cfg:\n cfg.write('[report]\\nlogo = ./cfg/logo.png\\ncompany =\\nrecord =\\nunit =\\nexaminer =\\nnotes =\\n\\n[auth]\\ngmail = [email protected]\\npassw = yourpassword\\ndevid = 1234567887654321\\ncelnumbr = BackupPhoneNunmber\\n\\n[app]\\npkg = com.whatsapp\\nsig = 38a0f7d505fe18fec64fbf343ecaaaf310dbd799\\n\\n[client]\\npkg = com.google.android.gms\\nsig = 38918a453d07199354f8b19af05ec6562ced5788\\nver = 9877000'.replace(\"/\", os.path.sep))", "def __getSettingsFromStorage():\n return AccountSettings.getSettings(NEW_SETTINGS_COUNTER)", "def __init__(self, argv: list, company: str, appname: str, Liststr=None):\n if company is None:\n company = self.__class__.__name__\n QSettings.setPath(QSettings.IniFormat, QSettings.UserScope, str(FOLDER.parent / \"settings\"))\n self.settings = QSettings(QSettings.IniFormat, QSettings.UserScope, company, appname)\n super().__init__(argv)", "def test_settings_instantiation(self):\n ## no settings passed on instantiation\n bd = BorrowDirect() # no settings info\n self.assertEqual(\n 
True, isinstance(bd, BorrowDirect) )\n ## dict settings\n settings_dict = {} ## empty dct\n bd = BorrowDirect( settings_dict )\n self.assertEqual(\n None, bd.UNIVERSITY_CODE )\n settings_dict = { 'UNIVERSITY_CODE': '123' } ## populated dct\n bd = BorrowDirect( settings_dict )\n self.assertEqual(\n '123', bd.UNIVERSITY_CODE )\n ## module settings\n s = imp.new_module( 'settings' ) ## empty module\n bd = BorrowDirect( s )\n self.assertEqual(\n None, bd.UNIVERSITY_CODE )\n s = imp.new_module( 'settings' ) ## populated module\n s.UNIVERSITY_CODE = '234'\n bd = BorrowDirect( s )\n self.assertEqual(\n '234', bd.UNIVERSITY_CODE )", "def get_pref(self, key, domain=BUNDLE_ID):\n value = CFPreferencesCopyAppValue(key, domain) or None\n # Casting NSArrays and NSDictionaries to native Python types.\n # This a workaround for 10.6, where PyObjC doesn't seem to\n # support as many common operations such as list concatenation\n # between Python and ObjC objects.\n if isinstance(value, NSArray):\n value = list(value)\n elif isinstance(value, NSDictionary):\n value = dict(value)\n return value", "def domain(self, domain):\n self._domain = domain", "def domain(self, domain):\n self._domain = domain", "def default_space_settings(self) -> pulumi.Output[Optional['outputs.DomainDefaultSpaceSettings']]:\n return pulumi.get(self, \"default_space_settings\")", "def get_pref(key, domain=BUNDLE_ID):\n if CFPreferencesCopyAppValue(key, domain):\n return CFPreferencesCopyAppValue(key, domain)\n return None", "def load_default_space_and_org(self):\n try:\n space_url = self.lookup_cf_response(\"default_space_url\")\n except AttributeError:\n return\n space = self._fetcher.get_entities(space_url)\n if 'organization_url' in space:\n org_url = space['organization_url']\n org = self._fetcher.get_entities(org_url)\n\n if ('name' in space) and ('name' in org):\n self._default_organization = org['name']\n self._default_space = space['name']", "def site(self, domain):\r\n return resource.Site(self, domain)", "def pref(pref_name):\n default_prefs = {\n 'ServerURL': 'http://munkiwebadmin',\n 'authKey': '',\n }\n pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)\n if pref_value is None:\n pref_value = default_prefs.get(pref_name)\n # we're using a default value. 
We'll write it out to\n # /Library/Preferences/<BUNDLE_ID>.plist for admin\n # discoverability\n set_pref(pref_name, pref_value)\n if isinstance(pref_value, NSDate):\n # convert NSDate/CFDates to strings\n pref_value = str(pref_value)\n return pref_value", "def read_settings(self):\n self.settings = read_settings(self.settings_path)", "def setPreferencesAtStartup(self):\n\t\tif os.path.isfile(self.userPrefsFileName):\n\t\t\tprefs = open(self.userPrefsFileName, 'r')\n\t\t\tprefsLine = prefs.readline()\n\t\t\tprefs.close()\n\t\t\t\n\t\t\tfor i in range(0,len(prefsLine)):\n\t\t\t\tc = prefsLine[i]\n\t\t\t\tif c is not \"/\":\n\t\t\t\t\tself.setPreference(c)\n\t\t\t\telse:\n\t\t\t\t\tself.setPreference(prefsLine[i:])\n\t\t\t\t\tbreak", "def setupDefaultProperties( self, p, id, title, description, email_from_address, email_from_name,\n validate_email, server_url, stemmer ):\n p._setProperty( 'email_from_address', email_from_address, 'string' )\n p._setProperty( 'email_from_name', email_from_name, 'string' )\n p._setProperty( 'validate_email', validate_email and 1 or 0, 'boolean' )\n p._setProperty( 'email_antispam', '', 'string' )\n p._setProperty( 'email_error_address', '', 'string' )\n p._setProperty( 'instance', id, 'string' )\n p._setProperty( 'remote_url', '', 'string' )\n\n p._setProperty( 'apply_threading', 1, 'boolean' )\n p._setProperty( 'use_timeout', 1, 'boolean' )\n p._setProperty( 'duration', 0.001, 'float' )\n p._setProperty( 'p_resolve_conflict', 0, 'boolean' )\n\n p._setProperty( 'max_involved_users', 10, 'int' )\n p._setProperty( 'service_timeout', 30, 'int' )\n p._setProperty( 'created_search_interval', 999, 'int' )\n p._setProperty( 'common_url', '', 'string' )\n\n p._setProperty( 'send_to_support', 0, 'boolean' )\n p._setProperty( 'member_activity', 1, 'boolean' )\n p._setProperty( 'emergency_service', 0, 'boolean' )\n p._setProperty( 'p_log', 0, 'boolean' )\n\n p._setProperty( 'suspended_mail', 1, 'boolean' )\n p._setProperty( 'mail_frequency', 1, 'int' )\n p._setProperty( 'mail_threshold', 500, 'int' )\n\n p._setPropValue( 'server_url', server_url )\n p._setPropValue( 'stemmer', stemmer )\n\n p.title = title\n p.description = description", "def create_default_config(self, parser):\n parser.add_section('irc')\n parser.set('irc', 'channels', '')\n \n # create the full path, and the file\n try:\n os.makedirs(self.config_dir_path, mode=0700)\n except OSError:\n pass\n file_resource = open(self.config_file_path, 'w')\n parser.write(file_resource)", "def fromenv(cls) -> 'Config':\n files = Config.find_config_files()\n if not files:\n log.info(\n \"Could not find default config: `~/.wpwatcher/wpwatcher.conf`, `~/wpwatcher.conf` or `./wpwatcher.conf`\"\n )\n return cls.default()\n else:\n return cls.fromfiles(files)", "def create_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:\n pass", "def __init__(self):\n super(sppasPathSettings, self).__init__()\n\n sppas_dir = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n self.__dict__ = dict(\n sppas=sppas_dir,\n cli=os.path.join(sppas_dir, \"bin\"),\n etc=os.path.join(sppas_dir, \"etc\"),\n po=os.path.join(sppas_dir, \"po\"),\n src=os.path.join(sppas_dir, \"src\"),\n plugins=os.path.join(os.path.dirname(sppas_dir), \"plugins\"),\n resources=os.path.join(os.path.dirname(sppas_dir), \"resources\"),\n samples=os.path.join(os.path.dirname(sppas_dir), \"samples\"),\n logs=os.path.join(os.path.dirname(sppas_dir), \".logs\"),\n wkps=os.path.join(os.path.dirname(sppas_dir), 
\"workspaces\"),\n trash=os.path.join(os.path.dirname(sppas_dir), \".trash\"),\n )", "def create_domain(domain_id, default_role):\n tx = iroha.transaction(\n [iroha.command(\"CreateDomain\", domain_id=domain_id, default_role=\"user\")]\n )\n ic.sign_transaction(tx, user_private_key)\n send_transaction_print_status_and_return_result(tx)", "def add_local_settings():\n put('/Users/peter/Dropbox/Projects/ChromeFiddle/Local\\ Settings/prod/local_settings.py', \n '/home/django/web/chromefiddle/chromefiddle/settings')", "def loadDefaults(self):\n # (025) Merged into settings.RawSettings.\n pass", "def test_domain(self):\n self.assertEqual(self.gmail_case.domain, 'google.com')\n self.gmail_case.domain = 'yahoo.com'\n self.assertEqual(self.gmail_case.domain, 'yahoo.com')\n self.assertEqual(self.sld_case.domain, 'amazon.co.uk')\n self.assertEqual(self.gmail_case.tld, 'com')\n self.assertEqual(self.sld_case.tld, 'co.uk')\n self.gmail_case.tld = 'co.ke'\n self.sld_case.tld = 'gov'\n self.assertEqual(self.gmail_case.tld, 'co.ke')\n self.assertEqual(self.sld_case.tld, 'gov')\n self.assertEqual(self.gmail_case.domain, 'yahoo.co.ke')\n self.assertEqual(self.sld_case.domain, 'amazon.gov')", "def auth_domain(request):\n return request.registry.settings.get('h.auth_domain', request.domain)", "def load_preferences(self):\n\n print 'Loading current DNAtool preferences'\n self.preferences = Preferences('DNAtool',{'canvas_height':600})\n\n for key in self.defaultprefs:\n if key in self.preferences.prefs:\n self.__dict__[key].set(self.preferences.get(key))\n else:\n self.preferences.set(key, self.defaultprefs[key])\n\n self.update_window_formatting()\n return", "def settings():\n return _get_settings()[1]", "def return_domain_values(hostname, domain_name, username):\n myconnection = ssh_connection(hostname, username)\n if myconnection == 1:\n return \"Connection to %s failed\" % hostname\n else:\n #If the user want the defaults values\n if domain_name == \"default\":\n list = []\n #Get the default destination\n stdin, stdout, stderr = myconnection.exec_command(\"/bin/cat /etc/postfix/main.cf | grep default_destination | cut -d '=' -f 2\" )\n out=stdout.read().splitlines()\n else:\n # Send the command (non-blocking)\n commandline=\"/bin/cat /etc/postfix/main.cf | grep %s | cut -d '=' -f 2\" % (domain_name)\n stdin, stdout, stderr = myconnection.exec_command(commandline)\n\n #On récupère la sortie standard\n out=stdout.read().splitlines()\n\n if not out:\n # Disconnect from the host\n myconnection.close()\n return \"No value for this domain. 
Are you sure this domain exist ?\"\n exit(1)\n else:\n # Disconnect from the host\n myconnection.close()\n #On retourne la liste des domaines\n return out", "def domain_profile(self, domain):\n return self.apiquery('/v1/{}'.format(domain))", "def _update_site_configuration(self):\n self.site.configuration.site_values = {'THIRD_PARTY_AUTH_ONLY_DOMAIN': self.email_domain_name}\n self.site.configuration.save()", "def readSettings(self):\n settings = QtCore.QSettings()\n # defaults to the current directory path\n current_dir_abspath = os.path.abspath('')\n defaultValue = QtCore.QString(current_dir_abspath)\n # Warning:\n # QSettings.value can return different types (QVariant types) depending on the platform it's running on,\n # so the safest way to use it is always casting the result to the desired type, e.g.: int(settings.value(\"myKey\")).\n output_directory = settings.value(\"output_directory\", defaultValue=defaultValue).toString()\n self.ui.outputDirLineEdit.setText(output_directory)" ]
[ "0.5657144", "0.548896", "0.5319334", "0.5285291", "0.52768016", "0.5255273", "0.5222265", "0.5218223", "0.5189833", "0.5173989", "0.5170184", "0.51397663", "0.513596", "0.5086622", "0.50810987", "0.50810987", "0.50761354", "0.5024908", "0.5006228", "0.5000491", "0.5000396", "0.49986142", "0.4996676", "0.49652752", "0.49533844", "0.49051794", "0.48797652", "0.4865534", "0.48620728", "0.48614177", "0.48597506", "0.4847769", "0.4843209", "0.4833179", "0.48037443", "0.47932178", "0.47857103", "0.47780803", "0.47713098", "0.47710988", "0.47681555", "0.47639725", "0.47537604", "0.47521138", "0.474266", "0.4737899", "0.47365564", "0.4732378", "0.47241792", "0.47217092", "0.47179455", "0.47144186", "0.47105816", "0.47013402", "0.46968982", "0.46849087", "0.46848226", "0.46632504", "0.46605426", "0.4656445", "0.4655388", "0.46544835", "0.46500567", "0.46403474", "0.46385905", "0.46312255", "0.46265006", "0.46198565", "0.46143794", "0.46005353", "0.45965722", "0.45923698", "0.4590082", "0.45832923", "0.45782843", "0.4578194", "0.4578194", "0.45761475", "0.45709032", "0.45672768", "0.45634302", "0.456294", "0.4557878", "0.45555392", "0.45546755", "0.45505056", "0.45333612", "0.45328987", "0.45310566", "0.45269844", "0.4523721", "0.451705", "0.4515607", "0.45094514", "0.4509354", "0.45090604", "0.45041662", "0.45019066", "0.44985747", "0.44964528" ]
0.7057614
0
Create and return a GroupGuard for this Settings object, which prepends a group name to all settings names referenced while it is active.
def groupGuard(self, groupNameRaw):
    groupName = str(groupNameRaw)

    if self.__DELIMITER in groupName:
        raise ValueError(("Illegal group name '%s' contains" +
                          " delimiter '%s'.") % (groupName, self.__DELIMITER))

    return self._GroupGuard(self, groupName)
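A minimal, self-contained sketch of the guard idea described above: a guard object pushes the group name onto the settings object's current-group stack while it is active and pops it when released, so every key referenced in the meantime is prefixed with the group name. The MiniSettings and _MiniGroupGuard names, the "/" delimiter, and the context-manager protocol are assumptions for illustration; the real Settings and _GroupGuard classes are not shown here.

# Hypothetical stand-in for the Settings/_GroupGuard pair; names and the "/"
# delimiter are assumptions, not the original implementation.
class MiniSettings:
    _DELIMITER = "/"

    def __init__(self):
        self._values = {}
        self._currentGroupNames = []

    def set(self, key, value):
        # Every key referenced while a guard is active gets the group prefix.
        fullKey = self._DELIMITER.join(self._currentGroupNames + [str(key)])
        self._values[fullKey] = value

    def groupGuard(self, groupNameRaw):
        groupName = str(groupNameRaw)
        if self._DELIMITER in groupName:
            raise ValueError("Illegal group name %r contains delimiter %r."
                             % (groupName, self._DELIMITER))
        return _MiniGroupGuard(self, groupName)


class _MiniGroupGuard:
    # Pushes the group name while active, pops it when the guard is released.
    def __init__(self, settings, groupName):
        self._settings = settings
        self._groupName = groupName

    def __enter__(self):
        self._settings._currentGroupNames.append(self._groupName)
        return self._settings

    def __exit__(self, exc_type, exc_value, traceback):
        self._settings._currentGroupNames.pop()


s = MiniSettings()
with s.groupGuard("display"):
    s.set("width", 800)       # stored as "display/width"
s.set("width", 1024)          # stored as "width"
print(s._values)              # {'display/width': 800, 'width': 1024}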
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def save_group_and_return_instance(self, groupname=\"\"):\n if groupname == \"\":\n raise ValueError(\"group needs its name.\")\n group = Group(name=groupname)\n self.save_object(group)\n return group", "def get_new_config_group(self):\n filename = \"%(config_dir)s/%(group)s.%(time)s\" % \\\n { \"config_dir\": self.config_dir(),\n \"group\": self.group_name(),\n \"time\": common.time_suffix(),}\n common.write_file(\"w\", 0o644, filename, self.get_match_criteria())", "def make_grp(self, name='grp', v=False):\n self.base[name] = self.get_group_array(v=v) #np.zeros(len(self.base), dtype=int)#self.get_group_array()", "def group(self):\n cook = cookie()\n G = Group('%s.G_%s' % (self.groupName, cook), self.socket, self)\n self.call('group', cook)\n return G", "def settings_grp(self):\n settings_grp = self.h5[SETTINGS]\n return settings_grp", "def gen_group(group_name=None, group_vars={}):\n group = Group(name=group_name)\n for key, value in group_vars.iteritems():\n group.set_variable(key, value)\n return group", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def group(self) -> \"Group\":\n return Group(connection=self)", "def get_group_name(self):\n return self.groupname", "def groups(self):\n new = self.copy()\n new._filter = [\"groups\"]\n return new", "def group(self, val):\n self.set_property(\"Group\", val)", "def group_name(self):\n\n return self._group_name", "def collapsed_to_hg_group(self):\n return op.join(self.collapse_to_hg_dir, \"touse.group.txt\")", "def require_group(self, name) -> \"GroupBase\":\n if name not in self:\n return self.create_group(name)\n\n group = self[name]\n if not isinstance(group, GroupBase):\n raise TypeError(f\"Incompatible object ({type(group)}) already exists\")\n\n return group", "def as_group(self, name=None):\n from . 
import groups\n\n if name:\n _rv = groups.NamedGroup((self,), name)\n else:\n _rv = groups.Group((self,))\n return _rv", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def add_group(self, key):\n grp = GroupExplorator(self, key)\n self._grps[key] = grp\n return grp", "def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")", "def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self", "def set_group_name(self, name):\n self.groupname = name", "def group(self):\n return self.properties.get('Group', None)", "def getGroup(self, name):\n return Group.create(self.pm_getUserManager().getGroup(self._unbox(name)), self._modelDataManager)", "def get_group(self) -> Optional[str]:\n return self.group", "def group(self) -> str:\n return pulumi.get(self, \"group\")", "def get_group(self):\n\t\treturn self.variables.get('group')", "def group(self) -> str:\n return self._db_data.group", "def give_group(self, key):\n return self._grps[key]", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def Group_created_by(Groupname):\r\n return Group.Group_created_by(Groupname)", "def get_group(self, group_name):\n\n return self._group[group_name]", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = 
self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def with_groups_allowed(self, groups_allowed: int) -> Creature:\n result = self.clone()\n result.groups_allowed = groups_allowed\n return result", "def group(self) -> Optional[str]:\n return pulumi.get(self, \"group\")", "def group(self):\n return self._group", "def group(self):\n return self._group", "def group(self):\n return self._group", "def get_group(self):\n return self._group", "def group_name(self):\n return \"room-%s\" % self.id", "def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")", "def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")", "def group_name(self):\n return \"room-%s\" % self.pk", "def group_name(self):\n return \"room-%s\" % self.pk", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def ad_group_name(self):\n\n return self._ad_group_name", "def ad_group_name(self):\n\n return self._ad_group_name", "def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def condition_group_options(self):\n if \"no-groups\" in self.options and self.options[\"no-groups\"]:\n self.options[\"groups\"] = []\n if \"exclude-groups\" in self.options:\n del self.options[\"exclude-groups\"]\n\n return\n\n super().condition_group_options()", "def group_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"group_name\")", "def settings_group_options():\n return [('', _('No group')), *[(str(a.id), str(a)) for a in Group.objects.all()]]", "def __str__(self):\n\n return 'Group(%s)' % self.id", "def create_placement_group(self, name, strategy='cluster'):\r\n params = {'GroupName':name, 'Strategy':strategy}\r\n group = self.get_status('CreatePlacementGroup', params, verb='POST')\r\n return group", 
"def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def group(cls):\n return relationship.many_to_one(cls, 'group')", "def _process_group(self, **config_kwargs) -> RobotGroupConfig:\n return RobotGroupConfig(self.sim_scene, **config_kwargs)", "def group(*args, absolute: bool=True, empty: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\",\n relative: bool=True, useAsGroup: AnyStr=\"\", world: bool=True, **kwargs)->AnyStr:\n pass", "def space_group(self) -> PermutationGroup:\n return self._full_translation_group @ self.point_group", "def get_security_group_short_name(self):\n return self.config['security_group']", "def add_bu_group(self, **kwargs):\n _stringify_kw(kwargs)\n \n bu_group = ElementTree.Element(xml_strings['backup_group'], **kwargs)\n self._root.append(bu_group)\n return XMLGroupOverlay(bu_group, self._root)", "def get_grp_string(self):\n\n grp = self.get_grp()\n\n if grp == -1:\n\n return \"\"\n\n return \"grp \" + str(grp)", "def koie_group():\n return GroupFactory(name=\"Koiene\")", "def append_groups(self, name, groups):\r\n self.__groups[name] = groups", "def autoCreateGroup(cleaned_data, cookie_user, isAutoApproved=False, querystring_content=False):\n existingSites = Site.objects.filter(\n domain=cleaned_data['domain'],\n )\n if len(existingSites) > 0:\n\n try:\n site = existingSites[0]\n group = site.group\n except Exception, e:\n raise Exception(\"Site \"+cleaned_data['domain']+\" has no group.\")\n else:\n # make a group and site\n try:\n group = Group.objects.create(\n name=cleaned_data['name'],\n short_name=cleaned_data['short_name'],\n approved=False,\n temp_interact=0,\n requires_approval=False,\n )\n except Exception, e:\n print \"* * * ** * * * * * * * EXCEPTION \"\n print e\n logger.warn(e)\n groups = Group.objects.filter(\n short_name=cleaned_data['short_name']\n )\n if len(groups) == 1:\n group = groups[0]\n elif len(groups) > 1:\n raise Exception(\"More than one group with shortname found: \" + cleaned_data['short_name'])\n else:\n raise Exception(\"No groups found with shortname: \" + cleaned_data['short_name'])\n\n site = Site.objects.create(\n name=cleaned_data['domain'],\n domain=cleaned_data['domain'],\n group=group,\n # this is whether or not a querystring is counted in the url - we should rename this\n querystring_content=querystring_content,\n )\n\n blessed_tags = addDefaultsForNewGroup(group, cookie_user)\n autoApproveUserAsAdmin(group, cookie_user, isAutoApproved=isAutoApproved)\n\n return group, site, blessed_tags", "def space_group(self, point_group: Optional[PointGroup] = None) -> PermutationGroup:\n return self.space_group_builder(point_group).space_group", "def clone(self):\n return _libsbml.Group_clone(self)", "def findGroup(self, name):\n for g in self._groups:\n if g.find('name').text.strip() == name:\n return CAGroup(g)\n \n # If we are here the group does not exist\n\n g = ET.SubElement(self._root, 'group')\n n = ET.SubElement(g, 'name')\n n.text = name.strip()\n return CAGroup(g)", "def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)", "def log_group_name(self) -> typing.Optional[str]:\n return self._values.get('log_group_name')", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef 
= self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def group(self):\n return self.tr(self.groupId())", "def group(self):\n return self.tr(self.groupId())", "def group(self):\n return self.tr(self.groupId())", "def group(self):\n return self.tr(self.groupId())", "def instance_group(self):\n return self._instance_group", "def getGroupingStrategyFactory(self) -> cern.japc.core.spi.group.GroupSubscriptionStrategyFactory:\n ...", "def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def create_group(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Create/\"))", "def group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group\")", "def group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group\")", "def group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group\")", "def log_group_name(self) -> str:\n ...", "def create_group(self, name, track_order=False):\n full_name = self._get_h5_path(name)\n with open_hdf5(self.file_name, mode=\"a\") as h:\n try:\n h.create_group(full_name, track_order=track_order)\n except ValueError:\n pass\n h_new = self[name].copy()\n return h_new", "def placement_group(template, name):\n p = PlacementGroup(name, template=template)\n p.Strategy = 'cluster'\n return p", "def get_joining_group_property(value, limit_ascii=False):\n\n obj = unidata.ascii_joining_group if limit_ascii else unidata.unicode_joining_group\n\n if value.startswith('^'):\n negated = value[1:]\n value = '^' + unidata.unicode_alias['joininggroup'].get(negated, negated)\n else:\n value = unidata.unicode_alias['joininggroup'].get(value, value)\n\n return obj[value]", "def register_group(self, cls):\n return self.register_entity('group', cls)", "def group_settings_greedy(obs_expt: ObservablesExperiment):\n diag_sets = _max_tpb_overlap(obs_expt)\n grouped_expt_settings_list = list(diag_sets.values())\n grouped_obs_expt = ObservablesExperiment(grouped_expt_settings_list, program=obs_expt.program)\n return grouped_obs_expt", "def clean_group_name(self):\r\n group_name = self.cleaned_data['group_name']\r\n master_id = 0\r\n\r\n if self.instance:\r\n # master id is used to exclude current master so that it is not checked as duplicate\r\n master_id = self.instance.id\r\n\r\n if LedgerGroup.objects.filter(company=self.company, group_name__iexact=group_name).exclude(id=master_id).exists():\r\n raise forms.ValidationError(\"Group name already exists\")\r\n\r\n return group_name", "def setGroup(self, group):\n\t\tself.config.GROUP = group", "def reserve_group(self, name, sco):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.reserve_group(name, sco)", "def 
test_save(self, name='test'):\n group = Group(name=name)\n group.save()\n return group" ]
[ "0.6209012", "0.5984325", "0.5788384", "0.568222", "0.56628835", "0.563917", "0.5622585", "0.56192756", "0.56191015", "0.55996215", "0.5569049", "0.5563115", "0.5557951", "0.55311483", "0.550896", "0.54411197", "0.54324687", "0.5423106", "0.5397997", "0.53885156", "0.53813946", "0.53632164", "0.53505796", "0.5339749", "0.5275209", "0.5269216", "0.526579", "0.5263766", "0.52470654", "0.521963", "0.52088445", "0.5176984", "0.5169597", "0.515446", "0.51513135", "0.51204866", "0.5120229", "0.51128656", "0.51046693", "0.5103994", "0.51027495", "0.5097854", "0.5097854", "0.5097854", "0.50927633", "0.50817597", "0.50773007", "0.50773007", "0.50755256", "0.50755256", "0.5064863", "0.50646526", "0.50646526", "0.50474006", "0.5038595", "0.503742", "0.5025156", "0.5020198", "0.5001938", "0.49956775", "0.49760523", "0.49579725", "0.49555078", "0.49529397", "0.49523017", "0.49281657", "0.4927576", "0.492138", "0.49213234", "0.49208438", "0.4916125", "0.49143296", "0.4910319", "0.4909751", "0.49063545", "0.49046493", "0.4890871", "0.48863885", "0.48816508", "0.48816508", "0.48816508", "0.48816508", "0.4879832", "0.48770487", "0.4856341", "0.48561046", "0.48540485", "0.48439813", "0.48439813", "0.48439813", "0.48400763", "0.48337492", "0.4824906", "0.48225257", "0.48182976", "0.48049164", "0.47935003", "0.47884715", "0.47748235", "0.47719073" ]
0.5727455
3
Return a list of all groups under the current group. (This is potentially slow, as it recomputes every time.)
def getChildGroups(self):
    groupPrefix = self.__DELIMITER.join(self.__currentGroupNames)
    if groupPrefix:
        groupPrefix += self.__DELIMITER
    skipLen = len(groupPrefix)

    childGroups = set()
    for keyName in self.__settings.keys():
        if keyName.startswith(groupPrefix):
            childKey = keyName[skipLen:]
            groupKey, _, grandChildKey = \
                childKey.partition(self.__DELIMITER)
            if grandChildKey:
                childGroups.add(groupKey)

    return filter(bool, childGroups)
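A standalone sketch of the prefix scan performed above, assuming a flat mapping whose keys are delimiter-joined paths; the "/" delimiter, the function name, and the sample data are illustrative only. A key contributes a child group only when something deeper than the prefix remains after splitting off the first path component, which is what the partition-and-check step does.

DELIMITER = "/"

def childGroups(settings, currentGroupNames):
    # Build the prefix for the current group ("" at the root).
    groupPrefix = DELIMITER.join(currentGroupNames)
    if groupPrefix:
        groupPrefix += DELIMITER
    skipLen = len(groupPrefix)

    found = set()
    for keyName in settings:
        if keyName.startswith(groupPrefix):
            childKey = keyName[skipLen:]
            groupKey, _, grandChildKey = childKey.partition(DELIMITER)
            # Only keys with a deeper path define a child group; plain keys
            # sitting directly under the current group do not.
            if grandChildKey:
                found.add(groupKey)
    return [name for name in found if name]

settings = {
    "display/width": 800,
    "display/colors/bg": "black",
    "audio/volume": 7,
    "title": "demo",
}
print(sorted(childGroups(settings, [])))           # ['audio', 'display']
print(sorted(childGroups(settings, ["display"])))  # ['colors']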
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def get_all_groups(self):\n return self.groups + ['all']", "def getGroups():\r\n return Group.getGroups()", "def get_pingroups(self):\n return self.groups[:]", "def all_groups(self):\n return self._all_groups", "def iter_groups(self):\n\t\treturn iter(self._groups)", "def groups(self):\n return []", "def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def groups(self):\n return self.get_data(\"groups\")", "def get_group_names(self):\r\n return self.groups.keys()", "def list_all_groups(self):\n groups = set()\n [groups.update(x) for x in self._groups.values()]\n return groups", "def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def groups(self):\n # type: (...) -> Set[str]\n return self._groups", "def get_groups(self):\n return Client._get(self)", "def groups(self):\n yield self\n for member_group in self._groups():\n yield member_group", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def find_groups(self, mesh):\n grp_names = []\n for grp in self._grps.values():\n grp_names.extend(grp.find_groups(mesh))\n return grp_names", "def groups(self):\r\n return resources.Groups(self)", "def all_groups(parent=None):\n if parent is None:\n parent = QgsProject.instance().layerTreeRoot()\n\n def do_a_group(grp, level=0):\n for child in grp.children():\n if isinstance(child, QgsLayerTreeGroup):\n yield child\n do_a_group(child, level=level + 1)\n\n do_a_group(parent)", "def list_groups(self):\n return self.get_admin(\"groups\")", "def getListOfGroups(self, *args):\n return _libsbml.GroupsModelPlugin_getListOfGroups(self, *args)", "def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]", "def get_groups(self, username):\n groups = []\n for group in grp.getgrall():\n if username in group.gr_mem:\n groups.append(group.gr_name)\n\n return groups", "def groups(self):\r\n return users.Groups(self)", "def groupby(self):\n try:\n return plist([x.groupby() for x in self])\n except Exception:\n groups = collections.OrderedDict()\n for i, x in enumerate(self):\n if x not in groups:\n groups[x] = plist()\n groups[x].append(self.__root__[i])\n return plist(groups.values())", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def get_groups(self, group_name):\r\n assert group_name in self.groups.keys(), group_name\r\n try:\r\n group_list = self.groups[group_name]\r\n except KeyError:\r\n raise GroupKeyError()\r\n return group_list", "def get(self, *args):\n return _libsbml.ListOfGroups_get(self, *args)", "def groups(self) -> list[Group]:\n return self._connection.groups", "def GetAncestorGroups(self):\n return [node for node in self.GetAncestors() if node.IsGroup()]", "def group(self):\n instances = 
self.instances\n groups = []\n for i in range(len(self.labels)):\n groups.append([instance for instance in instances if instance[-1] == self.labels[i]])\n return groups", "def get_groups(self, obj):\n groupsForCompany = get_groups_with_perms(obj)\n return [x.id for x in groupsForCompany]", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def GetGroupMembers(self, group):\n return []", "def find_groups(self, mesh):\n grps = []\n dim = mesh.give_dim()\n if dim:\n ctypes = self._dct[dim]\n grps = self._exp.find_groups_from_ctypes(mesh, ctypes)\n log.debug(\"GroupExplorator.find_groups for mesh %s returns %s with dim %s and dct %s\", mesh, grps, dim, self._dct)\n return grps", "def GetGroupList(setting):\n groups = set()\n\n for name in setting:\n dev = setting[name]\n format_, group = GetFieldDef(dev, fields=\"format_, group\")\n if group is not None and len(group) > 0:\n groups.add(group.title())\n if isinstance(format_, dict):\n subgroups = GetGroupList(format_)\n if subgroups is not None and len(subgroups) > 0:\n for group in subgroups:\n groups.add(group.title())\n\n groups=list(groups)\n groups.sort()\n return groups", "async def get_groups(self) -> list:\n results = await self._api.call('group', 'get_groups')\n return [models.Group(grp) for grp in results.payload]", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)", "def getPeopleGroups(self):\n return [FoursquarePeopleGroup(le) for le in self.base.get(\"groups\", [])]", "def groupfinder(name, request):\n #FIXME: Implement\n return ()\n return request.context.get_groups(name)", "def get_group_list(self) -> Sequence[str]:\n return [group.Name.lower() for group in self.LedGroups]", "def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()", "def groups(self):\n new = self.copy()\n new._filter = [\"groups\"]\n return new", "def get_groups_for_current_user():\n return UserAPI.get_groups_for_user_id(current_user)", "def get_groups(self):\n result = self.conn.usergroup.get(status=0, output='extend', selectUsers=\"extend\")\n groups = {group[\"name\"]: Group(\n name=group[\"name\"],\n id=group[\"usrgrpid\"],\n members=group[\"users\"],\n ) for group in result}\n return groups", "def list_group(self, groupname):\n return self.get_admin(\"groups/{}\".format(groupname))", "def groups(self, deep=False, exclude_prefix=None):\n\n for group in self._groups:\n if exclude_prefix is None or not group.startswith(exclude_prefix):\n yield group\n if deep:\n yield from (group + \"/\" + subgroup\n for 
subgroup in self[group].groups(deep))", "def get_groups(self, principal):\n groups = set()\n for location in lineage(self):\n location_groups = location._groups\n try:\n if self is location:\n groups.update(location_groups[principal])\n else:\n groups.update([x for x in location_groups[principal]])\n except KeyError:\n continue\n\n return tuple(groups)", "def grouplist(self, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'list')", "def get_all_as_groups(as_connection):\n as_groups_list = []\n get_as_groups = as_connection.get_all_groups()\n as_groups_list.extend(get_as_groups)\n\n token = get_as_groups.next_token\n while token is not None:\n get_as_groups = as_connection.get_all_groups(\n next_token=token)\n as_groups_list.extend(get_as_groups)\n token = get_as_groups.next_token\n print \"Processed {0} AutoScaling Group\"\\\n .format(len(as_groups_list))\n return as_groups_list", "def get_all_patents(self):\n return list(self._groups_groupby_patent.groups.keys())", "def get_all(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving groups\", \"/sysaccount/groups/v1\")", "def listGroups(self):\n return tuple(Group.create(groupName, self._modelDataManager) for groupName in self.pm_getUserManager().listGroups())", "def get_all_events_by_group() -> Group:\n level = Event.objects.filter(level__contains='information').values_list('agent')\n agent = Agent.objects.filter(pk__in=level).values_list('user')\n user = User.objects.filter(pk__in=agent).values_list('group')\n return Group.objects.filter(pk__in=user)", "def refreshGroups(self):\n self.groups = []\n\n self.addGroupsWithIds(self._getGroupIdsJoined())\n self.addGroupsWithIds(self._getGroupIdsInvited(), False)", "def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))", "def get_groups(self) -> dict:\n return dict(self._groups)", "def get_item_groups(dataset):\n return dataset.groupby(\"name\", as_index=False, sort=False).groups", "def pull_all_rhds_group(self):\n return self.ldap_connection.search_s(\"ou=managedGroups,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE)", "def ObjectGroups(object_id):\n rhino_object = rhutil.coercerhinoobject(object_id, True, True)\n if rhino_object.GroupCount<1: return []\n group_indices = rhino_object.GetGroupList()\n rc = [scriptcontext.doc.Groups.GroupName(index) for index in group_indices]\n return rc", "def get_relevant_perm_groups(self):\n\n groups = Group.objects.filter(Q(name=\"everyone\") | Q(name=self.admin_group_name()) | Q(name=self.participants_group_name()))\n return groups", "def get_current_grp():\n return get_group_grp(os.getgid())", "def get_groups(nodes):\n return list(set([node.color for node in nodes]))", "def get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names", "def _get_check_groups(self, group=None):\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups", "def get_groups(self, username):\n return []", "def output_groups(self) -> List[str]:\n return self._output_groups", "def getUserGroups(self, user):\n return [gu[0] for gu in grp.getgrall() if user in gu[3]]", "def global_node_groups(self) -> pulumi.Output[Sequence['outputs.GlobalReplicationGroupGlobalNodeGroup']]:\n return pulumi.get(self, \"global_node_groups\")", "def list_groups(self, **params):\n url = 'groups'\n if params:\n url += 
'?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def get_all_nda(self):\n return list(self._groups_groupby_NDA.groups.keys())", "def pull_groups(self, org):\n pass", "def _complete_groups(self, text):\r\n groups = []\r\n for info in self._get_complete_info():\r\n if info['group'] not in groups:\r\n groups.append(info['group'])\r\n return [ g + ' ' for g in groups if g.startswith(text) ]", "def get_groups(self, cell: Cell) -> Set[Group]:\n\t\tassert cell in self._cells\n\t\treturn self._cells_to_group_map[cell]", "def get_nested_groups(self, conn, group: str) -> typing.List[str]:\n nested_groups = list()\n conn.search(\n search_base=self.group_search_base,\n search_filter=self.group_search_filter.format(group=group),\n search_scope=ldap3.SUBTREE)\n if conn.response:\n for nested_group in conn.response:\n if 'dn' in nested_group:\n nested_groups.extend([nested_group['dn']])\n groups = self.get_nested_groups(conn, nested_group['dn'])\n nested_groups.extend(groups)\n nested_groups = list(set(nested_groups))\n return nested_groups", "def get_group_list(org_id):\n tList = get_template('app/usermanagementorg/group_list.html')\n groups = get_groups(org_id)\n return tList.render(Context({ 'groups': groups, }))", "def __iter__(self):\n try:\n groupNames = sorted(self.groups)\n except Exception: # pragma: no cover\n groupNames = self.groups.keys()\n\n for groupName in groupNames:\n yield groupName, self[groupName]", "def get(self):\r\n return UserGroupService.getAllUserGroups(self)", "def groupsChanged(self):\n # Get the list of groups for the present user according to\n # the checklist.\n nglist = []\n for r in self.liststore:\n if (r[1] and (r[0] != self.gidnm)):\n nglist.append(r[0])\n if (gui.getUserGroups(gui.currentUser) != nglist):\n return nglist\n else:\n return None", "def path_groups(self):\n return self._path_groups", "def get_all_groups(self, account_name=None, account_id=None, path=None, group_name=None,\n group_id=None, search=False ):\n grouplist=[]\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n for account in accounts:\n groups = self.get_groups_from_account(path=path,\n group_name=group_name,\n group_id=group_id,\n delegate_account=account['account_name'],\n search=search)\n for group in groups:\n group['account_name']=account['account_name']\n group['account_id']=account['account_id']\n grouplist.append(group)\n return grouplist", "def get_groups():\n\n groups = [\"shelter\", \"sharing\", \"unsheltered\", \"motel\"]\n\n for item in groups:\n group = Group(group_name=item)\n\n db.session.add(group)\n\n db.session.commit()", "def get_all():\n\n return AGE_GROUPS", "def getGroups(self, proteinId):\n return [self.groups[gId] for gId in self._proteinToGroupIds[proteinId]]", "def running_groups(self):\n return set(\n cmd.group_by for id, cmd in self.commands\n if cmd.is_running and cmd.group_by is not None\n )", "def get_all_groups(self, path_prefix='/', marker=None, max_items=None):\r\n params = {}\r\n if path_prefix:\r\n params['PathPrefix'] = path_prefix\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n return self.get_response('ListGroups', params,\r\n list_marker='Groups')", "def group_nodes(self, group, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'map', group)", "def getGroup(group: int, name=\"\") -> list:\n 
groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def fusion_api_get_directory_groups(self, body, api=None, headers=None):\n return self.logindomain.groups(body, api, headers)", "def readGroups(self):\n\t\tgroups = self._fileSystem.readGroups()\n\t\tif groups is None:\n\t\t\treturn\n\t\treturn groups" ]
[ "0.80298066", "0.7753639", "0.7704329", "0.76670724", "0.7662189", "0.7483736", "0.74683553", "0.7443777", "0.7393224", "0.7341", "0.7341", "0.7341", "0.73013186", "0.7216928", "0.7213653", "0.7186486", "0.7179003", "0.7083968", "0.7080749", "0.70386165", "0.7009138", "0.6971467", "0.695359", "0.6932292", "0.6920581", "0.6915589", "0.68814", "0.68720835", "0.6824134", "0.6812193", "0.6797456", "0.67920125", "0.67647165", "0.67603", "0.67272866", "0.67109436", "0.6684428", "0.66820747", "0.66811085", "0.6672757", "0.66502285", "0.6648071", "0.66436553", "0.6638446", "0.66262966", "0.6600514", "0.6599885", "0.6590717", "0.659059", "0.6585226", "0.65730435", "0.65420926", "0.65278465", "0.6519516", "0.65170264", "0.65115476", "0.6507825", "0.6507458", "0.6505606", "0.6467711", "0.64584297", "0.6457396", "0.6457171", "0.6440403", "0.64367527", "0.6426195", "0.6390032", "0.6385837", "0.6376669", "0.6371384", "0.63549846", "0.6351824", "0.6346657", "0.6346364", "0.63414437", "0.63388675", "0.63154554", "0.6307393", "0.6306336", "0.6302119", "0.62926793", "0.6284295", "0.62838274", "0.6280281", "0.62772834", "0.6274501", "0.62701243", "0.62512016", "0.6248682", "0.62476104", "0.6238385", "0.623585", "0.6229484", "0.62213016", "0.6219128", "0.6192638", "0.61903054", "0.6181303", "0.61672324", "0.6162663" ]
0.67759746
32
Get the full name of the given keyName under the current group. If extant is True, return the full name only when the key already exists and None when it does not; if extant is False, always return the full name.
def __getKey(self, keyNameRaw, extant=True):
    fullKeyName = self.__DELIMITER.join(
        self.__currentGroupNames + [str(keyNameRaw)])
    if extant and (fullKeyName not in self.__settings):
        return None
    return fullKeyName
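A standalone sketch of the lookup above, assuming the same kind of flat, delimiter-joined key store; the "/" delimiter, the function name, and the sample store are illustrative. The full name is always built the same way; the extant flag only decides whether a missing key yields None or the would-be name.

DELIMITER = "/"

def getKey(settings, currentGroupNames, keyNameRaw, extant=True):
    fullKeyName = DELIMITER.join(currentGroupNames + [str(keyNameRaw)])
    if extant and (fullKeyName not in settings):
        return None   # extant=True: only names of existing keys are returned
    return fullKeyName

settings = {"display/width": 800}
print(getKey(settings, ["display"], "width"))                 # 'display/width'
print(getKey(settings, ["display"], "height"))                # None
print(getKey(settings, ["display"], "height", extant=False))  # 'display/height'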
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_name(self) -> Optional[str]:\n return pulumi.get(self, \"key_name\")", "def get_name(self):\n return self.key().name().split(':', 1)[1]", "def extract_key_name(self):\n # quick and dirty regex parsing..\n # consider using gnupg.\n _, out, _ = self.as_user('/usr/bin/gpg --list-keys')\n patterns = [\n 'pub\\s+.*?uid\\s+debrepo.*?sub\\s+\\w+/(\\w+)\\s+[\\w-]+$',\n '^pub.*?\\n\\s+(.*?)\\nuid',\n ]\n keyname = None\n out_str = out.decode('utf8')\n for pattern in patterns:\n m=re.search(pattern, out_str, flags=re.M|re.DOTALL)\n if m:\n keyname=m.group(1)\n break\n return keyname", "def kms_key_name(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_name\")", "def get_post_extra_content_key_name(obj, key_name=None):\n if obj:\n key, name = key_name.split(',')\n return obj.get_extra_content().filter(key__iexact=key, name__iexact=name)\n return ''", "def key_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_name\")", "def key_name(self) -> str:\n return pulumi.get(self, \"key_name\")", "def get_name(self):\n return m2.x509_extension_get_name(self.x509_ext)", "def kms_key_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_name\")", "def _get_key_name(self, name):\n base_path = force_text(self.location)\n final_path = urljoin(base_path + \"/\", name)\n name = os.path.normpath(final_path.lstrip('/'))\n\n if six.PY2:\n name = name.encode('utf-8')\n return name", "def filename_for_key(self, key, extension=None):\n if extension is None:\n extension = self.file_extension\n f = self.key2basename(key) + extension\n return os.path.join(self.basepath, f)", "def actual_key(self, key):\n key_list = []\n if key.scope == Scope.children:\n key_list.append('children')\n elif key.scope == Scope.parent:\n key_list.append('parent')\n else:\n key_list.append([\"usage\", \"definition\", \"type\", \"all\"][key.scope.block])\n\n if key.block_scope_id is not None:\n key_list.append(key.block_scope_id)\n if key.student_id:\n key_list.append(key.student_id)\n return \".\".join(key_list)", "def GroupsExtension_getPackageName():\n return _libsbml.GroupsExtension_getPackageName()", "def getKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n\n ns_alias = self.namespaces.getAlias(namespace)\n\n # No alias is defined, so no key can exist\n if ns_alias is None:\n return None\n\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n\n return 'openid.' 
+ tail", "def getKeyPath(self, keyPath):\n parent = self\n parts = keyPath.split(\".\")\n for part in parts[:-1]:\n child = parent.get(part, None)\n if child is None:\n return None\n parent = child\n return parent.get(parts[-1], None)", "def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")", "def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")", "def GetKey(self, obj, keyName):\n\n key = (self._configKey is None and [\"Persistence_Options\"] or [self._configKey])[0]\n\n key += CONFIG_PATH_SEPARATOR + obj.GetKind()\n key += CONFIG_PATH_SEPARATOR + obj.GetName()\n key += CONFIG_PATH_SEPARATOR + keyName\n\n return key", "def _filename(self, key):\n return os.path.join(self.root, key[:2], key)", "def get_env_key(obj, key=None):\n return str.join('_', [obj.__module__.replace('.','_').upper(),\n key.upper()])", "def get_key_recursive(lang_map, lang_code, key_name, default=None):\n key_val = lang_map.get(lang_code, {}).get(key_name, sentinel)\n\n if key_val is not sentinel:\n return key_val\n\n parts = lang_code.split('_')\n parts.pop()\n if not parts:\n return default\n\n _lang_code = '_'.join(parts)\n return get_key_recursive(lang_map, _lang_code, key_name, default)", "def sub_key(dirname):\n return SUB_PREFIX + dirname", "def key_pair_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"key_pair_name\")", "def key_name(self):\n return self._key_name", "def _key_name(self, key):\n if type(key) == type(\"\"):\n return str(curses.keyname(ord(key)).decode(\"utf-8\"))\n return False", "def fname(key):\n return key.rsplit(\"/\", 1)[-1]", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n 
return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def name(self):\n return self.key", "def getKey(instance):\n return instance['name']", "def GetSubkeyByName(self, name):", "def _get_key(self, file_name, config) -> str:\n pass", "def fullkey(self, key):\n if len(self.basekey) > 0:\n return \"{}:{}\".format(self.basekey, key)\n else:\n return key", "def extract_key_usage(self, ext):\n res = []\n fields = KU_FIELDS[:]\n\n # \"error-on-access\", real funny\n if not ext.key_agreement:\n fields.remove('encipher_only')\n fields.remove('decipher_only')\n\n for k in fields:\n val = getattr(ext, k, False)\n if val:\n res.append(k)\n return res", "def util_read_keyname(conf_file=None, verbose=0, dryrun=False):\n dr = DebRepo(dryrun=dryrun, **config(conf_file=conf_file, verbose=verbose))\n keyname = dr.read_keyname()\n print(keyname)", "def get_key (self, name):\n return self + name", "def get_key(self, key, default=_MISSING):\n if '.' in key:\n # Nested key of form \"foo.bar\"\n key, remainder = key.split('.', 1)\n if default != _MISSING:\n try:\n value = self[key].get_key(remainder, default)\n except KeyError:\n # subdict exists, but doesn't contain key\n return default\n except AttributeError:\n # key points to non-dict thing, so no get_key attribute\n return default\n else:\n value = self[key].get_key(remainder)\n else:\n # Single, non-nested key of form \"foo\"\n if default != _MISSING:\n return self.get(key, default)\n else:\n return self[key]\n return value", "def get_root_key(name):\n return \"REZ_{name}_ROOT\".format(name=name.upper())", "def get_key_and_internalname(self, public_id):\n query = \"\"\"SELECT aeskey,\n internalname\n FROM yubikeys\n WHERE (active = '1' OR active = 'true')\n AND publicname = %s\"\"\"\n self._execute(query, (public_id,))\n return self._dictfetchone()", "def getName(self):\n return _libsbml.GroupsExtension_getName(self)", "def key_object_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_object_name\")", "def key_pair_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_pair_name\")", "def key_pair_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_pair_name\")", "def key_pair_name(self) -> str:\n return pulumi.get(self, \"key_pair_name\")", "def get_full_name(self):\n return \"{vol} ({fs})\".format(vol=self.volume_id, fs=self.fs.get_full_name())", "def get_name_activated_plugin(self, key):\n plugins = self.get_all_childname(key)\n for plugin in plugins:\n if self.get(key, plugin, format=bool):\n return plugin\n return None", "def build_flattened_key(prefix, key):\n return key if not prefix else prefix + \".\" + key", "def _get_raw_key(args, key_field_name):\n flag_key = getattr(args, key_field_name, None)\n if flag_key is not None:\n return flag_key\n return _read_key_store_file().get(key_field_name)", "def root_given_key(prob_key):\n root = ''\n for i, info in enumerate(prob_key):\n if i != 0:\n root += '_'\n root += str(info)\n return root.replace('.', '_')", "def getKey( self, key ):\n if key in self.conf:\n return self.conf[key]\n else:\n return None", "def get_key(self, language_name, key):\n self.language_name_global = language_name\n self.key_global = key\n package_path = f\"language_manager/packages/{language_name}.langpkg\"\n if not self.__copy_check_same__(package_path):\n self.__copy__(package_path)\n\n # Import the temp Python file coming out of the package\n import temp\n self.__log__(f\"Getting key \\\"{key}\\\"\\n\")\n # 
Take the key (type: str)\n return_val = temp.LANGUAGE[\"contents\"][key]\n self.__log__(\"Done.\")\n # Delete the language package from the RAM to free up space\n del temp\n self.__log__(\"\\n--- Finished function call ---\\n\\n\")\n # Return the value of the key\n return return_val", "def get_store_key(asset):\n return '.'.join([asset.name, asset.uid, asset.ext])", "def _format_key(self, k: str) -> str:\n if k[0] == '.':\n k = k[1:]\n k = k.replace('.', '_')\n k = k.upper()\n k = re.sub(self.KEY_REGEX, '', k)\n return k", "def key_from_req(req):\n if hasattr(req, \"key\"):\n # from pkg_resources, such as installed dists for pip-sync\n key = req.key\n else:\n # from packaging, such as install requirements from requirements.txt\n key = req.name\n\n key = key.replace(\"_\", \"-\").lower()\n return key", "def shorter_name(key):\n key_short = key\n for sep in ['#', '/']:\n ind = key_short.rfind(sep)\n if ind is not None:\n key_short = key_short[ind+1:]\n else:\n key_short = key_short\n return key_short.replace('-', '_').replace('.', '_')", "def key(self):\n return key_for_name(self.name)", "def get_ext(f_name):\n \n for i in range(len(f_name)-1,-1,-1):\n if f_name[i]=='.':\n return f_name[i:]\n return None", "def get_all_childname(self, key):\n return [x.split(\"/\")[1] for x in self.get_all_keys() if x.split(\"/\")[0] == key]", "def generate_key(self):\n cmd = self.generate_key_cmd()\n self.show(cmd)\n if self.dryrun:\n return None\n s, _, _ = self.as_user(cmd)\n assert s == 0, ('failed to generate key', cmd)\n keyname = self.extract_key_name()\n return keyname", "def _get_key_path(self, key_name, serial):\n return '%s%s/%d_%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, serial,\n key_name)", "def _get_deployment_friendly_name(auth_header, async_kvstore_client, request_context):\n\n response = yield async_kvstore_client.async_kvstore_get_request(\n constants.META_COLLECTION_NAME, auth_header=auth_header, owner=constants.NOBODY)\n\n if response.code == http.OK:\n response_json = yield response.json()\n defer.returnValue(response_json[0][constants.DEPLOYMENT_FRIENDLY_NAME])\n\n LOGGER.error(\"Unable to fetch deployment friendly name for instance, code={}\".format(response.code))\n defer.returnValue(\"\")", "def _getKEX(group, version):\n if group in GroupName.allFF:\n return FFDHKeyExchange(group, version)\n return ECDHKeyExchange(group, version)", "def read_keyname(self):\n self.show(f'cat {self.keyname_file}')\n with open(self.keyname_file) as f:\n keyname = f.readline().strip()\n self.report('Using key:', keyname)\n return keyname", "def get_store_key(asset, variation):\n return '.'.join([\n asset.name,\n asset.uid,\n variation.name,\n variation.version,\n variation.ext\n ])", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))", "def get_item_key(self, filename):\n _instrument, filekind = utils.get_file_properties(self.observatory, filename)\n return filekind.upper() if self.observatory == \"jwst\" else filekind.lower()", "def subkey(cls, key: str, start: int, end: Optional[int]) -> Optional[str]:\n key_parts = key.split(\".\")\n\n # if end not specified set to max.\n if end is None:\n end = len(key_parts)\n\n if len(key_parts) < start or len(key_parts) < end:\n return None\n\n res = \".\".join(key_parts[start:end])\n return res", "def get_key(self, key):\n ret = None\n qkey = key.__qualname__\n ret = self.get(qkey)\n if not ret:\n # check all entries if qualname match\n for k in 
self:\n if k.__qualname__ == qkey:\n return self.get(k)\n return", "def cache_key_part(self) -> str:\n return self.name", "def get_name_for_actor(actor, keyName=\"MeshName\"):\n information = actor.GetPropertyKeys()\n if information is None:\n return None\n\n iterator = vtkInformationIterator()\n iterator.SetInformation(information)\n iterator.InitTraversal()\n while (not iterator.IsDoneWithTraversal()):\n key = iterator.GetCurrentKey()\n if key.GetName() == keyName:\n return information.Get(key)\n break\n iterator.GoToNextItem()\n return None", "def extension_name(ext):\n return \"script_extensions::%s\" % \"_\".join([e.upper() for e in ext])" ]
[ "0.61867285", "0.6151179", "0.59994215", "0.59087443", "0.59087443", "0.58050305", "0.57887447", "0.57743907", "0.57119745", "0.5711618", "0.56672543", "0.5649241", "0.5592001", "0.55778885", "0.55366206", "0.55205065", "0.55190045", "0.55060714", "0.55060714", "0.55060714", "0.54835165", "0.5469332", "0.5467664", "0.546364", "0.54382837", "0.53961414", "0.5376713", "0.5361389", "0.5351113", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5327895", "0.5325407", "0.53189397", "0.52868634", "0.5285683", "0.5267129", "0.52354467", "0.5235304", "0.523", "0.5225603", "0.52222806", "0.5219983", "0.52100414", "0.5196049", "0.51808256", "0.51808256", "0.5179719", "0.5178627", "0.51777536", "0.51777405", "0.51681334", "0.5167156", "0.5164629", "0.51409614", "0.5135669", "0.5127195", "0.5121901", "0.51085705", "0.5102542", "0.51005733", "0.5090503", "0.50850207", "0.5073992", "0.5059809", "0.50549346", "0.5050695", "0.5039707", "0.50334877", "0.5031385", "0.50285214", "0.50285155", "0.5025204", "0.50206715", "0.5013364" ]
0.77827585
0
This function gets the total occurrences of words and syllables in the original Unicode Garshana corpus. To do this, it opens a .csv file with UTF-16 encoding and splits each line on commas, expecting the Sumerian text to be in the 8th column. It filters annotations from each line and tracks the occurrence of each word and syllable. All combinations of unigrams, bigrams, and trigrams are treated as individual syllables.
def get_counts(data):
    word_count = {}
    syll_count = {}
    infile = data.corpus
    try:
        open_file = codecs.open(infile, 'r', encoding='utf-16')
        for line in open_file:
            line = line.lower()
            # Remove tablet indexing info and line numbers. Grab only text data
            line = line.split(',')
            text = clean_line(line[7])
            # Update the occurrences of the words in the line
            for word in text.split():
                count = word_count.setdefault(word, 0)
                word_count[word] = count + 1
                # Track occurrences of syllables
                update_syllable_count(word, syll_count)
        open_file.close()
    except IOError:
        print("Cannot open: " + infile)
    return (word_count, syll_count)
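A minimal, self-contained sketch of the update_syllable_count helper that get_counts relies on, assuming syllables within a word are hyphen-delimited; the example word 'lu-gal-e' and the standalone usage are illustrative assumptions, not part of the corpus record:

def update_syllable_count(word, syll_count):
    # Count every contiguous run of 1, 2, or 3 hyphen-delimited syllables
    # (unigrams, bigrams, trigrams) as an individual syllable unit.
    syllables = word.split('-')
    for n in range(1, 4):
        for j in range(len(syllables) - n + 1):
            gram = '-'.join(syllables[j:j + n])
            syll_count[gram] = syll_count.get(gram, 0) + 1

if __name__ == '__main__':
    syll_count = {}
    update_syllable_count('lu-gal-e', syll_count)
    print(syll_count)
    # expected: {'lu': 1, 'gal': 1, 'e': 1, 'lu-gal': 1, 'gal-e': 1, 'lu-gal-e': 1}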
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def get_analyze_per_file(self):\n \"\"\"Exclude tags, exclude binary (img), count words without non literal characters and digits\"\"\"\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n df_tmp = pd.DataFrame(columns=['word', 'cnt', 'word_low'])\n w_cnt = 0\n word_counter = {}\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n for word in word_list:\n\n if word not in word_counter:\n word_counter[word] = 1\n else:\n word_counter[word] = word_counter[word] + 1\n w_cnt += 1\n\n for word, occurance in word_counter.items():\n df_tmp = df_tmp.append({'word': '{:15}'.format(word), 'cnt': '{:3}'.format(occurance),\n 'word_low': '{:15}'.format(word).lower()}, ignore_index=True)\n df_tmp = df_tmp.sort_values(by='word_low')\n df_tmp.loc[(df_tmp.word != df_tmp.word_low), 'word'] = df_tmp.cnt\n df_tmp.loc[(df_tmp.word == df_tmp.cnt), 'cnt'] = 0\n df_tmp.loc[(df_tmp.word == df_tmp.word_low), 'word'] = 0\n df_tmp['word'] = df_tmp.word.astype(int)\n df_tmp['cnt'] = df_tmp.cnt.astype(int)\n df_tmp = df_tmp.groupby(['word_low'])['cnt', 'word'].sum().reset_index()\n conn = sqlite3.connect('for_python_ht.db')\n try:\n try:\n sqlite_for_ht.CreateTableSingle.delete_table(f_3, self.filename)\n print(datetime.now(), '-', self.filename, 'Table deleted at the start point')\n except Exception:\n print(datetime.now(), '-', 'Something went wrong')\n traceback.print_exc()\n df_tmp.to_sql(name=self.filename, con=conn, index=False)\n print(datetime.now(), '-', self.filename, 'Table created and filled with data')\n except Exception:\n print(datetime.now(), '-', 'file with name {} already exists'.format(self.filename))\n traceback.print_exc()\n print(datetime.now(), '-', 'word analyse for', self.filename, 'done')\n sqlite_for_ht.HandleTemp.update_table(f_2, 'status', 'Done', self.filename)\n return None", "def process():\n words = read_csv('american-words.80', header=None)\n def get_value(word):\n \"\"\"\n A sub-process run on each word. 
It gets the value of each letter, and add up the values for the whole word.\n \"\"\"\n letters = 'abcdefghijklmnopqrstuvwxyz'\n sum = 0\n for letter in word:\n letter_value = letters.find(letter)\n if letter_value == -1:\n letter_value = 0\n sum += letter_value\n return sum\n words['values'] = words[0].apply(get_value)\n # get those words whose values are 100\n words = words[words['values'] == 100]\n # get the length of these words and sort ascending\n words['length'] = words[0].apply(len)\n words = words.sort(columns='length')\n return words[0].values", "def analyze_data(df, sentiment_col, tweet_col, path):\n\n # create empty dictionaries to store all encountered words and their frequencies\n all_dict = {}\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n # initialize counters to counter total number of tweets based on their emotion\n pos_count = 0\n neg_count = 0\n neu_count = 0\n\n # iterate through each row of the df\n for index, row in df.iterrows():\n if row[sentiment_col] == \"positive\":\n pos_count = iterate_words(\n pos_count, row[tweet_col], all_dict, pos_dict)\n\n if row[sentiment_col] == \"negative\":\n neg_count = iterate_words(\n neg_count, row[tweet_col], all_dict, neg_dict)\n\n if row[sentiment_col] == \"neutral\":\n neu_count = iterate_words(\n neu_count, row[tweet_col], all_dict, neu_dict)\n\n # visualize statistics\n visualize_stats(all_dict, 'all_plot.png', 'all_cloud.png',\n 'Word frequency in all tweets', path)\n visualize_stats(pos_dict, 'pos_plot.png', 'pos_cloud.png',\n 'Word frequency in positive tweets', path)\n visualize_stats(neg_dict, 'neg_plot.png', 'neg_cloud.png',\n 'Word frequency in negative tweets', path)\n visualize_stats(neu_dict, 'neu_plot.png', 'neu_cloud.png',\n 'Word frequency in neutral tweets', path)\n\n # make plot for emotion frequency\n emotions = ('Positive', 'Negative', 'Neutral')\n freq = [pos_count, neg_count, neu_count]\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.xaxis.grid(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.bar(range(len(emotions)), freq, align='center',\n color=['forestgreen', 'firebrick', 'goldenrod'])\n plt.xticks(range(len(emotions)), emotions)\n plt.title('Tweet frequency based on emotion')\n plt.savefig(path + 'emotion_plot.png')\n plt.close()\n\n # make pie for emotion frequency\n sizes = [pos_count / len(df.index), neg_count /\n len(df.index), neu_count / len(df.index)]\n colors = ['forestgreen', 'firebrick', 'goldenrod']\n plt.pie(sizes, labels=emotions, colors=colors,\n autopct='%1.1f%%', startangle=140)\n plt.title('Tweet frequency based on emotion')\n plt.axis('equal')\n plt.savefig(path + 'emotion_pie.png')\n plt.close()", "def main ():\n fio = FileIo(\"../input2.txt\")\n text = fio.getInput()\n p = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n out = filter(None, p.split(text))\n #print out[2]\n #print len(out)\n wc = 0\n\n for s in out:\n text = nltk.word_tokenize(s)\n wc += wordCount( text )\n print wc", "def extract_syllable_features_from_txt():\n input_files = sys.argv[1]\n csv_name = sys.argv[2]\n syllable_stats = pd.DataFrame(columns=SYLLABLE_COLUMNS)\n re_word = re.compile(r'[\\w-]+')\n i = 0\n for filename in os.listdir(input_files):\n if filename != '.DS_Store':\n print(filename, i)\n syllable_count = 0\n for line in open(input_files+filename):\n for word in re_word.findall(line):\n syllable_count += estimate(word)\n syllable_stats = syllable_stats.append({\n TRANSCRIPT_ID: filename[:-4],\n MEMORY_SYLLABLE_COUNT: syllable_count,\n }, ignore_index=True)\n i += 1\n 
syllable_stats = syllable_stats.set_index(TRANSCRIPT_ID)\n syllable_stats.to_csv(csv_name+'.csv')", "def count_words(filename):", "def analyze_embeddings(emb):\n dic = {\"Hi\": 0, \"En\": 1, \"Ot\": 2}\n count = [0, 0, 0, 0]\n count_zero = [0, 0, 0, 0]\n for i, j in zip(emb, corpus_trans):\n for k, l in zip(i, j):\n count[dic[l[1]]] += 1\n if sum(k) == 0:\n count_zero[dic[l[1]]] += 1\n count[-1] = sum(count)\n count_zero[-1] - sum(count_zero)\n print(\"hi, en, ot, total\")\n print(\"count: \", count)\n print(\"zero count: \", count_zero)", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def _count_vocab(self,raw_documents, fixed_vocab=False):\n if fixed_vocab:\n vocabulary = self.vocabulary_\n else:\n # Add a new value when a new vocabulary item is seen\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n analyze = super().build_analyzer()\n \n j_indices = []\n indptr = []\n\n values = array.array(str('f'))\n indptr.append(0)\n for doc in raw_documents:\n #doc = tupla[0]\n feature_counter = {}\n #texttlist = doc.split(sep=\" \")\n for feature in analyze(doc):#texttlist:\n try:\n \n # Ignore out-of-vocabulary items for fixed_vocab=True\n feature_idx = vocabulary[feature]\n #print(feature_idx)\n #fti_feature = calc_fti(feature,raw_documents)\n \n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n #print(feature_counter[feature_idx])\n except KeyError:\n # Ignore out-of-vocabulary items 
for fixed_vocab=True\n continue\n\n\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n if not fixed_vocab:\n # disable defaultdict behaviour\n vocabulary = dict(vocabulary)\n if not vocabulary:\n raise ValueError(\"empty vocabulary; perhaps the documents only\"\n \" contain stop words\")\n\n if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1\n if _IS_32BIT:\n raise ValueError(('sparse CSR array has {} non-zero '\n 'elements and requires 64 bit indexing, '\n 'which is unsupported with 32 bit Python.')\n .format(indptr[-1]))\n indices_dtype = np.int64\n\n else:\n indices_dtype = np.int32\n \n j_indices = np.asarray(j_indices, dtype=indices_dtype)\n indptr = np.asarray(indptr, dtype=indices_dtype)\n \n #print (vocabulary)\n X = sp.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.float32)\n X.sort_indices() \n \n self.vocabulary_calculated = vocabulary\n\n return vocabulary, X", "def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)", "def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n 
if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def _count(self):\n words = [word.lower() for word in self.corpus.words()]\n bigrams_words = bigrams(words)\n for bigram in bigrams_words:\n self._bigrams[bigram] += 1", "def cleanCsv(): \n\n count_neutral = 0\n count_sad = 0\n count_angry = 0\n count_happy = 0\n\n count_session_neutral = 0 \n\n for column_values in raw_data:\n\n if significant_data.fieldnames is None:\n dh = dict((h, h) for h in raw_data.fieldnames)\n significant_data.fieldnames = raw_data.fieldnames\n significant_data.writerow(dh)\n\n if column_values['AOI[Sad_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Left]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Right]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Left]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Right]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Sad_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Right]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Left]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n return {\n 'count_neutral': count_neutral,\n 'count_sad': count_sad,\n 'count_angry': count_angry,\n 'count_happy': count_happy,\n }", "def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n 
sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self", "def count_syllables(words):\n\n\n count = 0\n\n for word in words:\n word_count = count_syllables_in_word(word)\n count = count + word_count\n return count", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def count_same_words(cuisine_file, menu):\n\n cuisine_list = separate_words(cuisine_file)\n \n same_word_count = 0\n \n for i in cuisine_list:\n for j in menu:\n if i == j:\n same_word_count += 1\n \n return same_word_count", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def annotate_tsv_freq(in_tsv_gz,annotation_tsv):\n sys.stderr.write(\"Reading TSV file ...\\n\")\n nicollo = pd.read_csv(BOLLI, sep=\"\\t\")\n nicollo = nicollo.iloc[:,[1,2,4,5,23]]\n nicollo_counts = nicollo.groupby(['CHR','START'])['MT'].count()\n nol_var = nicollo.drop(['WT','MT'], axis = 1) \n nol_var = nol_var.set_index(['CHR', 'START'])\n\n #nicollo_counts = nicollo.groupby([\"CHR\",\"START\",\"WT\",\"MT\"]).size().reset_index(name=\"count\")\n #nicollo_counts = nicollo_counts[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n\n mmrf = pd.read_csv('/ifs/res/leukgen/home/yellapav/MMRF/MMRF_CoMMpass_IA9_All_Canonical_Variants.txt', sep=\"\\t\")\n mmrf=mmrf.iloc[:,[0,1,2,4,5,19,23]]\n mmrf=mmrf.drop_duplicates()\n\n mmrfM=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].median()\n mmrfC=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].count()\n 
mmrfQ25=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.25)\n mmrfQ75=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.75)\n \n\n #anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\")\n anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\", low_memory=False)\n #anno_tsv[anno_tsv['FILTER'] == \"PASS\"]\n counts_tsv=anno_tsv.groupby([\"CHR\",\"START\",\"REF\",\"ALT\"]).size().reset_index(name=\"count\")\n counts_tsv=counts_tsv[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n counts_median=anno_tsv.groupby(['CHR','START'])['TARGET_VAF'].median()\n\n\n\n inFile = gzip.open(in_tsv_gz,'r')\n \n sys.stderr.write(\"Annotating ...\\n\")\n for record in inFile:\n record=record.decode(\"utf-8\")\n record=record.rstrip()\n recArr=record.split(\"\\t\")\n \n cl = [] \n freq = [] \n medVAF = [] \n Q25 = [] \n Q75 = [] \n positions = [] \n normal = \"0\" \n normalVAF = \"0\" \n bolli_cl = [] \n bolli_freq = [] \n bolli_positions = [] \n bolli_anno = [] \n flag = 0\n bolli_flag = 0\n if record.startswith(\"#\"):\n continue\n\n if recArr[0] == \"ID_VARIANT\":\n cl = \"MMRF_Class\"\n freq = \"MMRF_Frequency\"\n medVAF = \"MMRF_VAF\"\n Q25 = \"MMRF_Q25\"\n Q75 = \"MMRF_Q75\"\n positions = \"MMRF_Positions\"\n normal = \"Normals_Frequency\"\n normalVAF = \"Normals_median_VAF\"\n bolli_cl = \"Bolli_Class\"\n bolli_freq = \"Bolli_Frequency\"\n bolli_positions = \"Bolli_Positions\"\n bolli_anno = \"Bolli_Annotation\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions, bolli_cl, bolli_freq, bolli_anno, bolli_positions, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n continue\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in mmrfC.index:\n cl = \"genomic_exact\"\n freq = str(mmrfC.loc[(chrom,pos)])\n medVAF = str(mmrfM.loc[(chrom,pos)])\n Q25 = str(mmrfQ25.loc[(chrom,pos)])\n Q75 = str(mmrfQ75.loc[(chrom,pos)])\n positions = str(pos)\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n flag = 1\n if flag == 0:\n mmrfCsub=mmrfC.loc[chrom]\n if not mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].empty:\n for i in mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].index.values:\n cl = \"genomic_close\"\n freq.append(str(mmrfC.loc[(chrom,i)]))\n medVAF.append(str(mmrfM.loc[(chrom,i)]))\n Q25.append(str(mmrfQ25.loc[(chrom,i)]))\n Q75.append(str(mmrfQ75.loc[(chrom,i)]))\n positions.append(str(i))\n freq = (\":\".join(freq))\n medVAF = (\":\".join(medVAF))\n Q25 = (\":\".join(Q25))\n Q75 = (\":\".join(Q75))\n positions = (\":\".join(positions))\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n else:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in nicollo_counts.index:\n bolli_cl = \"genomic_exact\"\n bolli_freq = str(nicollo_counts.loc[(chrom,pos)]) \n bolli_positions = str(pos)\n bolli_anno = str(nol_var.loc[chrom, pos]['Variant_class'].values[0])\n record = [ 
record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n bolli_flag = 1\n\n\n if bolli_flag == 0: \n nicollo_counts_sub=nicollo_counts.loc[chrom]\n if not nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].empty:\n for i in nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].index.values:\n #if not nicollo_counts_sub.ix[start:end].empty:\n # for i in nicollo_counts_sub.ix[start:end].index.values:\n #print(\"XXXXXXX\",i, nicollo_counts_sub.loc[(chrom,i)], start, end)\n bolli_cl = \"genomic_close\"\n bolli_freq.append(str(nicollo_counts.loc[(chrom,i)]))\n bolli_anno.append(str(nol_var.loc[(chrom,i)]['Variant_class'].values[0]))\n bolli_positions.append(str(i))\n bolli_freq = (\":\".join(bolli_freq))\n bolli_positions = (\":\".join(bolli_positions))\n bolli_anno = (\":\".join(bolli_anno))\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n else:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_positions = \"NA\"\n bolli_anno = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_anno = \"NA\"\n bolli_positions = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n normal = \"0\"\n normalVAF = \"0\"\n try:\n chrom=str(recArr[3])\n pos=int(recArr[4])\n normal = counts_tsv.loc[(chrom,pos),\"count\"]\n normal = normal.ix[0]\n normal = str(normal)\n\n normalVAF = str(counts_median.loc[(chrom,pos)])\n\n record = [ record, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n\n except:\n normal = \"0\"\n normalVAF = \"0\"\n record = [ record, str(normal), str(normalVAF) ]\n record = (\"\\t\".join(record))\n print(record)", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def _get_num_syllables(doc: Doc, min_syllables: int = 1):\n text = (word for word in doc if not word.is_punct and \"'\" not in word.text)\n syllables_per_word = tuple(syllapy.count(word.text) for word in text)\n return sum(c for c in syllables_per_word if c >= min_syllables)", "def count_words_and_dublicates(novel):", "def __yago_counts(self):\n\n num_lines = 0\n print(\"Calculating Yago occurrences\")\n custom_freq = {}\n with open(\n os.path.join(self.base_url, \"generic/p_e_m_data/aida_means.tsv\"),\n \"r\",\n encoding=\"utf-8\",\n ) as f:\n for line in f:\n num_lines += 1\n\n if num_lines % 5000000 == 0:\n print(\"Processed {} lines.\".format(num_lines))\n\n line = line.rstrip()\n line = unquote(line)\n parts = line.split(\"\\t\")\n mention = parts[0][1:-1].strip()\n\n ent_name = parts[1].strip()\n ent_name = 
ent_name.replace(\"&amp;\", \"&\")\n ent_name = ent_name.replace(\"&quot;\", '\"')\n\n x = ent_name.find(\"\\\\u\")\n while x != -1:\n code = ent_name[x : x + 6]\n replace = unicode2ascii(code)\n if replace == \"%\":\n replace = \"%%\"\n\n ent_name = ent_name.replace(code, replace)\n x = ent_name.find(\"\\\\u\")\n\n ent_name = self.wikipedia.preprocess_ent_name(ent_name)\n if ent_name in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]:\n if mention not in custom_freq:\n custom_freq[mention] = {}\n ent_name = ent_name.replace(\" \", \"_\")\n if ent_name not in custom_freq[mention]:\n custom_freq[mention][ent_name] = 1\n\n return custom_freq", "def process(self):\n\n linelang = defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)", "def update_syllable_count(word, syll_count):\n\n syllables = word.split('-')\n for i in range(1, 4):\n for j in range(len(syllables) - i + 1):\n gram = '-'.join(syllables[j: j + i])\n count = syll_count.setdefault(gram, 0)\n syll_count[gram] = count + 1", "def corpora_stats(output):\n igFiles = []\n for root, directories, filenames in os.walk(output + \"/ig/\"):\n for filename in filenames:\n igFiles.append(os.path.join(root, filename))\n igFiles = filter(lambda x: \".txt\" in x, igFiles)\n words = []\n for file in igFiles:\n fileH = open(file, \"r\")\n words = words + fileH.read().split(\" \")\n print(\"Number of words in IG corpus: {}\".format(len(words)))\n print(\"Vocabulary size of IG corpus: {}\".format(len(set(words))))", "def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")", "def process_corpus(args):\n\n fs = open(args.input,'r')\n out = list()\n for line in fs:\n blob = TextBlob(line.strip())\n result_info = dict()\n result_info\n result_info['correct'] = str(blob.correct())\n if 
args.parse :\n result_info['parse'] = get_parsed_text(blob)\n if args.tokenize:\n result_info['tokenize'] = get_tokenizer_result(blob)\n if args.sentiment:\n result_info['sentiment'] = analyze_sentiment(blob)\n if args.sentence_sentiment:\n result_info['sentence_sentiment'] = analyze_sentence_sentiment(blob)\n if args.noun_phrase:\n result_info['noun_phrase'] = get_noun_phrases(blob)\n if args.pos:\n result_info['pos'] = get_pos_tags(blob)\n\n out.append(result_info)\n print out\n json.dump(out,open('out.json','w'))\n fs.close()\n print '******************************* Execution completed *********************************'", "def tokenize_and_split_bis(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n i = -1\n document = 0\n terms = 0\n new_document = True\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n document += 1\n new_document = True\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n list3.append(1)\n list4.append(1)\n new_document = False\n terms += 1\n else : \n i = i - 1\n w.append(dic[word])\n list4[dic[word]] += 1\n terms += 1\n if new_document: \n list3[dic[word]] += 1\n new_document = False\n \n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n\n moy = 0\n len_dic = len(dic.keys())\n list5 = [0 for x in range(len_dic)]\n for key in dic.keys():\n if list4[dic[key]] > 0:\n tf = list4[dic[key]] / terms\n idf = math.log(document / list3[dic[key]])\n tfIdf = tf * idf\n list5[dic[key]] = tfIdf\n # print(\"the word \" + str(key) + \" appairs \" + str(list4[dic[key]]) + \" times.\")\n # print(\"his frequency is \" + str(list4[dic[key]] / terms) )\n # print(\"the word \" + str(key) + \" appairs \" + str(list3[dic[key]]) + \" times in each document.\")\n # print(\"his frequency is \" + str(idf))\n # print(\"utility \" + str(tfIdf))\n moy += tfIdf\n \n moy = moy / len_dic \n # print(moy)\n dic_bis = {}\n i = -1\n for key in dic.keys():\n value = list5[dic[key]]\n # print(str(value))\n if (value > oracle * moy):\n i += 1\n dic_bis[key] = i\n # else:\n # print(\"not pass \" + key + \" \" + str(value))\n \n \n # print(dic_bis == dic)\n # print(dic)\n return dic_bis,list1,list2", "def estimate(word):\n parts = re.split(r'[^aeiouy]+', word)\n valid_parts = []\n\n for part in parts:\n if part != '':\n valid_parts.append(part)\n\n syllables = 0\n\n for p in re_subsyllables:\n if p.match(word):\n syllables -= 1\n\n for p in re_addsyllables:\n if p.match(word):\n syllables += 1\n\n syllables += len(valid_parts)\n\n if syllables <= 0:\n syllables = 1\n\n return syllables", "def countsyllables_en(word):\r\n\tif not word:\r\n\t\treturn 0\r\n\r\n\t# Remove final silent 'e'\r\n\tif word[-1] == \"e\":\r\n\t\tword = word[:-1]\r\n\r\n\t# Check for a cached syllable count\r\n\tif word in fallback_cache:\r\n\t\treturn fallback_cache[word]\r\n\r\n\t# Count vowel groups\r\n\tresult = 0\r\n\tprev_was_vowel = False\r\n\tfor char in word:\r\n\t\tis_vowel = char in VOWELS or char == 'y'\r\n\t\tif is_vowel and not prev_was_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\t# Add & subtract syllables\r\n\tfor r in fallback_addsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult += 1\r\n\tfor r in fallback_subsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult -= 1\r\n\r\n\t# Cache the syllable count\r\n\tfallback_cache[word] = result\r\n\r\n\treturn result", "def 
train(self, corpus): \n for sentence in corpus.corpus:\n prev_word = None\n for datum in sentence.data:\n word = datum.word\n self.unigram_count[word] += 1\n if prev_word != None:\n self.bigram_count[prev_word][word] += 1\n prev_word = word\n \n self.vocabulary_size = len(self.unigram_count)\n self.num_words = sum(self.unigram_count.values())", "def analyze_text (self, testing_string): \n self.length = len(self.testing_string)\n self.total_words = (self.testing_string).split()\n self.total_unique_words = set(self.total_words)\n\n self.total_characters = (int)(0)\n for ch in self.testing_string :\n if(ch.isspace() != True):\n self.total_characters = self.total_characters + 1 \n\n self.total_unique_characters = set(self.testing_string)\n \n Linguist.about_given_string[\"Length\"] = self.length\n Linguist.about_given_string[\"Total_words\"] = len(self.total_words)\n Linguist.about_given_string[\"Total_unique_words\"] = len(self.total_unique_words)\n Linguist.about_given_string[\"Total_characters\"] = self.total_characters\n Linguist.about_given_string[\"Total_unique_characters\"] = len(self.total_unique_characters)", "def num_syllables(self, word):\n \"\"\"\n using the logic of vowel counting, count all vowels in the pronunciations\n \"\"\"\n dictionary = self._pronunciations;\n # check if word is present in the CMU dictionary\n if word in dictionary :\n word_pronunciations = dictionary[word.lower()]\n else :\n return 1\n \n vowels = ['A', 'E', 'I', 'O', 'U']\n \n ## find the shorter pronunciation for word\n shorter_arr = [];\n for pronunciation in word_pronunciations :\n if len(pronunciation) > len(shorter_arr) : shorter_arr = pronunciation\n \n num_length = 0\n \n for phoneme in shorter_arr :\n if phoneme[:1] in vowels : num_length += 1\n \n return num_length", "def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token", "def syllable_dict():\n counts = dict()\n \n with open('data/Syllable_dictionary.txt') as file:\n for line in file:\n arr = line.split(' ', 1)\n if 'E' in arr[1]:\n cts = arr[1].split(' ', 1)\n counts[arr[0].strip('\\'')] = int(cts[1][0])\n counts[(arr[0].strip('\\'') + \"_\")] = int(cts[0][1])\n else:\n counts[arr[0].strip('\\'')] = int(arr[1][0])\n return counts", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n 
#print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def findSubcorpora():\n counterTwo = 0\n\n with open('bc.processed2.csv', 'r') as readfile:\n for line in readfile:\n counterTwo += 1\n if re.match('^<subcorpus', line):\n print(str(counterTwo) + '\\t' + line + '\\n')", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def count_syllables(book):\n d = dict(cmudict.entries())\n with open(book, 'r') as myfile:\n booky = myfile.read().lower()\n tokenized_book = nltk.word_tokenize(booky)\n\n count = 0\n for word in tokenized_book:\n count += ( nsly(word, d))\n\n return count", "def setCounts(self):\n N=len(self.y)\n self.counts=np.zeros(len(self.y))\n self.idf=np.zeros(len(self.y))\n for i in range(0,len(self.counts)):\n for word in self.qtext:\n wc=self.atext[i].count(word)\n self.counts[i]+=wc\n if wc>0:\n d=0\n for sentence in self.atext:\n if word in sentence:\n d+=1\n self.idf[i]+=wc*np.log(N/d)", "def analyze_social_patterns(input_file):\n # First read in the spelling variants\n # standard, non standard\n # us, uk spelling\n standard_non_standard_pairs = util.read_variants_from_file(input_file)\n all_forms = set()\n all_forms = all_forms.union(standard_non_standard_pairs.keys())\n all_forms = all_forms.union(standard_non_standard_pairs.values())\n \n # Keeping track of stats\n subreddit_count_standard = {}\n subreddit_count_non_standard = {}\n\n # Data\n sentences_metadata = DataWrapper.DataWrapperGzipMulti(config.REDDIT_PROCESSED_DATA,\n config.REDDIT_METADATA_DATA)\n \n for sentence, metadata in sentences_metadata:\n\n author, author_text, subreddit, link = metadata.split(\"\\t\")\n\n # Now check for tokens in our list\n for token in sentence:\n if 
token in all_forms:\n\n if subreddit not in subreddit_count_standard:\n subreddit_count_standard[subreddit] = 0\n subreddit_count_non_standard[subreddit] = 0\n\n # standard or US spelling\n if token in standard_non_standard_pairs:\n subreddit_count_standard[subreddit] += 1\n else:\n subreddit_count_non_standard[subreddit] += 1\n\n # Print out stats\n output_file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(output_file_name + \"_subreddits_distr.txt\", \"w\") as output_file:\n for subreddit, count in subreddit_count_standard.items():\n output_file.write(\"%s\\t%s\\t%s\\n\" % (subreddit, count, subreddit_count_non_standard[subreddit]))", "def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass", "def getCounter(self):\n word_count, noun_word_count = Counter(), Counter()\n word_rating, noun_word_rating = defaultdict(list), defaultdict(list)\n docs = self.nlp.pipe(\n self.docs, n_process=1, disable=self.disablelayers)\n \n\n for index, doc in enumerate(docs):\n for token in doc:\n if not token.is_stop and not token.is_punct and token.pos_ in self.pos:\n if token.pos_ == 'PROPN':\n word_count[token.lemma_] += 1\n word_rating[token.lemma_].append(self.ratings[index])\n else:\n noun_word_count[token.lemma_] += 1\n noun_word_rating[token.lemma_].append(self.ratings[index])\n\n # if 0<=proper nouns<=5 found, add regular nouns\n if not word_count or len(word_count) <= 5:\n word_count += noun_word_count\n word_rating = {**word_rating, **noun_word_rating}\n \n word_color = {word: self.getColor(\n ratings)[1] for word, ratings in word_rating.items()}\n word_sentiment = {word: self.getColor(\n ratings)[0] for word, ratings in word_rating.items()}\n\n return word_count, word_color, word_sentiment", "def count_syllables_in_word(word):\n\n count = 0\n\n endings = '!,;.?:'\n last_char = word[-1]\n\n if last_char in endings:\n processed_word = word[0:-1]\n else:\n processed_word = word\n\n\n if len(processed_word) <= 3:\n return 1\n if processed_word[-1] in 'Ee':\n processed_word = processed_word[0:-1]\n\n vowels = 'aeiouAEIOU'\n prev_char_was_vowel = False\n\n for char in processed_word:\n if char in vowels:\n if not prev_char_was_vowel:\n count += 1\n prev_char_was_vowel = True\n\n else:\n prev_char_was_vowel = False\n\n if processed_word[-1] in 'yY':\n count += 1\n \n\n return count", "def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print 
\"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))", "def get_vocabulary(text_fname, vocab_fname):\n with codecs.open(text_fname,'r','utf-8') as infile, \\\n codecs.open(vocab_fname,'w','utf-8') as outfile: \n\n count_map={}\n for line in infile:\n sent=line.strip().split(' ')\n for w in sent:\n count_map[w]=count_map.get(w,0.0)+1.0\n\n for w,c in count_map.iteritems(): \n outfile.write(u'{}|{}\\n'.format(w,c))", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n 
rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def get_text_frequencies(connection, feature, text_id):\n tindex2mtindex = {}\n findex2mfindex = {}\n word_counts = Counter()\n word_feature_pairs = set()\n text_token_count = 0\n unit_proj = {\n '_id': False,\n 'tokens.features.form': True\n }\n if feature != 'form':\n unit_proj['tokens.features.'+feature] = True\n db_cursor = connection.connection[Unit.collection].find(\n {'text': text_id, 'unit_type': 'line'},\n unit_proj\n )\n for unit in db_cursor:\n text_token_count += len(unit['tokens'])\n for token in unit['tokens']:\n cur_features = token['features']\n # use the form index as an identifier for this token's word\n # type\n cur_tindex = cur_features['form'][0]\n if cur_tindex not in tindex2mtindex:\n tindex2mtindex[cur_tindex] = len(tindex2mtindex)\n mtindex = tindex2mtindex[cur_tindex]\n # we want to count word types by matrix indices for faster\n # lookup when we get to the stage of counting up word type\n # occurrences\n word_counts[mtindex] += 1\n for cur_findex in cur_features[feature]:\n if cur_findex not in findex2mfindex:\n findex2mfindex[cur_findex] = len(findex2mfindex)\n mfindex = findex2mfindex[cur_findex]\n # record when a word type is associated with a feature type\n word_feature_pairs.add((mtindex, mfindex))\n csr_rows = []\n csr_cols = []\n for mtindex, mfindex in word_feature_pairs:\n csr_rows.append(mtindex)\n csr_cols.append(mfindex)\n word_feature_matrix = csr_matrix(\n (\n np.ones(len(csr_rows), dtype=np.bool),\n (np.array(csr_rows), np.array(csr_cols))\n ),\n shape=(len(tindex2mtindex), len(findex2mfindex))\n )\n # if matching_words_matrix[i, j] == True, then the word represented by\n # position i shared at least one feature type with the word represented\n # by position j\n matching_words_matrix = word_feature_matrix.dot(\n word_feature_matrix.transpose())\n\n mtindex2tindex = {\n mtindex: tindex for tindex, mtindex in tindex2mtindex.items()}\n freqs = {}\n coo = matching_words_matrix.tocoo()\n for i, j in zip(coo.row, coo.col):\n # since only matching tokens remain, the column indices indicate\n # which tokens match the token represented by row i; we need to\n # count up how many times each word appeared\n cur_token = mtindex2tindex[i]\n if cur_token not in freqs:\n freqs[cur_token] = word_counts[j]\n else:\n freqs[cur_token] += word_counts[j]\n for tok_ind in freqs:\n freqs[tok_ind] = freqs[tok_ind] / text_token_count\n return freqs", "def get_word2freq_and_pos(self, files: List[str]) -> Tuple[Dict[str, int], List[str]]:\n word2freq = defaultdict(int)\n pos_list = []\n\n logger.info(\"Tokenization\")\n for file in files:\n logger.info(\"\\tReading file: {}\".format(file))\n with open(file, 'r') as f:\n for line in tqdm(f):\n line = line.strip()\n if line != \"\":\n for m in self.tokenize(line):\n word2freq[self.word_formatter(m)] += 1\n pos_list.append(pos_substitution_format(m))\n\n return word2freq, list(set(pos_list))", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n 
#print(line)\n #print(wordslst)\n #print(count)", "def _counter(title_list):\n t = Tokenizer()\n words_count = defaultdict(int)\n words = []\n for title in title_list:\n tokens = t.tokenize(title)\n for token in tokens:\n pos = token.part_of_speech.split(',')[0]\n if pos == '名詞':\n words_count[token.base_form] += 1\n words.append(token.base_form)\n return words_count, words", "def analyze(self, word_count_thresh):", "def count_words_fast(text):\n skip = [\",\",\".\",\";\",\":\",\"'\",'\"']\n for ch in skip:\n text = text.replace('ch',\"\")\n \n word_counts = Counter(text.split(\" \"))\n return word_counts", "def anlSentence(self, sentence):\n cleanStr = re.sub(self._wrdSeps, \" \",\n re.sub(self._stcSeps, \"\", sentence))\n for word in cleanStr.split():\n self._wordCounter[word] += 1\n self._totalWords += 1\n else:\n self._totalSentences += 1", "def alice_in_wonderland():\n from collections import Counter\n with open(\"lib/alice_in_wonderland.txt\") as f:\n #~ table = maketrans(\" \",\" \")\n #~ wordlist = f.read().lower().translate(table, punctuation).split()\n # Translate actually performs fastest here but we use list comprehension\n # because we like it.\n wordlist = [i.lower() for i in f.read().split() if i.isalpha()]\n counted_words = Counter(wordlist)\n # Sort and write our counted wordlist to a new file:\n with open(\"lib/alice_counted.txt\", \"w\") as fout:\n length = 0\n for k, v in sorted(counted_words.items()):\n if len(k) > length:\n length = len(k)\n print length\n fout.write(k + \" \" + str(v) + \"\\n\")\n\n # 3 Solutions for counting characters (not words):\n #~ import operator\n #~ from string import lowercase, punctuation\n \n # 1: Reading the file into a string, then performing dictionary comprehension.\n #~ s = f.read().lower()\n #~ # Incredibly stupid and slow because it goes through the whole string\n #~ # with each iteration. 
DO NOT DO THIS.\n #~ L = {i: s.count(i) for i in s if i in lowercase}\n #~ L_sorted = sorted(L.iteritems(), key=operator.itemgetter(0))\n #~ print L_sorted\n\n # 2: Reading the file line by line into a dictionary.\n #~ d = {}\n #~ for i in f:\n #~ i = i.lower().strip()\n #~ i = [c for c in i if c in lowercase]\n #~ for char in i:\n #~ if char in d:\n #~ d[char] += 1\n #~ else:\n #~ d[char] = 1\n #~ keys = d.keys()\n #~ keys.sort()\n #~ for i in keys:\n #~ print (i, d[i]),\n\n # 3: Using Counter\n #~ s = [i for i in f.read().lower() if i in lowercase]\n #~ d = Counter(s)\n # Long version:\n #~ keys = sorted(d.keys())\n #~ for i in keys:\n #~ print (i, d[i]),\n #~ # Concise:\n #~ for k, v in sorted(d.items()): print (k, v),", "def trainInternal():\n\n con_counts = Counter()\n deflike = Counter()\n\n for record in records:\n data = [re.split(\"\\t\", d) for d in re.split(\"\\n\", record)]\n tokens, tags = zip(*data)\n\n for i, token in enumerate(tokens):\n denom = len(token)\n for indices, f in fqs(token, 0.5): #perform analysis on one word at a time\n context, numer = internalContext(indices, token)\n if tags[i] != \"O\": #only want the named entities\n deflike[context] += f * numer/denom #need to normalize by word length\n con_counts[context] += f * numer/denom\n\n deflike = Counter({context: deflike[context]/con_counts[context] for context in deflike}) #perform division on each entry\n\n return deflike", "def analyze(self, text):\n\n # start from 0 for each Analyser variable\n self.positives = 0\n self.negatives = 0\n\n # precise self text value\n self.text = text\n\n # declare a tokenased word\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n\n # indicate the length of list tokens\n size = len(tokens)\n\n # all the word stuff to ckeck\n for word in tokens:\n\n # chaque mots est converti en mot sans majuscule\n word = str.lower(word)\n\n linespos = [line.rstrip('\\n') for line in open('positive-words.txt')]\n linesneg = [line.rstrip('\\n') for line in open('negative-words.txt')]\n\n # check for positive or negative or neutral words\n if word in linespos:\n self.positives += 1\n elif word in linesneg:\n self.negatives += 1\n else:\n continue\n\n # score calculculated and reurned\n score = self.positives - self.negatives\n\n return score", "def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if 
emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def corpus_statistics(corpus, d_corp):\n print('There are {} types of a total of {} tokens in the corpus.\\n' .format(number_types(corpus), corpus_length(corpus)))\n print('There average token length is {}.\\n' .format(average_length(corpus)))\n print('The longest token is {}.\\n' .format(longest_token(corpus)))\n print('The number of hapaxes is {} and represents the {} of the corpus.\\n.' .format(hapaxes(corpus), percentage(hapaxes(corpus), corpus_length(corpus))))\n print('The 10 most frequent types of the total tokens are {} and represent the {}%.\\n' .format(most_frequent(corpus), percentage_common_types(corpus))) \n print('The hapaxes present in each of the 9 partitions are {}.\\n' .format(hapaxes_parts(d_corp)))\n print('The percentage of hapaxes for each partition is {}.\\n' .format(percentage_hapaxes(d_corp, corpus)))\n plots(d_corp, corpus)\n print('\\nIn the tupla {}, the first element is the number of unique bigrams, and the second element is the percentage of unique bigrams from all the bigrams in the corpus. Similarly, in this tupla {}, the first element is the number of unique trigrams, and the second element is the percentage of unique trigrams from all the bigrams in the corpus.' 
.format(ngram(corpus, 2), ngram(corpus, 3)))", "def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def train():\n\tA = collections.defaultdict(dict)\n\tB = collections.defaultdict(dict)\n\tpos_list = []\n\tword_list = []\n\tstr_buf = []\n\n\n\t# read each line and count A and B\n\tfor line in sys.stdin:\n\t\tline = line.split()\n\t\t# print(line)\n\t\tif len(line) == 3:\n\t\t\tstr_buf.append((str(line[0]), str(line[1])))\n\t\t\tword_list.append(str(line[0]))\n\t\t\tpos_list.append(str(line[1]))\n\n\t\telse:\n\t\t\t# if come to the end of a sentence\n\t\t\tif len(str_buf) != 0:\n\t\t\t\tstr_buf = [('<s>','BOS')] + str_buf + [('</s>', 'EOS')]\n\t\t\t\tword_list += ['<s>', '</s>']\n\t\t\t\tpos_list += ['BOS', 'EOS']\n\n\t\t\t\tfor i, s in enumerate(str_buf):\n\t\t\t\t\tif s[0] in B[s[1]]:\n\t\t\t\t\t\tB[s[1]][s[0]] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tB[s[1]][s[0]] = 1\n\n\t\t\t\t\tif s[0] != '</s>':\n\t\t\t\t\t\t# print('strbuf[i]:',str_buf[i], 's[1]', s[1])\n\t\t\t\t\t\tif str_buf[i+1][1] in A[s[1]]:\n\t\t\t\t\t\t\tA[s[1]][str_buf[i+1][1]] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tA[s[1]][str_buf[i+1][1]] = 1\n\n\t\t\t\tstr_buf = []\n\n\t# get unique POS list and word list\n\tpos_list_uniq = list(set(pos_list))\n\tword_list_uniq = list(set(word_list))\n\n\n\t# assume <UNK>, smoothing, normalize\n\tB_sm = collections.defaultdict(dict)\n\tA_sm = A.copy()\n\n\t# assume words apeear less than 2 times as <UNK>\n\tword_count = collections.Counter(word_list)\n\tfor pos in B:\n\t\tfor word in B[pos]:\n\t\t\tif word_count[word] > 1:\n\t\t\t\tB_sm[pos][word] = B[pos][word]\n\n\t\t\telse: # add <UNK> to B_sm\n\t\t\t\tword_list_uniq.remove(word)\n\t\t\t\tif '<UNK>' in B_sm[pos]:\n\t\t\t\t\tB_sm[pos]['<UNK>'] += 1\n\t\t\t\telse:\n\t\t\t\t\tB_sm[pos]['<UNK>'] = 1\n\n\tword_list_uniq += ['<UNK>']\n\n\t# add 1 smoothing\n\tfor pos in pos_list_uniq:\n\t\tfor word in word_list_uniq:\n\t\t\tif word in B_sm[pos]:\n\t\t\t\tB_sm[pos][word] += 1\n\t\t\telse:\n\t\t\t\tB_sm[pos][word] = 1\n\n\tfor prev in pos_list_uniq:\n\t\tfor next in pos_list_uniq:\n\t\t\tif next in A_sm[prev]:\n\t\t\t\tA_sm[prev][next] += 1\n\t\t\telse:\n\t\t\t\tA_sm[prev][next] = 1\n\n\t# delete instances like A[VB][BOS], A[EOS][VB],\n\t# B[VB]['</s>'], B[EOS]['Jack']\n\n\tfor pos in B_sm:\n\t\tfor word in B_sm[pos]:\n\t\t\tif (pos == 'BOS' and word != '<s>') or \\\n\t\t\t(pos == 'EOS' and word != '</s>') or \\\n\t\t\t(word == '<s>' and pos != 'BOS') or \\\n\t\t\t(word == '</s>' and pos != 'EOS'):\n\t\t\t\tB_sm[pos][word] = 0\n\n\tfor prev in A_sm:\n\t\tfor next in A_sm[prev]:\n\t\t\tif prev == 'EOS' or next == 'BOS':\n\t\t\t\tA_sm[prev][next] = 0\n\n\t# normalize\n\tfor pos in B_sm:\n\t\ts = 
sum(B_sm[pos].values())\n\t\tfor word in B_sm[pos]:\n\t\t\tif B_sm[pos][word] != 0:\n\t\t\t\tB_sm[pos][word] /= s\n\n\tfor prev in A_sm:\n\t\ts = sum(A_sm[prev].values())\n\t\tfor next in A_sm[prev]:\n\t\t\tif A_sm[prev][next] != 0:\n\t\t\t\tA_sm[prev][next] /= s\n\n\treturn A_sm, B_sm, word_list_uniq", "def num_syllables(self, word):\n # TODO: provide an implementation!\n word = word.lower()\n D = self._pronunciations\n #D = nltk.corpus.cmudict.dict()\n if(word not in D.keys()):\n #print word not in CMUDictionary\n return 1\n\n #count stores no of syllables for each pronunciation of the word\n count = []\n\n #for each pronunciation\n for x in D[word]:\n n = 0\n #for each syllable\n for y in x:\n #if vowel sound\n if y[-1].isdigit():\n n = n + 1\n count.append(n)\n # return the pronunciation having least syllables\n return min(count)\n #return min([len([y for y in x if y[-1].isdigit()]) for x in D[word.lower()]])", "def get_word_frequency():\n counter = Counter()\n with open('resource/word-count.txt', encoding=\"utf8\") as f:\n for line in f.readlines():\n try:\n word, count = line.split(':')\n if (word == \"RT\"):\n continue\n count = int(count)\n counter[word] += count\n except Exception as e:\n continue\n return counter", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words = [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts", "def all_categories_for_phrase(db, phrase, access_codes):\n ratings = [0, 0, 0]\n for access_code in access_codes:\n category_index = annotator_category_for_phrase(db, phrase, access_code)\n ratings[category_index] += 1\n return ratings", "def sentiment_analysis(self):\n train_pos = pd.read_csv(\"data/train_Arabic_tweets_positive_20190413.tsv\", sep='\\t', names=[\"label\", \"tweet\"])\n train_neg = pd.read_csv(\"data/train_Arabic_tweets_negative_20190413.tsv\", sep='\\t', names=[\"label\", \"tweet\"])\n train = pd.concat([train_pos, train_neg])\n train.tweet = train.tweet.apply(self.preprocessor).apply(tokenization).apply(lambda x: x.tokens[0])\n le = LabelEncoder()\n le.fit(train.label)\n train.label = le.transform(train.label)\n\n sentence_inds, vocab, self.num_tokens, word_index, index_word = helper.encode_tokens(train.tweet.values)\n\n\n self.embeddings_matrix = 
helper.load_embedding_matrix(self.num_tokens, self.embedding_size, \n word_index, self.embeddings_index)\n\n\n train_padded = pad_sequences(sentence_inds, padding=\"post\", truncating=\"post\", maxlen=100)\n\n self.X_train, self.X_valid, self.y_train, self.y_valid = train_test_split(train_padded, train.label.values, test_size=0.5,random_state=0, stratify=train.label.values)\n\n model = self.train_model()\n y_pred = model.predict(self.X_valid)\n return (np.argmax(y_pred, axis=1) == self.y_valid).sum() / self.y_valid.shape[0]", "def get_data(articles): # Here, the articles will be very long strings\r\n vdictionary = {} # dictionary for tokens that are found in dictionary\r\n _odata = [0] * 12 # list collecting everything except date; last number of words=index:0\r\n word_length = 0 # initializing the value of word length; will be updated via loop\r\n tokens = re.findall('\\w+', articles) # Note that \\w+ splits hyphenated words\r\n for token in tokens: # Goes through generated tokens from articles\r\n if (not token.isdigit()) and (len(token) > 1) and (token in lm_dictionary.keys()): # conditions for checking if token is in dictionary\r\n _odata[1] += 1 # updating word count \r\n word_length += len(token) # updating word length\r\n if token not in vdictionary: # initial statement regarding steps for handling tokens not in the dictionary\r\n vdictionary[token] = 1 # count of tokens in text that show up in dictionary\r\n \r\n####### Keeping Track of Categorical Token Counts (Nonzero entry=True) also checks if word is stop word\r\n if lm_dictionary[token].positive and not lm_dictionary[token].stopword: _odata[2] += 1\r\n if lm_dictionary[token].negative and not lm_dictionary[token].stopword: _odata[3] += 1\r\n if lm_dictionary[token].uncertainty and not lm_dictionary[token].stopword: _odata[4] += 1\r\n if lm_dictionary[token].litigious and not lm_dictionary[token].stopword: _odata[5] += 1\r\n if lm_dictionary[token].weak_modal and not lm_dictionary[token].stopword: _odata[6] += 1\r\n if lm_dictionary[token].moderate_modal and not lm_dictionary[token].stopword: _odata[7] += 1\r\n if lm_dictionary[token].strong_modal and not lm_dictionary[token].stopword: _odata[8] += 1\r\n if lm_dictionary[token].constraining and not lm_dictionary[token].stopword: _odata[9] += 1\r\n #total_syllables += lm_dictionary[token].syllables # interesting parameter to measure\r\n\r\n #_odata[12] = len(re.findall('[0-9]', doc))\r\n # drop punctuation within numbers for number count\r\n articles = re.sub('(?!=[0-9])(\\.|,)(?=[0-9])', '', articles)\r\n articles = articles.translate(str.maketrans(string.punctuation, \" \" * len(string.punctuation)))\r\n #_odata[13] = len(re.findall(r'\\b[-+\\(]?[$€£]?[-+(]?\\d+\\)?\\b', doc))\r\n # _odata[14] = total_syllables / _odata[2]\r\n #print(_odata[1])\r\n _odata[10] = word_length / _odata[1] # computing average word length\r\n _odata[11] = len(vdictionary) # total vocab count\r\n \r\n # Convert counts to %\r\n for i in range(2, 9 + 1): # specifying range of percentages\r\n try:\r\n _odata[i] = (_odata[i] / _odata[1]) * 100 # updating count to percent\r\n except:\r\n print(\"zero denominator\")\r\n # Vocabulary\r\n \r\n return _odata # returning the data\r", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def analyze(self, text):\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n 
if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent", "def analyze_emoji_sentimens(text):\n sum = 0.0\n count = 0\n for character in list(text):\n value = index.get(character, None)\n if value != None:\n sum += value\n count += 1\n if count == 0:\n return 0.0\n\n return sum/count", "def sentiment_analyzer(text):\n\n\tlower_text = text.lower()\n\t\t\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tsent_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tsent_index += lower_text.count(positive_words[x])\n\tfor x in range(len(negative_words)):\n\t\tsent_index -= lower_text.count(negative_words[x])\n\tif '!' in text:\n\t\tsent_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tsent_index *= hashtag_scaling * lower_text.count('#') + 1\n\tsent_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\t\t\n\treturn sent_index", "def get_corpus_counts(x,y,label):\n raise NotImplementedError", "def read_gulordava_corpus(corpus_dir: str) -> dict:\n def _read_file(path: str) -> List[str]:\n with open(path, \"r\") as f:\n return f.readlines()\n\n sentences = _read_file(f\"{corpus_dir}/generated.text\")\n sentence_info = _read_file(f\"{corpus_dir}/generated.tab\")[1:] # Skip header line\n labelled_corpus = {}\n\n for i, sentence in enumerate(sentences):\n right_info, wrong_info = sentence_info[2*i], sentence_info[2*i+1]\n\n # Parse lines\n right_info, wrong_info = right_info.split(\"\\t\"), wrong_info.split(\"\\t\")\n constr_id, sent_id, correct_number, right_form, class_, type_ = right_info[1:7]\n len_context, len_prefix, sent = right_info[11:14]\n constr_id_wrong, sent_id_wrong, _, wrong_form, class_wrong, type_wrong = wrong_info[1:7]\n sent_wrong = wrong_info[13]\n\n assert class_ == \"correct\" and class_wrong == \"wrong\"\n assert constr_id == constr_id_wrong and sent_id == sent_id_wrong and sent == sent_wrong and type_ == type_wrong\n\n len_prefix, len_context = int(len_prefix), int(len_context)\n subj_pos = len_prefix - len_context\n verb_pos = len_prefix\n sentence = sent.split()\n\n misc_info = {\n \"raw\": sent,\n \"subj_pos\": subj_pos,\n \"verb_pos\": verb_pos,\n \"right_form\": right_form,\n \"wrong_form\": wrong_form,\n \"correct_number\": correct_number,\n \"sent_id\": sent_id,\n \"constr_id\": constr_id,\n \"type\": type_\n }\n\n labelled_sentence = {\n \"sen\": sentence, \"labels\": [0 if correct_number == \"sing\" else 1] * len(sentence), **misc_info\n }\n labelled_corpus[i] = labelled_sentence\n\n return labelled_corpus", "def word_count():\n word_counter = Counter()\n\n # read files and count words\n for file_path in Path(INPUTS_PATH).rglob(\"*\"):\n if file_path.is_file():\n print(f\"Processing input file: {file_path.as_posix()}\")\n word_counter += Counter(file_path.read_text().split())\n\n # write the result to OUTPUTS_PATH\n output_file = Path(OUTPUTS_PATH) / \"output.txt\"\n with output_file.open(\"w\") as f:\n for word, count in word_counter.most_common():\n f.write(f\"{word}: {count}\\n\")\n print(f\"Generated output file: {output_file.as_posix()}\")", "def load_input(path):\n counts = defaultdict(int)\n if not os.path.exists(mode+'indices.p'):\n root = '/'.join(path.split('/')[0:-1])\n all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n else:\n all_paths = [path]\n \n for path in all_paths:\n print(path)\n with open(path) as f:\n if mode == 'word':\n words = 
tokenize(f.read())\n else:\n words = f.read()\n\n for word in words:\n counts[word] += 1 \n\n words = [x for x in words if len(x) > 0]\n return words, counts", "def analyze_reviews(reviews):\n\n good_reviews=reviews[reviews['rs_review_movie_score']>=9]\n bad_reviews=reviews[reviews['rs_review_movie_score']<=2]\n\n print 'len(good_reviews)=%s' % len(good_reviews)\n print 'len(bad_reviews)=%s' % len(bad_reviews)\n\n m = re.compile('\\d')\n\n english_stop_words=stopwords.words('english')\n\n\n def tokenize(text):\n tokens=nltk.word_tokenize(text)\n # strip out trailing puncutation\n tokens = [ token[:-1] if token[-1] in ['.',',','!','?'] else token for token in tokens]\n\n # lower case\n tokens = [token.lower() for token in tokens]\n\n # Take only relativly long characters\n tokens = [token for token in tokens if len(token)>=3]\n\n # remove words with numbers/digits\n tokens = [token for token in tokens if m.search(token) is None]\n\n # Remove stop words: http://nltk.googlecode.com/svn/trunk/doc/book/ch02.html\n tokens = [token for token in tokens if token not in english_stop_words]\n return tokens\n\n good_tokens_list = []\n for i,review in good_reviews.iterrows():\n text=review['rs_review_text']\n good_tokens_list.append(tokenize(text))\n\n bad_tokens_list = []\n for i,review in bad_reviews.iterrows():\n text=review['rs_review_text']\n bad_tokens_list.append(tokenize(text))\n\n all_words=Counter()\n for tokens in good_tokens_list + bad_tokens_list:\n for token in tokens:\n all_words[token]+=1\n\n most_common=all_words.most_common(2000)\n most_common=zip(*most_common)[0]\n\n print 'most_common_words = ',most_common[-20:]\n\n def document_features(tokens):\n return {word:word in tokens for word in most_common}\n\n good_set=[(document_features(tokens), 'pos') for tokens in good_tokens_list]\n bad_set=[(document_features(tokens), 'neg') for tokens in bad_tokens_list]\n\n train_set = good_set + bad_set\n random.shuffle(train_set) # dunno if this is necessary\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n\n print 'accuracy',nltk.classify.accuracy(classifier, train_set)\n\n classifier.show_most_informative_features(300)\n\n return classifier", "def structure_anslysis(text, display = None):\n st.write('Text Size Exceeded! 
Truncating...')\n doc = nlp(text[:100000])\n pos_freq = pos_tag_counts(doc)\n ent_freq = entity_counts(doc)\n \n fig, axs = plt.subplots(1, 2, figsize = (15, 6))\n \n sns.barplot(list(pos_freq.keys()), list(pos_freq.values()), color='#e84118', ax = axs[0])\n axs[0].set_title('POS COUNTS')\n axs[0].set_xticklabels(labels = list(pos_freq.keys()), rotation = 90)\n \n sns.barplot(list(ent_freq.keys()), list(ent_freq.values()), color='#273c75', ax = axs[1])\n axs[1].set_title('ENTITY COUNTS')\n axs[1].set_xticklabels(labels = list(ent_freq.keys()), rotation = 90) \n \n plt.show()\n \n if display:\n spacy_streamlit.visualize_ner(doc, labels = nlp.get_pipe('ner').labels)\n \n \n return pos_freq, ent_freq", "def add_unique_word_count(self):\n call_count = lambda letter_: self._count_unique_words(letter_)\n self.dataframe['unique_words'] = self.dataframe['letter'].map(call_count)", "def measure(colloc_tag, colloc_count, n1grams, unigrams, n):\r\n collocation_words, collocation_tags = colloc_tag.split('/')\r\n collocation_words = collocation_words.split(' ')\r\n collocation_tags = collocation_tags.split(' ')\r\n pattern_words = ' '.join(collocation_words[:-1])\r\n pattern_tags = ' '.join(collocation_tags[:-1])\r\n\r\n pattern = pattern_words + '/' + pattern_tags\r\n last_word = collocation_words[-1] + '/' + collocation_tags[-1]\r\n c_pattern = int(n1grams[pattern])\r\n c_lw = int(unigrams[last_word])\r\n colloc_count = int(colloc_count)\r\n\r\n tsc = t_score(colloc_count, c_pattern, c_lw, n)\r\n pmisc = pmi(colloc_count, c_pattern, c_lw, n)\r\n logdsc = logDice(colloc_count, c_pattern, c_lw)\r\n return logdsc, pmisc, tsc", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def preprocessing(raw_text_df):\r\n \r\n stemmer = nltk.stem.porter.PorterStemmer()\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n # iterate over all lines for preprocessing\r\n for index, line in enumerate(raw_text_df):\r\n \r\n # if there is mention of stars from 1-5, change the integer into\r\n # text and combine the number and the word \"star\" to make a new word\r\n # example: \"I give this product 1 star\" is now \"I give this product onestar\"\r\n # why? 
numbers are removed as part of preprocessing\r\n if \"1 star\" in line:\r\n line = line.replace(\"1 star\", \"onestar\")\r\n if \"1 stars\" in line:\r\n line = line.replace(\"1 stars\", \"onestar\")\r\n if \"2 star\" in line:\r\n line = line.replace(\"2 star\", \"twostars\")\r\n if \"2 stars\" in line:\r\n line = line.replace(\"2 stars\", \"twostars\")\r\n if \"3 star\" in line:\r\n line = line.replace(\"3 star\", \"threestars\")\r\n if \"3 stars\" in line:\r\n line = line.replace(\"3 stars\", \"threestars\")\r\n if \"4 star\" in line:\r\n line = line.replace(\"4 star\", \"fourstars\")\r\n if \"4 stars\" in line:\r\n line = line.replace(\"4 stars\", \"fourstars\")\r\n if \"5 star\" in line:\r\n line = line.replace(\"5 star\", \"fivestars\")\r\n if \"5 stars\" in line:\r\n line = line.replace(\"5 stars\", \"fivestars\")\r\n \r\n # tokenize lines\r\n tokens = re.split('(\\d+)',line)\r\n # remove numbers\r\n no_digits = [w for w in tokens if not w.isdigit()]\r\n # join tokens\r\n joined_text = \" \".join(no_digits)\r\n # re tokenize\r\n tokens = tokenizer.tokenize(joined_text)\r\n # make tokens lowercase\r\n lower_tokens = [w.lower() for w in tokens if type(w) == str] \r\n # remove stopwords\r\n stopped_tokens = [w for w in lower_tokens if not w in stopwords.words('english')]\r\n # stem words\r\n clean_tokens = [stemmer.stem(w) for w in stopped_tokens]\r\n # join text\r\n joined_text = \" \".join(clean_tokens)\r\n # replace line with preprocessed line\r\n raw_text_df[index] = joined_text\r\n print(index)", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def get_all_words(annotation_files):\n all_words = np.array([])\n for annotation_file in annotation_files:\n annotations = []\n with open(annotation_file, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().rstrip('\\n').lstrip('\\ufeff').strip().split(',', maxsplit=8)\n if str(line[-1]).strip() != \"\" and str(line[-1]).strip() is not None:\n annotations.append(str(line[-1]).strip())\n annotations = np.array(annotations)\n all_words = np.concatenate((all_words, annotations), axis=0)\n unique_words = list(set(all_words))\n print(\"Get_All_Words::All words num / Unique words num is {0}/{1}\".format(len(all_words), len(unique_words)))\n return all_words, unique_words", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def basic_statistics_of_email(data):\n word_counts = []\n character_count = 0\n\n for ind, row in data.iterrows():\n tokenizer = RegexpTokenizer(r'\\w+')\n real_words = tokenizer.tokenize(row[\"RawText\"].lower())\n\n character_count += sum(map(len, real_words))\n word_counts.append(len(real_words))\n\n return character_count, pd.Series(word_counts)", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n\n ### YOUR CODE HERE\n def enterDic(phrase, dict):\n if phrase in dict:\n dict[phrase] += 1\n else:\n dict[phrase] = 1\n\n unigram_counts[word_to_num['UUUNKKK']] = 0\n\n for sentence in dataset:\n 
enterDic(sentence[1], unigram_counts) # count number of start of sentences\n enterDic((sentence[0], sentence[1]), bigram_counts) # count number of start of sentences\n token_count += 2\n for i in range(2, len(sentence)):\n token_count += 1\n enterDic(sentence[i], unigram_counts)\n enterDic((sentence[i - 1], sentence[i]), bigram_counts)\n enterDic((sentence[i - 2], sentence[i - 1], sentence[i]), trigram_counts)\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def train(self, corpus): \n for sentence in corpus.corpus: # iterate over sentences in the corpus\n for token in sentence: # iterate over datums in the sentence\n self.unigrams[token] += 1\n self.total += 1\n V = len(self.unigrams) # vocabulary size \n for ug,count in self.unigrams.iteritems():\n \tself.f1[ug] = math.log10(count+1) - math.log10(self.total + V)", "def get_num_words_spoken_by_character_per_episode(content):\n content = list(csv.reader(content.splitlines(), delimiter=','))\n characters = [name[2] for name in content]\n characters = list(dict.fromkeys(characters))\n del characters[0]\n res = defaultdict()\n for character in characters:\n episode = 1\n dic = {}\n count = 0\n for row in content: \n if row[2] == character:\n if str(episode) == row[1]:\n count += len(row[3].split())\n else:\n dic[str(episode)] = count\n episode = int(row[1])\n count = len(row[3].split())\n if '13' not in dic.keys():\n dic['13'] = count \n dic = Counter(dic)\n res[character] = dic\n return res" ]
[ "0.6335053", "0.57268775", "0.57179636", "0.5714977", "0.57052946", "0.5632471", "0.56277466", "0.56243765", "0.5612738", "0.5593558", "0.55885386", "0.5571546", "0.5560268", "0.55578226", "0.55549335", "0.55408674", "0.55253506", "0.5510573", "0.55039716", "0.5502862", "0.5502827", "0.5487121", "0.5435187", "0.5422129", "0.53818375", "0.5378401", "0.53673434", "0.5360341", "0.53580433", "0.53432876", "0.5335175", "0.5329313", "0.53272194", "0.5311351", "0.53058684", "0.5282008", "0.52775645", "0.52738386", "0.5271024", "0.52704257", "0.5264546", "0.52574384", "0.52499396", "0.5244697", "0.5239421", "0.52232456", "0.5214361", "0.5210121", "0.52069247", "0.51841426", "0.5178881", "0.51779014", "0.5174941", "0.51709265", "0.5166267", "0.51652825", "0.5165065", "0.51609915", "0.51502705", "0.51382136", "0.51336014", "0.5123653", "0.511272", "0.5107917", "0.5100428", "0.51002264", "0.51001096", "0.5098017", "0.50958323", "0.5095243", "0.508407", "0.5081947", "0.5073839", "0.50737953", "0.507253", "0.5071211", "0.50669014", "0.5065129", "0.506511", "0.5061986", "0.50545025", "0.50448054", "0.5042495", "0.5034689", "0.5033884", "0.5026838", "0.50244224", "0.5024383", "0.5015165", "0.50118285", "0.5010954", "0.50091964", "0.50089985", "0.5008505", "0.50070006", "0.5002425", "0.5002252", "0.4994001", "0.4991696", "0.49900976" ]
0.65279776
0
Update the total occurrence counts of each unigram, bigram, and trigram
def update_syllable_count(word, syll_count):
    syllables = word.split('-')
    for i in range(1, 4):
        for j in range(len(syllables) - i + 1):
            gram = '-'.join(syllables[j: j + i])
            count = syll_count.setdefault(gram, 0)
            syll_count[gram] = count + 1
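A minimal usage sketch, assuming the function above is called once per hyphen-syllabified word with a shared count dictionary (the word list here is hypothetical):

syll_count = {}
for word in ["ba-bi-lu", "lu-gal", "ba-bi"]:  # hypothetical syllabified words
    update_syllable_count(word, syll_count)

print(syll_count["ba"])         # unigram "ba" appears in two words -> 2
print(syll_count["ba-bi"])      # bigram "ba-bi" appears twice -> 2
print(syll_count["ba-bi-lu"])   # trigram appears once -> 1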
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _count(self):\n words = [word.lower() for word in self.corpus.words()]\n bigrams_words = bigrams(words)\n for bigram in bigrams_words:\n self._bigrams[bigram] += 1", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self", "def count_ngrams(self):\n self.unigram = self.count_unigram(self.word_list)\n self.bigram = self.count_bigram(self.word_list)\n # self.trigram = self.count_trigram(self.word_list)\n # self.four_gram = self.count_four_gram(self.word_list)\n # self.five_gram = self.count_five_gram(self.word_list)", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def train(self, corpus): \n for sentence in corpus.corpus:\n prev_word = None\n for datum in sentence.data:\n word = datum.word\n self.unigram_count[word] += 1\n if prev_word != None:\n self.bigram_count[prev_word][word] += 1\n prev_word = word\n \n self.vocabulary_size = len(self.unigram_count)\n self.num_words = sum(self.unigram_count.values())", "def setCounts(self):\n N=len(self.y)\n self.counts=np.zeros(len(self.y))\n self.idf=np.zeros(len(self.y))\n for i in range(0,len(self.counts)):\n for word in self.qtext:\n wc=self.atext[i].count(word)\n self.counts[i]+=wc\n if wc>0:\n d=0\n for sentence in self.atext:\n if word in sentence:\n d+=1\n self.idf[i]+=wc*np.log(N/d)", "def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token", "def get_counts(data):\n\n bigrams = {}\n unigrams = {}\n #range is len-1 because the bigram uses ith+1 element\n data=list(data)\n for i in range(0, len(data)-1):\n #ith element and ith+1 element\n bigram=(data[i],data[i+1])\n if(bigram in bigrams):\n count=bigrams[bigram]\n 
bigrams[bigram]= count+1\n else:\n #if bigram not in dict of bigrams, add with count 1\n bigrams[bigram]=1\n\n for unigram in data:\n if(unigram in unigrams):\n count=unigrams[unigram]\n unigrams[unigram]= count+1\n else:\n #if unigram not present, add with count 1\n unigrams[unigram]=1\n\n return bigrams,unigrams", "def pop_bigrams_old(self, corpus):\n for sentence in corpus.sents():\n for w1, w2 in bigrams(word_tokenize(sentence), pad_right=True, pad_left=True):\n self.bigrams[w1][w2] += 1\n\n # Convert trigrams to probabilities\n for wp in self.bigrams:\n total_count = float(sum(self.bigrams[wp].values()))\n for w2 in self.bigrams[wp]:\n self.bigrams[wp][w2] /= total_count", "def count_ngrams(self, corpus):\n self.unigramcounts = defaultdict(int) # might want to use defaultdict or Counter instead\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.total_word_count = 0\n self.total_sentence_count = 0\n\n # For each sentence, generate ngram_counts\n for sentence in corpus :\n # 2 extra START tokens and 1 extra STOP token\n self.total_word_count += len(sentence) + 3\n self.total_sentence_count += 1\n for unigram in get_ngrams(sentence, 1) :\n self.unigramcounts[unigram] += 1\n for bigram in get_ngrams(sentence, 2) :\n self.bigramcounts[bigram] += 1\n for trigram in get_ngrams(sentence, 3) :\n self.trigramcounts[trigram] += 1\n\n return", "def calcCountDict(TFdict):\n\n countDict = {}\n\n for doc in TFdict:\n for term in doc:\n if term in countDict:\n countDict[term] +=1\n else:\n countDict[term] = 1\n\n return countDict", "def update_frequencies():\n pass", "def update(self, iterable):\n for word in iterable:\n if word in self:\n self[word] = self[word] + 1\n self.tokens += 1\n else:\n self[word] = 1\n self.types += 1\n self.tokens += 1", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def calculate(self, tokens: list):\n vektor_tf_unigram = {}\n\n for token in tokens:\n if token in vektor_tf_unigram:\n vektor_tf_unigram[token] = vektor_tf_unigram[token] + 1\n else:\n vektor_tf_unigram[token] = 1\n\n return vektor_tf_unigram", "def print_trigrams_count(self):\n for u_v in self.trigram_counts:\n for w in self.trigram_counts[u_v]:\n count=self.trigram_counts[u_v][w]\n print \"{2}\\tc({0} {1})\".format(u_v,w,count)", 
"def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score", "def _getCountForUnigram(self,word1):\n count=self.unigrams[(word1)]\n if count==0:\n count=0.001\n return count", "def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts", "def __init__(self, n, sents, gamma=None, addone=True):\n assert n > 0\n self._n = n\n\n if gamma is not None:\n # everything is training data\n train_sents = sents\n else:\n # 90% training, 10% held-out\n m = int(0.45 * len(sents))\n l = int(0.65 * len(sents))\n train_sents = sents[:m] + sents[l:]\n held_out_sents = sents[m:l]\n\n print('Computing counts...')\n count = defaultdict(int)\n while (n >= 0):\n for sent in train_sents:\n s = sent[:] ## En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n # WORKed HERE!!\n # COMPUTE COUNTS FOR ALL K-GRAMS WITH K <= N\n\n # compute vocabulary size for add-one in the last step\n self._addone = addone\n if addone:\n print('Computing vocabulary...')\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc)\n\n # compute gamma if not given\n if gamma is not None:\n self._gamma = gamma\n else:\n print('Computing gamma...')\n self._gamma = gamma = 1\n p = self.log_prob(held_out_sents)\n new_gamma = 2\n streak = 1\n growing = True\n turns = 0\n while (turns < 15):\n self._gamma = new_gamma\n np = self.log_prob(held_out_sents)\n gamma = new_gamma\n if (np > p):\n if growing:\n streak += 1\n else:\n turns += 1\n streak = 0\n growing = True\n new_gamma = new_gamma + 2 ** streak\n else:\n if growing:\n turns += 1\n streak = 0\n growing = False\n else:\n streak += 1\n new_gamma = new_gamma - 2 ** streak\n p = np\n self._gamma = new_gamma\n print(self._gamma)", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def pop_bigrams(self):\n bigram_counts = self.process_file(self.bigram_file)\n N = sum(bigram_counts.values())\n for bigram in bigram_counts:\n self.bigrams[bigram.lower()] = bigram_counts[bigram] / N", "def store_counts(storage, counter):\n for ngram, count in counter.items():\n try:\n stored_count = storage[ngram]\n except KeyError:\n stored_count = 0\n storage[ngram] = stored_count + count\n\n storage.sync()\n counter.clear()", "def train(self, 
corpus): \n for sentence in corpus.corpus: # iterate over sentences in the corpus\n for token in sentence: # iterate over datums in the sentence\n self.unigrams[token] += 1\n self.total += 1\n V = len(self.unigrams) # vocabulary size \n for ug,count in self.unigrams.iteritems():\n \tself.f1[ug] = math.log10(count+1) - math.log10(self.total + V)", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n\n ### YOUR CODE HERE\n def enterDic(phrase, dict):\n if phrase in dict:\n dict[phrase] += 1\n else:\n dict[phrase] = 1\n\n unigram_counts[word_to_num['UUUNKKK']] = 0\n\n for sentence in dataset:\n enterDic(sentence[1], unigram_counts) # count number of start of sentences\n enterDic((sentence[0], sentence[1]), bigram_counts) # count number of start of sentences\n token_count += 2\n for i in range(2, len(sentence)):\n token_count += 1\n enterDic(sentence[i], unigram_counts)\n enterDic((sentence[i - 1], sentence[i]), bigram_counts)\n enterDic((sentence[i - 2], sentence[i - 1], sentence[i]), trigram_counts)\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def update(self,haiku, typenum):\n self.occurrences += 1\n for i in range(2):\n for x in (haiku.triple[i]).wordarray:\n if (self.wordtype == dictionary.wordtype(x) and \n dictionary.word_filter(x) != self.word):\n self.update_adj_dict(x, i==typenum)", "def _update_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"counts\", counts)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.total = 0\n self.train(corpus)", "def on_text(self, event):\n self.get_counts()\n self.save()", "def fit(self, text):\n\n if self.lowercase:\n text = text.lower()\n\n print(\"Tokenize sentences...\")\n tokens = word_tokenize(text)\n\n self.words_set_size = len(set(tokens))\n\n print(\"Collecting of ngram counters...\")\n\n self.unigram_counts = Counter(tokens)\n self.bigram_counts = Counter(bigrams(tokens))\n\n return self", "def generate_counts():\n\n counts_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n doc_path = os.path.join(subfolder_path, filename)\n with open(doc_path, 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n counts_dict.update({doc_path: collections.Counter(normalised_text.split())})\n #print(counts_dict.get('file/crude/article560.txt'))\n\n vocab = generate_vocab()\n for value in counts_dict.values():\n for k in vocab.keys():\n if k not in value.items():\n value.update({k: 0})\n\n #print(counts_dict.get('file/crude/article560.txt'))\n return counts_dict", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def partial_accumulate(self, texts, window_size):\n 
self._current_doc_num = -1\n self._token_at_edge = None\n self._counter.clear()\n\n super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)\n for combo, count in iteritems(self._counter):\n self._co_occurrences[combo] += count\n\n return self", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n ### YOUR CODE HERE\n raise NotImplementedError\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter", "def _count_vocab(self,raw_documents, fixed_vocab=False):\n if fixed_vocab:\n vocabulary = self.vocabulary_\n else:\n # Add a new value when a new vocabulary item is seen\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n analyze = super().build_analyzer()\n \n j_indices = []\n indptr = []\n\n values = array.array(str('f'))\n indptr.append(0)\n for doc in raw_documents:\n #doc = tupla[0]\n feature_counter = {}\n #texttlist = doc.split(sep=\" \")\n for feature in analyze(doc):#texttlist:\n try:\n \n # Ignore out-of-vocabulary items for fixed_vocab=True\n feature_idx = vocabulary[feature]\n #print(feature_idx)\n #fti_feature = calc_fti(feature,raw_documents)\n \n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n #print(feature_counter[feature_idx])\n except KeyError:\n # Ignore out-of-vocabulary items for fixed_vocab=True\n continue\n\n\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n if not fixed_vocab:\n # disable defaultdict behaviour\n vocabulary = dict(vocabulary)\n if not vocabulary:\n raise ValueError(\"empty vocabulary; perhaps the documents only\"\n \" contain stop words\")\n\n if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1\n if _IS_32BIT:\n raise ValueError(('sparse CSR array has {} non-zero '\n 'elements and requires 64 bit indexing, '\n 'which is unsupported with 32 bit Python.')\n .format(indptr[-1]))\n indices_dtype = np.int64\n\n else:\n indices_dtype = np.int32\n \n j_indices = np.asarray(j_indices, dtype=indices_dtype)\n indptr = np.asarray(indptr, dtype=indices_dtype)\n \n #print (vocabulary)\n X = sp.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.float32)\n X.sort_indices() \n \n self.vocabulary_calculated = vocabulary\n\n return vocabulary, X", "def score(self, sentence):\n # count each incremented word\n for word in sentence:\n if word not in self.unigramCounts:\n self.zeroCount += 1\n\n # apply laplace smoothing to unigram model\n score = 0.0\n for word in sentence:\n count = self.unigramCounts[word]\n score += math.log(count + 1)\n score -= math.log(self.totalCount + self.zeroCount)\n return score", "def add_ngrams(mydict,sentence):\n ngrams = get_ngrams(sentence,2,3)\n for ngram in ngrams:\n if ngram in mydict:\n mydict[ngram]+=1\n else:\n mydict[ngram]=1\n return mydict", "def _ngram_counter(self, ngram, ngram_length, text_id, doc):\n\n # Only process this ngram is it's punctuation-free (punct --> token.dep == ss.punct) and the 1st / last\n # words are not stopwords (line mechanics: make a set, look for an intersection with another set)\n if ([word for word in ngram if word.dep == ss.punct] or\n {ngram[0].lemma_, ngram[ngram_length - 1].lemma_}.intersection(self.stop_words)):\n return\n\n 
# Only keep this ngram is it has 1+ nouns in it\n if len([word for word in ngram if word.pos in self.nouns or word.ent_type in self.entities]) == 0:\n return\n\n ngram_lemma = ' '.join([word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_ for word in ngram])\n verbatim = ' '.join([word.text.lower() for word in ngram])\n\n # add the ngram_lemma to each proximal topic\n window_start = 0 if ngram[0].i < 7 else ngram[0].i - 7\n window_end = len(doc) if ngram[0].i + 7 + ngram_length > len(doc) else ngram[0].i + 7 + ngram_length\n for word in doc[window_start:window_end]:\n if word.lemma_ in self.topics: # is this a topic we're tracking?\n # Yes. So let's add it to the subtopic dictionary (with an occurrence count)\n if ngram_lemma in self.topics[word.lemma_]['subtopics']:\n self.topics[word.lemma_]['subtopics'][ngram_lemma].add(text_id)\n else:\n self.topics[word.lemma_]['subtopics'][ngram_lemma] = {text_id}\n\n # TODO: Do I even need self.ngrams anymore? Maybe track everything in the subtopics area?\n # Keep it! And it's not the first time we've found it.\n if ngram_lemma in self.ngrams:\n self.ngrams[ngram_lemma][\"count\"] += 1\n self.ngrams[ngram_lemma][\"textIDs\"] |= {text_id}\n self.ngrams[ngram_lemma][\"verbatims\"] |= {verbatim}\n # Keep it! This is the 1st instance.\n else:\n self.ngrams[ngram_lemma] = {\"name\": ngram_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"n\": ngram_length,\n \"verbatims\": {verbatim},\n \"topic_lemmas\": []}", "def update_words(data, book_num):\n\tglobal word_count\n\t#find count of each word in the book and update the dictionary\n\tfor words in data:\n\t\tword_count[words][book_num] = (word_count.get(words,0)[book_num] + 1)\n\t#print(word_count)", "def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.totalCount = 0\n self.zeroCount = 0\n self.train(corpus)", "def trigram_model(list_of_words, bigram_count, trigram_count):\n c_start = list_of_words.count(start_phrase)\n c_end = list_of_words.count(end_phrase)\n if c_start == 0:\n list_of_words.insert(0, start_phrase)\n list_of_words.insert(0, start_phrase)\n if c_start == 1:\n list_of_words.insert(0, start_phrase)\n if c_end == 0:\n list_of_words.append(end_phrase)\n list_of_words.append(end_phrase)\n if c_end == 1:\n list_of_words.append(end_phrase)\n bigram_count = pd.read_csv(bigram_count)\n trigram_count = pd.read_csv(trigram_count)\n proba_dict = {list_of_words[i] + \" \" + list_of_words[i+1] + \" \" + list_of_words[i+2]:\n ((trigram_count[list_of_words[i] + \" \" + list_of_words[i+1] + \" \" + list_of_words[i+2]].values[0]) /\n float(bigram_count[list_of_words[i] + \" \" + list_of_words[i+1]].values[0]))\n if list_of_words[i] + \" \" + list_of_words[i+1] + \" \" + list_of_words[i+2] in trigram_count.columns.values else 0.0 for i in xrange(len(list_of_words) - 2)}\n return proba_dict", "def unigram(tokens):\n model = collections.defaultdict(lambda: 0.01)\n for token in tokens:\n try:\n model[token] += 1\n except KeyError:\n model[token] = 1\n continue\n for word in model:\n model[word] = 
model[word]/float(sum(model.values()))\n return model", "def computeTF(self):\n for word in self.dictionary:\n self.dictionary[word].setTF(self.getTotalTerms())", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def buildDict(self, dict):\n self.all_words = set(dict)\n self.wc_dict = collections.defaultdict(int)\n for w in dict:\n for wc in self.get_wildcards(w):\n self.wc_dict[wc] += 1", "def __cross_wiki_counts(self):\n\n print(\"Updating counts by merging with CrossWiki\")\n\n cnt = 0\n crosswiki_path = os.path.join(\n self.base_url, \"generic/p_e_m_data/crosswikis_p_e_m.txt\"\n )\n\n with open(crosswiki_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n parts = line.split(\"\\t\")\n mention = unquote(parts[0])\n\n if (\"Wikipedia\" not in mention) and (\"wikipedia\" not in mention):\n if mention not in self.wiki_freq:\n self.wiki_freq[mention] = {}\n\n num_ents = len(parts)\n for i in range(2, num_ents):\n ent_str = parts[i].split(\",\")\n ent_wiki_id = int(ent_str[0])\n freq_ent = int(ent_str[1])\n\n if (\n ent_wiki_id\n not in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n ent_name_re = self.wikipedia.wiki_redirect_id(ent_wiki_id)\n if (\n ent_name_re\n in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]\n ):\n ent_wiki_id = self.wikipedia.wiki_id_name_map[\n \"ent_name_to_id\"\n ][ent_name_re]\n\n cnt += 1\n if (\n ent_wiki_id\n in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n if mention not in self.mention_freq:\n self.mention_freq[mention] = 0\n self.mention_freq[mention] += freq_ent\n\n ent_name = self.wikipedia.wiki_id_name_map[\n \"ent_id_to_name\"\n ][ent_wiki_id].replace(\" \", \"_\")\n if ent_name not in self.wiki_freq[mention]:\n self.wiki_freq[mention][ent_name] = 0\n self.wiki_freq[mention][ent_name] += freq_ent", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def term_frequency(ngrams,lang):\n token_dictionary = {}\n 
for ng in ngrams:\n try:\n token_dictionary[ng] = token_dictionary[ng] + 1\n except KeyError:\n token_dictionary[ng] = 1\n return token_dictionary", "def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def count_words_and_dublicates(novel):", "def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")", "def get_counts(data):\n\n word_count = {}\n syll_count = {}\n\n infile = data.corpus\n try:\n\n open_file = codecs.open(infile, 'r', encoding='utf-16')\n for line in open_file:\n line = line.lower()\n # Remove tablet indexing info and line numbers. 
Grab only text data\n line = line.split(',')\n text = clean_line(line[7])\n\n # Update the occurrences of the words in the line\n for word in text.split():\n count = word_count.setdefault(word, 0)\n word_count[word] = count + 1\n\n # Track occurrences of syllables\n update_syllable_count(word, syll_count)\n\n open_file.close()\n except IOError:\n print(\"Cannot open: \" + infile)\n\n return (word_count, syll_count)", "def update_vocab(self, text):\n for char in text:\n if char not in self.vocab:\n self.vocab[char] = len(self.vocab)\n if char not in self.char2count:\n self.char2count[char] = 0\n self.char2count[char] += 1\n return", "def phrase_scoring_ranking(phrases,model,dataset,bitext):\n e_phrases = []\n f_phrases = []\n count = 0\n f_phrase_count = {}\n e_phrase_count = {} #not needed\n #e_f_pair_count = {} #e words as rows and f words as columns\n f_e_pair_count = {} #e words as rows and f words as columns\n for phrase_set in phrases:\n for phrase in phrase_set:\n e_phrases.append(phrase[3])\n f_phrases.append(phrase[2])\n if phrase[2] in f_phrase_count:\n f_phrase_count[phrase[2]] += 1\n else:\n f_phrase_count[phrase[2]] = 1\n if phrase[2] in f_e_pair_count:\n if phrase[3] in f_e_pair_count[phrase[2]]:\n f_e_pair_count[phrase[2]][phrase[3]] += 1\n else:\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n else:\n f_e_pair_count[phrase[2]]={}\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n\n e_phrases = list(set(e_phrases))\n f_phrases = list(set(f_phrases))\n ep_count = len(e_phrases)\n fp_count = len(f_phrases)\n #pmatrix = np.empty(ep_count*fp_count) # ######Not needed if dictionary is used\n #pmatrix = pmatrix.reshape(ep_count,fp_count)\n #pmatrix.fill(0)\n ef_prob_dict = {}\n for e in e_phrases:\n for f in f_phrases:\n ef_count =count_fe_pair(e,f,f_e_pair_count)# f_e_pair_count[e][f]\n f_count = f_phrase_count[f]\n e_idx = e_phrases.index(e) ###Check the count logic again\n f_idx = f_phrases.index(f)\n pair_prob = ef_count/f_count\n #pmatrix[e_idx][f_idx] = pair_prob\n if f in f_e_pair_count:\n if e in f_e_pair_count[f]:\n if f in ef_prob_dict:\n ef_prob_dict[f][e]=pair_prob\n else:\n ef_prob_dict[f] = {}\n ef_prob_dict[f][e] = pair_prob\n\n #if pmatrix[e_idx][f_idx] != 0:\n # print(e,f,ef_count,f_count,pair_prob)\n return ef_prob_dict", "def score(self, sentence):\n\n score = 0.0\n i = 0\n temp = \"\"\n for token in sentence:\n count = self.unigramCounts[token]\n if (i == 0):\n i = i + 1\n temp = token\n continue\n\n key = temp + \",\" + token\n bicount = self.bigramCounts[key]\n unicount = self.unigramCounts[temp]\n temp = token\n if bicount > 0 :\n\n score += (math.log(bicount) - math.log(unicount))\n else:\n unicount = self.unigramCounts[token]\n score += math.log(unicount + 1) + math.log(0.4)\n score -= math.log(self.total + len(self.unigramCounts))\n\n return score", "def construct_ngrams_dict(ngrams_list):\n counts = {}\n\n for t in ngrams_list:\n key = hash_function(t)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n return counts", "def countize(word, ind, count_words, features):\n word = clean(word)\n word = word.split()\n if len(word)>1:\n for i in range(1,len(word)):\n bigram = (word[i-1],word[i])\n count_words[ind].append(bigram)\n features.append(bigram)\n if len(word)>2:\n for i in range(2,len(word)):\n trigram = (word[i-2],word[i-1], word[i])\n count_words[ind].append(trigram)\n features.append(trigram)\n for i in range(len(word)):\n unigram = word[i]\n count_words[ind].append((unigram))\n features.append((unigram))\n return count_words, features", "def 
calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n return tf", "def get_frequencies(tokens):\n cnt = {}\n\n for word in tokens:\n if word not in cnt:\n cnt[word] = 0\n\n cnt[word] += 1\n\n return cnt", "def set_grams(data_path,top=100):\n files = glob.glob(data_path + \"/*/*word*.txt\") # txt files in subfolders\n ngram = []\n table = str.maketrans(\"\",\"\",string.punctuation)\n for f_in in files:\n with open(f_in, 'r') as fi:\n for lines in fi:\n item = lines.replace(\"\\n\",\"\").split()\n term = \"\"\n count = 0\n if len(item)==3: # bigrams\n term0 = str(item[0]).translate(table).strip()\n term1 = str(item[1]).translate(table).strip()\n term = \"{},{}\".format(term0,term1) if (len(term0)>2 and len(term1)>2 and not term0.isnumeric() and not term1.isnumeric()) else (term0 if (len(term0)>2 and not term0.isnumeric()) else (term1 if (len(term1)>2 and not term1.isnumeric()) else \"\")) # comma(,) for OR in Twitter \n count = int(item[2])\n elif len(item)==2: # unigrams\n term = str(item[0]).translate(table).strip()\n count = int(item[1])\n if count>=top and str(term) != 'nan' and len(term)>=3: # ignore term freq minor than top and term length than 3\n ngram.append(term)\n fi.close()\n gn_set = set(ngram)\n \n print(len(gn_set))\n \n f = open(data_path+\".txt\", 'w')\n for w in gn_set:\n f.write('{}\\n'.format(w))\n f.close()\n \n return list(gn_set)", "def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1", "def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1", "def frequencies(corpus, index, to_lower=False):\n freq = {}\n for sentence in corpus.get_sentences():\n for word in sentence:\n key = word[index]\n if to_lower:\n key = key.lower()\n if key in freq:\n freq[key] += 1\n else:\n freq[key] = 1\n\n return freq", "def build_ngram_index(tokenized_documents, ngrams):\n dictionary = {}\n\n doc_ngrams = {}\n for doc in tokenized_documents:\n ngrams_freq = {}\n\n measures = nltk.collocations.BigramAssocMeasures()\n finder = BigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n \n measures = nltk.collocations.TrigramAssocMeasures()\n finder = TrigramCollocationFinder.from_words(tokenized_documents[doc])\n freqs = finder.ngram_fd\n for ngram in freqs:\n ngrams_freq[ngram] = freqs[ngram]\n\n doc_ngrams[doc] = ngrams_freq\n\n for ngram in ngrams:\n dictionary[ngram] = [0]\n for doc in doc_ngrams:\n if ngram in doc_ngrams[doc]:\n dictionary[ngram][0] += doc_ngrams[doc][ngram]\n dictionary[ngram].append((doc, doc_ngrams[doc][ngram]))\n \n return dictionary", "def train(self, iterable):\n for ngram in generate_ngrams(iterable, self.n + 1):\n self.markov_dict.setdefault(ngram[: self.n], Counter()).update([ngram[self.n]])\n self.prob_dict.update([ngram[: self.n]])", "def count_terms(self, tokens):\n\n terms = [self.term_match(t) for t in tokens ]\n \n terms = 
[t for t in terms if t != None]\n\n #print terms\n lf = dict(Counter(terms))\n for k in lf:\n lf[k] /= float(len(tokens))\n #lf[k] = 1 # binarize?\n pass\n return lf", "def compute_frequencies(num_words, documents):\n res = [0 for i in range(num_words)]\n sum = 0\n for word in documents:\n sum += 1\n tmp = set(word)\n for number in tmp:\n res[number] += 1\n \n res = [i / sum for i in res]\n return res", "def count_doc_frequencies(self, docs):\n frequencyIndex = {}\n doc_id = 0\n for doc in docs:\n for term in doc:\n if term not in frequencyIndex:\n frequencyIndex[term] = [doc_id]\n else:\n for id in frequencyIndex[term]:\n if doc_id == id:\n break\n else:\n frequencyIndex[term].append(doc_id)\n doc_id+=1\n\n for term in frequencyIndex:\n occurences = len(frequencyIndex[term])\n frequencyIndex[term] = occurences\n\n return frequencyIndex", "def _count_ngram(ngram_input_list: Sequence[str], n_gram: int) -> Counter:\n ngram_counter: Counter = Counter()\n\n for i in range(1, n_gram + 1):\n for j in range(len(ngram_input_list) - i + 1):\n ngram_key = tuple(ngram_input_list[j : (i + j)])\n ngram_counter[ngram_key] += 1\n\n return ngram_counter", "def _count_ngram(ngram_input_list: Sequence[str], n_gram: int) ->Counter:\n ngram_counter: Counter = Counter()\n for i in range(1, n_gram + 1):\n for j in range(len(ngram_input_list) - i + 1):\n ngram_key = tuple(ngram_input_list[j:i + j])\n ngram_counter[ngram_key] += 1\n return ngram_counter", "def _update_triad_counts(self):\n for i in range(len(self.learning_string) - 3):\n triad = self.learning_string[i] + self.learning_string[i + 1] + self.learning_string[i + 2]\n\n if self.learning_string[i + 3] == '0':\n self.triad_counts[triad][0] += 1\n elif self.learning_string[i + 3] == '1':\n self.triad_counts[triad][1] += 1\n\n return", "def add_unique_word_count(self):\n call_count = lambda letter_: self._count_unique_words(letter_)\n self.dataframe['unique_words'] = self.dataframe['letter'].map(call_count)", "def update_word_stats(self, tweet):\n\n if not self.text:\n return\n\n words = self.text.split()\n\n # process single words\n for word in words:\n self.update_stats('words', word)\n\n # process 2 word lists\n pairs = self.get_phrase_list(words, 2)\n if pairs is not None:\n for word_pair in pairs:\n self.update_stats('word_pairs', self.get_index_from_list(word_pair))\n\n # process 3 word lists\n triples = self.get_phrase_list(words, 3)\n if triples is not None:\n for word_triple in triples:\n self.update_stats('word_triples', self.get_index_from_list(word_triple))", "def add(counts):\n if counts:\n for k in grammar.keys():\n grammar[k] = grammar[k] + counts[k]", "def num_bigram(doc):\n matched_spans = []\n matches = matcher(doc)\n for match_id, start, end in matches:\n span = doc[start:end]\n matched_spans.append(span)\n for span in matched_spans: # merge into one token after collecting all matches\n span.merge()\n return doc", "def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n 
for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''", "def test_fill_in_dic():\n ngrams = NgramFrequencies()\n word_per_list = [\"time\", \"burton's\", \"corpse\", \"bride\"]\n ngrams.fill_in_dic(word_per_list)\n assert ngrams.unigrams_dic == {\n \"COUNT\": 4,\n \"time\": 1,\n \"burton's\": 1,\n \"corpse\": 1,\n \"bride\": 1\n }\n assert ngrams.bigrams_dic == {\n \"COUNT\": 3,\n \"time_burton's\": 1,\n \"burton's_corpse\": 1,\n \"corpse_bride\": 1\n }\n assert ngrams.trigrams_dic == {\n \"COUNT\": 2,\n \"time_burton's_corpse\": 1,\n \"burton's_corpse_bride\": 1\n }", "def unigram_model(list_of_words, unigram_count, N=count_token()):\n d = pd.read_csv(unigram_count)\n proba_dict = {list_of_words[i]: (d[el].values[0] / float(N)) if el in d.columns.values else 0.0 for i, el in enumerate(list_of_words) }\n return proba_dict", "def __init__(self, corpus):\n self.total = 0\n self.reverseBigramCount = defaultdict(lambda : defaultdict(lambda : 0))\n self.bigramCount = defaultdict(lambda : defaultdict(lambda : 0))\n self.unigramCount = defaultdict(lambda: 0)\n self.train(corpus)", "def process_family_frequencies(self, parent, family):\n if not family:\n return None\n counts = {}\n n_tokens = 0\n for description_array in family:\n seen = set([])\n for token in description_array:\n counts[token] = counts.get(token, 0) + 1\n n_tokens += 1\n\n self.word2tf[token] = self.word2tf.get(token, 0) + 1\n self.n_words += 1\n if token not in seen:\n self.word2df[token] = self.word2df.get(token, 0) + 1\n seen.add(token)\n\n self.family2tf[parent] = {tok: count/n_tokens for tok, count in counts.items()}\n self.data.extend(family)\n return True", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def countFreq(self,document):\n self.document = document\n vocab=['python','js','android','php','django','javascript','oracle','ruby','rails','java']\n cnt_vector = CountVectorizer(vocabulary=vocab)\n self.freq_term_matrix = cnt_vector.fit_transform(self.document)\n return self.freq_term_matrix.toarray()", "def _count_occurrences(self, corpus_tokens: list, window_size: int):\n ngram_occurrences = {}\n for i, corpus_token in enumerate(corpus_tokens):\n ngram = []\n if i + window_size > len(corpus_tokens) - 1:\n break\n else:\n for j in range(window_size):\n ngram.append(corpus_tokens[i + j])\n if self.TOKENS.END.value in ngram:\n continue\n else:\n ngram_key = tuple(ngram)\n if ngram_key in ngram_occurrences:\n ngram_occurrences[ngram_key] += 1\n else:\n ngram_occurrences[ngram_key] = 1\n return ngram_occurrences", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def train(self, corpus): \n # TODO your code here\n # Tip: To get words from the corpus, try\n # for sentence in corpus.corpus:\n # for datum in sentence.data: \n # word = datum.word\n for sentence in corpus:\n prevWord = 
\"\"\n prevPrevWord = \"\"\n for word in sentence:\n word = word.strip(STRIP_CHARS)\n word = word.lower()\n currentWord = word\n self.unigramCounts[currentWord] += 1\n self.total += 1\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n if trigram not in self.trigramCounts:\n self.continuationCounts[currentWord] += 1\n self.followingCounts[(prevPrevWord, prevWord)] += 1\n self.trigramCounts[trigram] += 1\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n else:\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n self.total += len(self.unigramCounts)", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def count_n_grams(data, n, start_token='<s>', end_token = '<e>'):\r\n \r\n # Initialize dictionary of n-grams and their counts\r\n n_grams = {}\r\n\r\n \r\n for sentence in data: # complete this line\r\n \r\n # prepend start token n times, and append <e> one time\r\n sentence = [start_token]*n + sentence + [end_token]\r\n \r\n # convert list to tuple\r\n # So that the sequence of words can be used as\r\n # a key in the dictionary\r\n sentence = tuple(sentence)\r\n\r\n \r\n for i in range(len(sentence)+1-n): # complete this line\r\n\r\n # Get the n-gram from i to i+n\r\n n_gram = sentence[i:i+n]\r\n\r\n # check if the n-gram is in the dictionary\r\n if n_gram in n_grams: \r\n \r\n # Increment the count for this n-gram\r\n n_grams[n_gram] += 1\r\n else:\r\n # Initialize this n-gram count to 1\r\n n_grams[n_gram] = 1\r\n \r\n return n_grams", "def addUnigrams(self, rating, writtenReview):\n sentence = writtenReview.split()\n for word in sentence:\n if word not in self.dictionary:\n self.addItem(word)\n self.totalTerms[rating] += 1\n self.dictionary[word].incrementFrequency(rating)", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]", "def increment_count(self, word):\n pass", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # 
| is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def _profile(self, text):\n prof = zeros(len(self.alph)**self.N)\n ngs = ngrams(text, self.N)\n for tup in ngs:\n loc = 0\n for i in range(len(tup)):\n loc += (len(self.alph)**i) * self.alph.index(tup[i])\n prof[loc] += 1\n return prof" ]
[ "0.7613877", "0.754076", "0.7213328", "0.7121193", "0.7052331", "0.6792219", "0.6738704", "0.6667053", "0.6502214", "0.6489286", "0.647944", "0.64368546", "0.6427307", "0.641034", "0.6370047", "0.6328885", "0.6313854", "0.62974036", "0.62933725", "0.6273226", "0.62302506", "0.620344", "0.6201586", "0.61865443", "0.6184886", "0.6174701", "0.6172101", "0.61720663", "0.61524993", "0.6150279", "0.6138737", "0.6106462", "0.60801256", "0.608004", "0.6072656", "0.6045799", "0.6040158", "0.6036199", "0.60251707", "0.6019934", "0.6002051", "0.5982376", "0.5963565", "0.59607244", "0.5948909", "0.59434664", "0.5939282", "0.59319854", "0.5919459", "0.5908679", "0.59001756", "0.5899315", "0.5885471", "0.58834314", "0.587003", "0.58604", "0.58514124", "0.58501804", "0.58459085", "0.5839745", "0.5827479", "0.5822862", "0.58196324", "0.58172333", "0.5813798", "0.58071655", "0.57956904", "0.5786258", "0.57798874", "0.5779528", "0.57774484", "0.5775167", "0.5771084", "0.5766596", "0.57590085", "0.57500756", "0.57495767", "0.5735988", "0.5730348", "0.57278323", "0.5723526", "0.5722045", "0.57175094", "0.57131106", "0.57128155", "0.56992954", "0.56965315", "0.56926936", "0.5691096", "0.56903625", "0.5688576", "0.5681426", "0.5681333", "0.56788903", "0.567876", "0.5673987", "0.56721693", "0.56712574", "0.5667831", "0.56618315", "0.5659215" ]
0.0
-1
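The negative snippets in the record above all count token or n-gram frequencies, just with slightly different data structures (plain dicts, defaultdicts, Counters). As a point of reference, a minimal self-contained sketch of the shared pattern — function and variable names here are illustrative, not taken from the dataset — looks like this in plain Python:

from collections import Counter
from typing import Dict, Iterable

def ngram_counts(tokens: Iterable[str], max_n: int) -> Counter:
    # Count every n-gram of length 1..max_n over the token sequence.
    tokens = list(tokens)
    counts: Counter = Counter()
    for size in range(1, max_n + 1):
        for i in range(len(tokens) - size + 1):
            counts[tuple(tokens[i:i + size])] += 1
    return counts

# Relative unigram frequencies, as in the unigram-model snippets above.
tokens = "the cat sat on the mat".split()
counts = ngram_counts(tokens, 2)
total = sum(v for k, v in counts.items() if len(k) == 1)
unigram_freq: Dict[str, float] = {k[0]: v / total for k, v in counts.items() if len(k) == 1}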
Clean a line of data, removing all annotations from the line.
def clean_line(line, normNum=True, normProf=True): # Remove square brackets, ceiling characters, question marks, other # questionable characters, and line breaks line = re.sub(r'(\[|\])', '', line) line = re.sub(r'(⌈|⌉)', '', line) line = re.sub(r'( / )', ' ', line) line = re.sub(r'/', '', line) line = re.sub(r'\?', '', line) line = re.sub(r'([<]|[>])+', '', line) line = re.sub(r'!', '', line) line = re.sub(r'"', '', line) # Remove researcher's notes, and multiple dashes or '='s line = re.sub(r'(\(.*\))', '', line) line = re.sub(r'(#[.]*)', '', line) line = re.sub(r'[-]{2}', '', line) line = re.sub(r'[=]{2}', '', line) # Replace numbers with 'number' if normNum is True: line = re.sub(r'\b(?<!-)(\d+)(?![\w-])', 'number', line) line = re.sub(r'[-+]?\b\d+\b', 'number', line) #line = re.sub(r'\b([\-\.0-9]+)(?![\w-])', 'number', line) # Replace professions with 'profession' if normProf is True: line = professions.replaceProfessions(line) # Remove blank character at end of line linelength = len(line) if (linelength > 0 and line[linelength-1] == ""): del line[0:linelength-2] return line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleaning (data):", "def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line", "def remove_code_annotations(line_to_transform):\n line_to_transform = str(line_to_transform)\n line_to_transform = line_to_transform.split(' #', 1)[0]\n line_to_transform = line_to_transform.replace(':', '')\n line_to_transform = line_to_transform.replace('\\t', '')\n line_to_transform = line_to_transform.replace(' ', '')\n line_to_transform = line_to_transform.replace('\\n', '')\n line_to_transform = line_to_transform.replace('elif ', '')\n line_to_transform = line_to_transform.replace('if ', '')\n line_to_transform = line_to_transform.replace('else ', '')\n line_to_transform = line_to_transform.replace('else', '')\n line_to_transform = line_to_transform.replace('def ', '')\n line_to_transform = line_to_transform.replace('for ', '')\n line_to_transform = line_to_transform.replace('while ', '')\n return line_to_transform", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def clean_data(self):\n for line in self.file:\n if line.startswith('//') or line.isspace():\n continue\n if '//' in line:\n line = line.split('//')[0]\n line = line.replace('\\n', '')\n line = line.replace(' ','')\n self.commands.append(line)", "def cut_line(self):\r\n self.parachute.pop(0)", "def setCleanAnnotations(self, value):\n return self._set(cleanAnnotations=value)", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def clean(c):", "def delTcline(self, line):\n self._checkfigure()\n ld = self._get_linedict(line)\n for vline in ld['vlines']:\n vline.remove()\n ld['vlines'] = []", "def strip_line(line):\n line = line.strip()\n line = line.rstrip('\\n')\n line = line.rstrip('\\t')\n line = (line.split(\"//\"))[0]\n return line", "def FilterLine(self, a_line):\n return a_line", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def delete_line(self, line):\n self._checkfigure()\n ld = self._get_linedict(line)\n self.delTcline(ld)\n line = ld['line']\n line.remove()\n self.lines.remove(ld)", "def test__clean_line():\n LINES = {\n \"One morn before me were three figures seen,\":\n \"One morn before me were three figures seen,\",\n \"And once—more came they by:-alas! wherefore?\":\n \"And once more came they by: alas! 
wherefore?\",\n }\n for line, clean_line in LINES.items():\n assert(LineBuilder(line)._clean_line() == clean_line)", "def clean(self, line):\r\n m = self.RE.match(line)\r\n if line.strip() == \">\":\r\n return \"\"\r\n elif m:\r\n return m.group(2)\r\n else:\r\n return line", "def remove_lines():\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"merged_history_KMDW.csv\")) as infile:\n outfile.write(infile.next())\n for line in infile:\n if line[0].isdigit():\n outfile.write(line)", "def clean(self, line):\n m = self.RE.match(line)\n if line.strip() == \">\":\n return \"\"\n elif m:\n return m.group(2)\n else:\n return line", "def rstrip_line(line):\n return line.rstrip()", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line", "def process(lines):\n lines = list(map(_clean, lines))\n # lines = list(map(_split, lines))\n return lines", "def removeAnnotation(self,i=0):\n #print \"REMOVE %s\" % i\n map(undraw,self._annotations[i])\n del self._annotations[i]", "def RemoveMarker(self, marker, line):\n assert isinstance(marker, ed_marker.Marker)\n marker.Set(self, line, delete=True)", "def unhighlight_line(self, line):\n self._checkfigure()\n ld = self._get_linedict(line)\n ld['highlighted'] = False\n self.update_lines()", "def clean(text):\n lines = text.split('\\n')\n\n indx = range(len(lines))\n indx.reverse()\n for i in indx:\n temp = lines[i].strip()\n if temp == '' or temp.startswith('#'):\n del lines[i]\n else:\n lines[i] = temp\n\n return lines", "def _clean_data(self, dataset):\n dataset.dropna(inplace=True)\n # Problem: handle missing data (in a different way), noisy data, inconsistent data", "def list_strip(line: list):\n new_line = [field.strip() for field in line]\n if new_line != line:\n tpl = \"Removed trailing whitespaces in fields of line: {}\"\n msg = tpl.format(line)\n warnings.warn(msg, ParseIsatabWarning)\n return new_line", "def clean_lines(lines):\n _lines = []\n for l in lines:\n l = l.strip().rstrip()\n if len(l) > 0:\n _lines.append(l)\n return _lines", "def clean(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n if x < -self.gap:\n self.del_asteroid(i)", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def clean_inp(self):\n self.E_str = \"clean_inp\"\n\n # First remove any comment lines\n new_ltxt = []\n for line_num, line in enumerate(self.file_ltxt):\n edit_line, comment = gen_parse.rm_comment_from_line(line)\n edit_line = edit_line.rstrip()\n if edit_line:\n new_ltxt.append(edit_line)\n self.file_ltxt = new_ltxt[:]\n\n # Get line nums for error messages -before the inp cleaning\n self.line_nums = list(range(1, len(self.file_ltxt)+1))\n for line_num 
in self.line_nums:\n self.file_ltxt_orig[line_num] = self.file_ltxt[line_num - 1]\n self.line_num = 0\n\n self.clean_open_close_brace()", "def test_04_remove_annotations(self):\n self.addAnnotation(\"annotation1\", self.host.id, \"HOST\")\n self.removeAnnotation(self.added_annotations[-1].annotation.id)\n del self.added_annotations[-1]", "def _purify(self, line_str):\n string = line_str.strip('\\n')\n string = string.strip()\n comment_idx = string.find('//')\n if comment_idx == -1:\n return string.strip()\n elif comment_idx == 0:\n return None\n else:\n return string[0:comment_idx].strip()", "def clean_data(self, path, exclude_msgtypes=None):", "def clean_me(args):\n with open(args.input, 'rb') as infile:\n with open(args.output, 'wb') as outfile:\n \n for line in infile:\n if not 'xsi:nil=\"true\"' in line:\n outfile.write(line)\n else:\n print \"Removing %s\" % line", "def strip_rule(line):\n\n return \" \".join(line.split())", "def clean_record(text, \n page_breaks=True,\n midline_returns=True,\n time_marks=True):\n clean_text = text\n if(page_breaks):\n clean_text = remove_page_breaks(clean_text)\n if(midline_returns):\n clean_text = remove_midline_returns(clean_text)\n if(time_marks):\n clean_text = remove_time_marks(clean_text)\n \n return clean_text", "def process_body(line):\n fields = line.split('\\t') # preserves newline\n infos = fields[7].split(';')\n\n whitelisted = [\n info for info in infos\n if any(\n info.startswith(x + '=') or info == x\n for x in WHITELISTED_ANNOTATIONS\n )\n ]\n\n fields[7] = ';'.join(whitelisted)\n return '\\t'.join(fields)", "def remove_some_extraneous_information(variant):\n for key in ['xpos','xstop','vep_annotations',]: variant.pop(key, None)", "def clean(dataset_path: str) -> str:\n def _remove_unused(text: str):\n clean_data = text.lower().strip()\n clean_data = re.sub(\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n \" \", clean_data)\n clean_data = re.sub(r\"<.*>\", \"\", clean_data)\n clean_data = re.sub(r\"@[a-zA-Z0-9_]+\", \"\", clean_data)\n clean_data = clean_data.replace(\"\\n\", \"\")\\\n .replace(\"#\", \"\")\n return clean_data\n\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str\n }\n\n if \"train\" in dataset_path:\n dtypes[\"target\"] = int\n\n new_path = _make_new_filepath(dataset_path, \"clean\")\n df = pd.read_csv(f\"/data/{dataset_path}\", index_col=\"id\", dtype=dtypes)\n df[\"text\"] = df[\"text\"].apply(_remove_unused)\n df.to_csv(f\"/data/{new_path}\")\n return new_path", "def do_clear(self, line):\n self.calc.clear()\n self._debug_stack()", "def sanitize(self, line):\n self.line_count = self.line_count + 1\n components = line.split(\",\")\n for comp in components:\n # if any field has IP in it, see if the value matches something in the dict\n items = comp.split(\"=\")\n if \"ip\" in items[0]:\n if items[1] in self._insts:\n # found this dirty IP in our learned dictionary, replace it\n dirty_ip = items[1]\n clean_ip = self._insts[dirty_ip]\n line = re.sub(dirty_ip, clean_ip, line, 1)\n\n # if this message has a pdu, clean up the pdu too\n msg_type = self._extract_by_key(line, \"type\")\n if \"sflow\" in msg_type or \"event\" in msg_type:\n pdu = self._extract_by_key(line, \"pdu\")\n # substitute the converted IP based on type\n if \".\" in dirty_ip:\n # v4\n line = re.sub(self._v4_string_to_hex(dirty_ip),\n self._v4_string_to_hex(clean_ip), line)\n\n pdu = self.fix_checksum(pdu)\n\n line = line[0: (line.find(\"pdu=\")) +4] + pdu + \",\\n\"\n else:\n # 
v6 - remove : and go to lower case before swap\n dirty_swap = re.sub(\":\", \"\", dirty_ip)\n dirty_swap = dirty_swap.lower()\n line = re.sub(dirty_swap, self._v4_string_to_hex(clean_ip), line)\n if (args.hexdump):\n pdu_hex = pdu\n pdu_hex = \" \".join(pdu_hex[i:i+2] for i in range(0, len(pdu_hex), 2)) #put timestamp and offset in front of pdu hex\n pdu_hex = pdu[0:15] + \" 000000 \" + pdu_hex + \" ,\\n\"\n hexdump_file.write(pdu_hex)\n return line", "def process(self,line):\n\n pattern = re.compile(\"@.*?@\")\n matches = pattern.findall(line)\n for m in matches:\n replacement = r\"<small>{}</small>\".format(re.escape(m[1:-1]))\n line = pattern.sub(replacement,line)\n\n return line", "def trim_data(data, attributes):\n return data.drop(attributes, axis=1)", "def clear_annotation(self):\n\n self.xValues = []\n self.yValues = []\n self.colors = []\n\n self.stop_video()", "def clean_data(td):\n data = td.string\n try:\n return data.strip(\" \\n:-\")\n except AttributeError:\n return u\"\"", "def remove_empty_lines(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # Closing the intermediate file.\n self.result_code = open(\"result.c\",\"w\") #Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line is empty.\n if line != \"\\n\":\n self.result_code.write(line) # Writing the non-empty line onto the intermediate file.\n self.result_code.close() # Closing the intermediate file.", "def filter_line(self, line):\n if line.startswith(\"<\"):\n # Simply filter out all lines beginning with '<', which are metadata\n return None\n\n # Some metadata-like text is also included at the start of lines, followed by \". - \"\n if u\". - \" in line:\n __, __, line = line.partition(u\". - \")\n\n # Remove -s and spaces from the start of lines\n # Not sure why they're often there, but it's just how the transcripts were formatted\n line = line.lstrip(u\"- \")\n\n # Skip lines that are fully surrounded by brackets: they're typically descriptions of what happened\n # E.g. 
(Applause)\n if line.startswith(u\"(\") and line.endswith(u\")\"):\n return None\n\n # It's common for a speaker's first utterance to start with a marker indicating the original language\n line = language_indicator_re.sub(u\"\", line)\n return line", "def remove_ats(self):\n\t\tfor key in self.keys():\n\t\t\tif key[:1] == '@':\n\t\t\t\ttry: del self[key]\n\t\t\t\texcept: pass", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n self.data = self.data[self.data['status'] <= max_status]\n\n return", "def clean_json(self, line_no, row):\n if len(row) not in [4, 5]:\n return False\n return True", "def clean_line_generator_v2(df_pkl=None, fn='untitled'):", "def clear(self):\n self.clear_markers()\n self.l_marker.remove()\n self.l_line.remove()\n self.r_marker.remove()\n self.r_line.remove()", "def clean_data(self, lines):\n\n data = []\n curr = None\n for line in lines:\n line = self.clean_line(line)\n\n temp = []\n quotes = 0\n for item in line.split():\n if quotes % 2 == 0:\n temp.append(item)\n else:\n temp[-1] += item\n quotes += item.count(\"\\\"\")\n line = temp\n\n if not line:\n continue\n if curr:\n if self.compare_keys(curr, line):\n curr = self.merge_lines(curr, line)\n else:\n data.append(self.add_line(curr))\n curr = line\n else:\n curr = line\n if curr:\n data.append(self.add_line(curr))\n return data", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def clean_serial_data(data):\n clean_data = []\n line_data = []\n for line in data:\n print (line)\n #line = float(line)\n clean_data.append(int(line)/1000)\n \n return clean_data", "def remove_line(self, origin):\n current_tile = self.board[origin[0]][origin[1]]\n\n if current_tile.is_dot:\n temp = current_tile.next\n current_tile.next = None\n current_tile = temp\n\n # Remove color of all non dot tiles in line.\n while current_tile and current_tile.color and not current_tile.is_dot:\n temp = current_tile.next\n current_tile.color = None\n current_tile.next = None\n current_tile = temp", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.nan\n\n # Evaluate the proton flux data\n pcols = ['pflux_47-68', 'pflux_115-195', 'pflux_310-580',\n 'pflux_795-1193', 'pflux_1060-1900']\n self[self.data['status_p'] > max_status, pcols] = np.nan\n\n # Include both fluxes and the 
anisotropy index in the removal eval\n eval_cols = ecols + pcols\n eval_cols.append('anis_ind')\n\n # Remove lines without any good data\n good_cols = (np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)\n bad_index = good_cols[good_cols == 0].index\n self.data = self.data.drop(index=bad_index)\n\n return", "def test_line_strip():\n for _x in range(100):\n l_str = \" \".join([random_str(5, 10) for x in range(30)])\n l_str = (\" \" * randint(0, 10)) + l_str + (\" \" * randint(0, 10))\n line = Line(l_str, random_str(10, 20), randint(1, 10000))\n # Strip the string\n l_stripped = line.strip()\n assert l_stripped == l_str.strip()\n assert isinstance(l_stripped, Line)\n assert l_stripped.file == line.file\n assert l_stripped.number == line.number", "def cleanData(rawData):\n\trawData = re.sub(r'R-LRB- \\(', r'R-LRB- -LRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\)', r'R-RRB- -RRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\(', r'R-RRB- -LRB-', rawData)\n\trawData = re.sub(r'-LRB- \\(', r'-LRB- -LRB-', rawData)\n\trawData = re.sub(r'-RRB- \\)', r'-RRB- -RRB-', rawData)\n\trawData = re.sub(r'PU \\(', r'PU -LRB-', rawData)\n\trawData = re.sub(r'PU \\)', r'PU -RRB-', rawData)\n\trawData = re.sub(r':-\\)', r'smileyface', rawData)\n\n\treturn rawData", "def CleanLineEndings(aLine):\n str = aLine.replace(cr, \"\") # remove cr\n str = str.replace(lf, \"\") # remove lf\n return str", "def clean_lines(df_column):\n \n clean_lines = []\n # pattern for html tags\n tag_match = re.compile('<.*?>')\n # patterm for website\n website_match = re.compile('https?:\\/\\/.*[\\r\\n]*')\n # pattern for tex\n tex_match = re.compile('\\$\\$?.+?\\$\\$?')\n \n for line in df_column:\n s = re.sub(tag_match, '', line)\n s = re.sub(website_match, '[website]', s)\n s = re.sub(tex_match, '[tex]', s)\n # replace extra whitespace with spaces\n for x in string.whitespace:\n s = s.replace(x, ' ')\n clean_lines.append(s)\n \n return clean_lines", "def clean(df):", "def cleanup(segment):\n cnt = ''.join(segment.file_content)\n index = cnt.find('\\\\annotate')\n if index < 0:\n return\n while index >= 0:\n cnt, new_ind = parse_annotation(cnt, index)\n index = cnt.find('\\\\annotate', new_ind)\n f = codecs.open(segment.filename, 'w', 'utf-8')\n f.write(cnt)\n f.close()\n info('Updated: {} {}'.format(segment.voice_name, segment.name))", "def clear(self) -> None:\n self.raw = ''\n self.extent = None # type: ignore[assignment]\n self._lines = []\n self.items = None\n self.seen_headers = {}\n return", "def clean_smile(self, smi):\n smi = smi.replace('\\n', '')\n return smi", "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]", "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def pruneMarks(self):\n self.__prune_marks(self.nodes(data=True))", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def _rstrip(line, JUNK='\\n \\t'):\r\n\r\n i = len(line)\r\n while i > 0 and line[i-1] in JUNK:\r\n i -= 1\r\n return line[:i]", "def clean_textual_attributes(self):\n for attribute in self.__textual_attributes:\n for index, text in self.__df[attribute].dropna().iteritems():\n text = remove_non_printable_characters(text)\n text = remove_html_tags(text)\n text = 
remove_unnecessary_spaces(text)\n self.__df.at[index, attribute] = text", "def experience_clean_row(row_of_data):\n experience = row_of_data.get('experience')\n z = list(set(remove_filler_words(experience)))\n return z", "def drop_little_line(data, seuil):\n idx = []\n\n for i, line in enumerate(data.abstract):\n if len(line.split()) < seuil:\n idx.append(i)\n data.drop(data.index[idx])\n data.index = range(len(data.index))\n return data", "def minimal_clean_data_inplace(df):\n # There are some 'unknown' users in train dataset only\n unknown_data_lines = df['sexo'].isnull() & df['age'].isnull() & df['ind_empleado'].isnull() & \\\n df['fecha_alta'].isnull() & df['pais_residencia'].isnull()\n\n logging.info(\"- Number of lines with unknown data : %s\" % unknown_data_lines.sum())\n\n # Remove these users as clients\n _clients = df[unknown_data_lines]['ncodpers'].unique()\n bad_lines = df['ncodpers'].isin(_clients)\n df.drop(df[bad_lines].index, inplace=True)\n\n logging.info(\"- Number of columns with nan : %s\" % df.isnull().any().sum())\n\n # Remove accent\n df.loc[df['nomprov'] == \"CORU\\xc3\\x91A, A\", \"nomprov\"] = \"CORUNA\"\n\n unknown_cols = ['sexo',\n 'ind_empleado',\n 'pais_residencia',\n 'ult_fec_cli_1t',\n 'conyuemp',\n 'canal_entrada',\n 'nomprov',\n 'segmento',\n 'tiprel_1mes',\n 'indrel_1mes']\n # Start with cols -> replace nan with UNKNOWN\n for col in unknown_cols:\n df.loc[df[col].isnull(), col] = \"UNKNOWN\"\n\n # Set unknown renta to -99\n df.loc[df['renta'].isnull(), 'renta'] = -99\n\n # Next `fecha_alta` :\n assert df['fecha_alta'].isnull().sum() == 0, \\\n \"Need to replace nan in 'fecha_alta', count=%s\" % df['fecha_alta'].isnull().sum()\n\n # **Remove 'tipodom' and 'cod_prov' columns**\n df.drop([\"tipodom\", \"cod_prov\"], axis=1, inplace=True)\n \n # Convert 'ind_nuevo' to int\n df['ind_nuevo'] = df['ind_nuevo'].astype(int)\n \n # Remove floating point at string indrel_1mes\n df['indrel_1mes'] = df['indrel_1mes'].apply(lambda x: str(int(float(x))) if len(x) == 3 else x)\n\n if \"ind_nomina_ult1\" in df.columns and \"ind_nom_pens_ult1\" in df.columns:\n # Target labels : `ind_nomina_ult1`, `ind_nom_pens_ult1` : nan -> 0\n # I could try to fill in missing values for products by looking at previous months,\n # but since it's such a small number of values for now I'll take the cheap way out.\n df.loc[df.ind_nomina_ult1.isnull(), \"ind_nomina_ult1\"] = 0\n df.loc[df.ind_nom_pens_ult1.isnull(), \"ind_nom_pens_ult1\"] = 0\n\n # replace 'antiguedad' with the number of months between 'fecha_alta' and 'fecha_dato'\n func1 = lambda x: _to_ym_dec(to_yearmonth(x))\n func2 = lambda x: max(_to_nb_months(x), 0) \n\n v1 = df['fecha_dato'].apply(func1)\n v2 = df['fecha_alta'].apply(func1)\n v3 = (v1 - v2).apply(func2)\n df.loc[:, 'antiguedad'] = v3\n \n # Replace 'ult_fec_cli_1t' by current nb of months from fecha_dato, if negative, set to zero\n mask = df['ult_fec_cli_1t'] == 'UNKNOWN'\n df.loc[mask, 'ult_fec_cli_1t'] = df[mask]['fecha_dato']\n v1 = df['fecha_dato'].apply(func1)\n v2 = df['ult_fec_cli_1t'].apply(func1)\n v3 = (v1 - v2).apply(func2)\n df.loc[:, 'ult_fec_cli_1t'] = v3", "def remove_line(self, id):\n id = id_force_tuple(id)\n prf = self.prf.get_parent_proof(id)\n split = id[-1]\n prf.items = prf.items[:split] + prf.items[split+1:]\n for item in prf.items[split:]:\n decr_proof_item(item, id)\n\n self.check_proof(compute_only=True)", "def cleanGpt2Prediction(input_text, prediction):\n if input_text and input_text[-1] != \" \": input_text += \" \"\n prediction = 
input_text + prediction.strip()\n cutted = \"\"\n if prediction[-1] != '.':\n last_dot_idx = prediction.rfind('. ')\n if last_dot_idx != -1:\n cutted = prediction[last_dot_idx + 1:]\n prediction = prediction[:last_dot_idx + 1]\n print(f\"\"\"{'='*20}\\n-> Removed:\\n|{cutted}|\\nat the end of prediction\"\"\")\n res = [line for line in prediction.split('\\n') if line and line.strip()[0] != '\"']\n return '\\n'.join(res)", "def _(event):\n deleted = line.delete_before_cursor(count=-line.document.get_start_of_line_position())\n line.set_clipboard(ClipboardData(deleted))", "def strip_line_ending( line ):\n\n # surely there's a better way?\n while len(line) and line[-1] in '\\n\\r':\n line = line[:-1]\n\n while len(line) and line[0] in '\\n\\r':\n line = line[1:]\n \n return line", "def strip(self):\n self.document_type = self.document_type.strip()\n self.document_reg_id = self.document_reg_id.strip()\n self.owner_cross_reference = self.owner_cross_reference.strip()\n self.routing_slip_number = self.routing_slip_number.strip()\n self.bcol_account = self.bcol_account.strip()\n self.dat_number = self.dat_number.strip()\n self.examiner_id = self.examiner_id.strip()\n self.update_id = self.update_id.strip()\n self.phone_number = self.phone_number.strip()\n self.attention_reference = self.attention_reference.strip()\n self.name = self.name.strip()\n self.legacy_address = self.legacy_address.strip()\n self.consideration_value = self.consideration_value.strip()\n self.affirm_by_name = self.affirm_by_name.strip()\n self.liens_with_consent = self.liens_with_consent.strip()\n self.client_reference_id = self.client_reference_id.strip()\n self.own_land = self.own_land.strip()", "def remove_line(self, hash_code):\n line_index = None\n marker_index = None\n\n for i, lines in enumerate(self._lines):\n for j, line in enumerate(lines):\n if hash_graphics_line(line) == hash_code:\n marker_index = j\n line_index = i\n\n if line_index is None or marker_index is None:\n return None\n\n del self._lines[line_index][marker_index]\n self.set_changed()\n\n if len(self._lines[line_index]) == 0:\n del self._lines[line_index]\n return None\n\n return self._lines[line_index]", "def clean_data(data):\n src_list = []\n no_net_view = []\n for row in range(data.nrows):\n # Ignore header row.\n if row == 0:\n continue\n # Ignore blank row.\n if data.row_values(row)[1] == '' and \\\n data.row_values(row)[15] == '':\n continue\n # Capture lines that do not have a view listed.\n if data.row_values(row)[1] and not data.row_values(row)[15]:\n no_net_view.append(data.row_values(row))\n continue\n src_list.append(data.row_values(row))\n\n # Clean's src_list values.\n src_list = [[item.replace('\\t', '') for item in row\n if isinstance(item, str)]\n for row in src_list]\n src_list = [[item.replace('\\n', ', ') for item in row\n if isinstance(item, str)]\n for row in src_list]\n src_list = [[item.replace(', ,', ', ') for item in row\n if isinstance(item, str)]\n for row in src_list]\n src_list = [[item.strip() for item in row\n if isinstance(item, str)]\n for row in src_list]\n for enum, row in enumerate(src_list):\n row[0] = row[0].lower()\n src_list[enum] = row\n return src_list", "def remove(self, from_line, from_col, to_line, to_col):\n assert from_line == to_line\n from_col = self.canonicalize_column_index(from_line, from_col)\n to_col = self.canonicalize_column_index(to_line, to_col)\n\n col_off = self.col_offs[from_line]\n adj_from_col = col_off.get_rewritten_pos(from_col)\n adj_to_col = col_off.get_rewritten_pos(to_col)\n 
theline = self.lines[from_line]\n self.lines[from_line] = theline[:adj_from_col] + theline[adj_to_col:]\n col_off.remove(from_col, to_col-from_col)", "def _clean_data(self):\n if not path.exists('auto-mpg.data.txt'):\n logger.info('Could not find auto-mpg.data.txt in the current working directory')\n sys.exit()\n else:\n try:\n with open('auto-mpg.data.txt', 'r') as dirty_data:\n with open('auto-mpg.clean.txt', 'w') as clean_data:\n ## counter for row writes\n counter = 0\n for row in csv.reader(dirty_data):\n clean_data.write(row[0].expandtabs(1) + '\\n')\n counter +=1\n except Exception as e:\n logger.info('File error occurred: {e}. Exiting')\n sys.exit()", "def _strip_lines(lines):\n for line in lines:\n stripped = line.strip()\n if stripped:\n yield stripped", "def reset_annotations(self):\n # FIXME: this state does not make sense\n self.annotation_date_set = False\n self.annotation_comment_set = False\n self.annotation_type_set = False\n self.annotation_spdx_id_set = False", "def unlines(line):\n\n return line.translate(str.maketrans('\\n', ' '))", "def cleaned_contents(self):\n snip_with_code = re.compile(\"(//.*snip(\\-file)*:?.*\\n)(\\+\\n)?(\\[.*\\]\\n)*----\\n(.*\\n)*?----\\n\", flags=re.IGNORECASE)\n cleaned = re.sub(snip_with_code, r'\\1', self.contents)\n return cleaned", "def clean_data(self):\n data_clean = []\n for item in self.data:\n if int(item[2]) >= self.seq_length and int(item[2]) <= self.max_frames:# and item[1] in self.classes:\n data_clean.append(item)\n\n return data_clean", "def clean(args):\n with_dataset(args, Dataset._clean)", "def education_clean_row(row_of_data):\n education = row_of_data.get('education')\n z = list(set(remove_filler_words(education)))\n return z", "def _strip_once(value):\n s = MLStripper()\n s.feed(value)\n s.close()\n return s.get_data()", "def applyMorphologicalCleaning(self, image):", "def remove_comments(line):\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line", "def clean(datapath):\n red_flags = ['â€', 'Â']\n\n with open(datapath, 'r') as file:\n data = json.load(file)\n\n bad = []\n good = []\n for article in progress(data, 'Fixing {0} articles...'.format(len(data))):\n for key in ['title', 'text']:\n article[key] = fix_text_segment(article[key])\n\n flagged = False\n for flag in red_flags:\n if flag in article['text'] + article['title']:\n bad.append(article)\n flagged = True\n break\n if not flagged:\n good.append(article)\n\n print('Getting rid of {0} bad articles.'.format(len(bad)))\n\n outpath = datapath.replace('.json', '_cleaned.json')\n with open(outpath, 'w') as file:\n json.dump(good, file)", "def strip_comments(line):\n if \"#\" in line:\n return line[:line.find(\"#\")]\n else:\n return line" ]
[ "0.65842664", "0.6322053", "0.61243117", "0.6106798", "0.6106798", "0.6087376", "0.60553944", "0.5906017", "0.5889315", "0.5888119", "0.58622", "0.58618647", "0.58282447", "0.58261687", "0.5818282", "0.58130705", "0.57777137", "0.5760969", "0.57328576", "0.57266486", "0.5716569", "0.5714271", "0.5655816", "0.5653608", "0.56232756", "0.5597971", "0.5590046", "0.558328", "0.5564838", "0.5535715", "0.55310595", "0.55267656", "0.55124736", "0.55076724", "0.5506068", "0.54923844", "0.5490053", "0.5455799", "0.54330385", "0.54263836", "0.54222393", "0.5422164", "0.54146314", "0.541215", "0.5403055", "0.5395121", "0.5388865", "0.53869873", "0.5381728", "0.5363039", "0.53541136", "0.5352374", "0.5351252", "0.5344728", "0.5342233", "0.53421617", "0.5333348", "0.53190917", "0.53185266", "0.5311936", "0.53043824", "0.52966964", "0.5283882", "0.5266071", "0.52626795", "0.5252717", "0.5249984", "0.5243088", "0.52323174", "0.5226279", "0.52212673", "0.52186567", "0.52183056", "0.52108246", "0.5202519", "0.5193992", "0.5192132", "0.51915854", "0.5189797", "0.51839656", "0.51836866", "0.5181604", "0.51642716", "0.51635385", "0.51583624", "0.5154828", "0.5154152", "0.5147709", "0.514597", "0.51426816", "0.5119757", "0.5114575", "0.5110482", "0.510176", "0.50974315", "0.5089471", "0.50836194", "0.5077082", "0.5071092", "0.5069919" ]
0.6030962
7
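The clean_line document in the record above strips annotations with a chain of re.sub calls; note that its final branch (testing line[linelength-1] == "") can never be true, since indexing a non-empty string always yields a one-character string, so the trailing del statement is effectively dead code. A minimal sketch of the same idea, with the trailing-whitespace step handled by str.strip — the patterns and names below are assumptions for illustration, not part of the dataset — is:

import re

# Assumed annotation patterns, loosely mirroring the ones in the document above.
_ANNOTATION_PATTERNS = [
    re.compile(r'\(.*?\)'),        # researcher notes in parentheses
    re.compile(r'[\[\]⌈⌉<>"!?]'),  # stray brackets, ceiling marks, punctuation
    re.compile(r'--+|==+|#+'),     # dash/equals runs and hash markers
]

def strip_annotations(line: str, norm_num: bool = True) -> str:
    for pattern in _ANNOTATION_PATTERNS:
        line = pattern.sub('', line)
    if norm_num:
        line = re.sub(r'\b\d+\b', 'number', line)  # replace bare numbers
    return re.sub(r'\s+', ' ', line).strip()       # collapse whitespace, trim ends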
Returns the accounting period that is currently valid. An accounting_period is valid when the current date lies between the begin and end of the accounting_period
def get_current_valid_accounting_period(): current_valid_accounting_period = None for accounting_period in AccountingPeriod.objects.all(): if accounting_period.begin < date.today() and accounting_period.end > date.today(): return accounting_period if not current_valid_accounting_period: raise AccountingPeriodNotFound()
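The document above scans every AccountingPeriod row in Python and returns the first one whose begin/end dates bracket today. Assuming the same Django model and the AccountingPeriodNotFound exception it references, the lookup can also be expressed as a single queryset filter (an illustrative sketch, not the dataset's own code):

from datetime import date

def current_accounting_period_via_filter():
    # AccountingPeriod and AccountingPeriodNotFound are assumed to come from the
    # same app as the document above; their import paths are not shown there.
    today = date.today()
    period = AccountingPeriod.objects.filter(begin__lt=today, end__gt=today).first()
    if period is None:
        raise AccountingPeriodNotFound()
    return period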
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCurrentValidAccountingPeriod():\n currentValidAccountingPeriod = None\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.begin < date.today() and accountingPeriod.end > date.today():\n return accountingPeriod\n if currentValidAccountingPeriod == None:\n raise NoFeasableAccountingPeriodFound()", "def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end", "def current_period(self):\n return self._current_period", "def get_all_prior_accounting_periods(target_accounting_period):\n accounting_periods = []\n for accounting_period in AccountingPeriod.objects.all():\n if accounting_period.end < target_accounting_period.begin:\n accounting_periods.append(accounting_period)\n if accounting_periods == []:\n raise AccountingPeriodNotFound(\"Accounting Period does not exist\")\n return accounting_periods", "def getAllPriorAccountingPeriods(targetAccountingPeriod):\n currentValidAccountingPeriod = None\n accountingPeriods = []\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.end < targetAccountingPeriod.begin:\n accountingPeriods.append(accountingPeriod)\n if accountingPeriods == []:\n raise NoPriorAccountingPeriodFound()\n return accountingPeriods", "def period(self):\n return self.__period", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def __get_period(self):\n return self.__period", "def get_chart_period(self,req):\n now=int(DATE())\n period=INT(req.period) # allow for it having been a string\n if period>9999: # assume it is a month\n if period<(now//100): # a valid complete previous month\n prior=True# this is a previous month\n else:\n period=now//100 # default to current month\n prior=False\n start=period*100+1\n end=self.nextperiod(period)*100+1\n else: # assume it is a year\n if period and (period<(now//10000)): # a prior year\n prior=True# this is a previous year\n else:\n##\n# period=now//100 # default to current month\n# prior=False\n# start=period*100+1\n# end=self.nextperiod(period)*100+1\n##\n period=now//10000 # default to current year\n prior=False\n start=period*10000+101\n end=self.nextperiod(period)*10000+101\n return period,start,end,prior", "def planning_period(self):\n return self._planning_period", "def getPeriod(self):\n return StripePeriod(self.base.get(\"period\", []))", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "def get_interval(self):\n return self._period", "def get_period(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.PERIOD_INVALID\n res = self._period\n return res", "def 
get_period_guarantee_faithful_compliance(self):\n return ceiling(self.get_period_faithful_compliance, 3)", "def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0", "def get_period_range(self, period, start, end, inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # inclusive_start means that the result set will include the whole period\n # containing the start date. Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals", "def checkpoint_period_get(self):\n raise Exception(\"TODO\")", "def expected_last_period_end(self):\n return self._expected_last_period_end", "def valid_period(request):\n return request.param", "def _get_period(self, cr, uid, context={}):\n\n account_period_obj = self.pool.get('account.period')\n ids = account_period_obj.find(\n cr, uid, time.strftime('%Y-%m-%d'), context=context)\n period_id = ids[0]\n return period_id", "def real_period(self):\n return max(\n self.period * self.PERIOD_MARGIN_FACTOR -\n (self.max_lag if self.max_lag else self.lag * self.LAG_MARGIN_FACTOR),\n 0.0)", "def getBeginEnd(self):\n if (self.dr_type == choices.DATE_RANGE_TYPE_FIXED):\n return self.begin, self.end\n\n elif (self.dr_type == choices.DATE_RANGE_TYPE_VARIABLE):\n end = datetime.now()\n\n if (self.unit == choices.TIME_UNIT_DAY):\n begin = end - relativedelta(days=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_WEEK):\n begin = end - relativedelta(weeks=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_MONTH):\n begin = end - relativedelta(months=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_YEAR):\n begin = end - relativedelta(years=self.quantity)\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'unit' must be a numeric\"\n \" value in: {units}.\".format(units=\", \".join([\n \"{const} ({name})\".format(const=unit, name=unit_name)\n for unit, unit_name in choices.TIME_UNIT\n if unit is not None]))\n )\n\n return begin, end\n\n else:\n # This case should not happen\n 
raise Exception(\"A DateRange object's 'dr_type' must be one of:\"\n \" {const_fixed} (fixed range) or {const_dynamic}\"\n \" (dynamic range).\".format(\n const_fixed=choices.DATE_RANGE_TYPE_FIXED,\n const_dynamic=choices.DATE_RANGE_TYPE_VARIABLE\n ))", "def period(self) -> int:", "def get_period_guarantee_advance(self):\n return ceiling(self.scheduled_completion, 3)", "def renewal_period(self) -> Optional[float]:\n return pulumi.get(self, \"renewal_period\")", "def get_first_period(start_record, end_record, start_period, end_period):\n start_record, end_record, start_period, end_period = to_datetime(start_record, end_record, start_period, end_period)\n pspan = end_period - start_period\n delta_year = relativedelta(years=1)\n # what is the first day of year of the start of the period that fits the record?\n start_rec_year = start_record.year\n d = datetime(start_rec_year, start_period.month, start_period.day)\n if d < start_record:\n d = d + delta_year\n delta_years = start_period.year - d.year\n e = end_period + relativedelta(years=-delta_years)\n return (d, e)", "def periodCheck(data):", "def date_validity(self):\n return self._date_validity", "def _get_period(self, cr, uid, context=None):\n context = context or {}\n if context.get('period_id', False):\n return context['period_id']\n account_period_obj = self.pool.get('account.period')\n ctx = dict(context, account_period_prefer_normal=True)\n ids = account_period_obj.find(cr, uid, context=ctx)\n period_id = False\n if ids:\n period_id = ids[0]\n return period_id", "def between(cls, begin_date: datetime.date, end_date: datetime.date) -> float:\n\n if begin_date > end_date:\n raise ValueError('End date must not be before begin date.')\n if begin_date == end_date:\n return 0\n data = cls.cumulative()\n first = data.get((begin_date.year, begin_date.month), None)\n last = data.get((end_date.year, end_date.month), None)\n if first is None or last is None:\n raise ValidationError(\"Inflation figures don't cover entire period requested: {} - {}\".format(begin_date,\n end_date))\n return (last / first) - 1", "def orbital_period(self):\n return self._orbital_period", "def valid_until(self) -> datetime:\n return self._valid_until", "def current(self) -> Optional['outputs.CommitmentPeriodResponse']:\n return pulumi.get(self, \"current\")", "def _period_from_date(self):\n if self.date['year']:\n if self.date['month']:\n if self.date['day']:\n self.period = Period.DAILY\n else:\n self.period = Period.MONTHLY\n else:\n self.period = Period.YEARLY\n else:\n self.period = Period.FULL", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return 
pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def test_list_grading_periods_accounts(self):\r\n account_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_accounts(account_id)", "def get_period_budgets(cls, now):\n limits_dict = {}\n strategies = cls.objects_visible.filter(is_distributed_evenly=True)\n strategies = cls.running(strategies)\n\n for strategy in strategies:\n limits_dict[strategy.public_id] = strategy.period_budget(now)\n\n log.info('[SPENDINGS] Period budgets calculated (currency): {0}'.format(limits_dict))\n\n # Cast to budget precision used in Redis\n return {strategy: cast_CPM_to_dbbid(cast_currency_to_CPM(budget)) for strategy, budget in limits_dict.items()}", "def billing_period_start(self): # ISO8601 or timestamp\n return self._safe_value(VAR_BILLINGPERIODSTART, int)", "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def current_effective_deadline(cls) -> float:", "def get_periods():\n return [\n relativedelta(),\n relativedelta(days=6),\n relativedelta(months=1),\n relativedelta(months=3),\n relativedelta(years=1),\n relativedelta(years=5)\n ]", "def get_period(self):\n raise NotImplementedError('Agent is an abstract base class')", "def block_period_consumption(self):\n return self._safe_value(VAR_BLOCKPERIODCONSUMPTION, float)", "def get_last_period(self, status_id, company_payroll_id, config=None):\n if not config:\n config = self.env['ka_hr_payroll.config'].default_config()\n\n date_now = datetime.now().date()\n date_config = datetime.strptime(\"{0}-{1}-{2}\".format(date_now.year, date_now.month, config.date_start),\n DATE_FORMAT)\n date_config_str = date_config.strftime(DATE_FORMAT)\n\n return self.search([\n ('date_start', '<=', fields.Date.today()),\n ('status_id', 'parent_of', status_id),\n ('state', '=', 'done'),\n ('state_rapel', '!=', '1'),\n ('date_done', '<', date_config_str),\n ('company_payroll_id', '=', company_payroll_id)\n ], limit=1, order='date_start desc')", "def remaining_days(self):\n if self.trialing or self.trial_ended:\n return self.remaining_trial_days\n else:\n return self.remaining_days_in_current_period", "def period(self):\n return float(self._period) / 1000", "def sleepPeriodValidate(self):\n # sleep_validate = False (not in sleep period)\n # sleep_validate = True (in sleep period)\n \n sleep_validate = None\n pre_midnight = '23:59'\n midnight = '00:00'\n \n # check if out of sleep period\n if self.current_time >= self.sleep_stop and self.current_time < self.sleep_start:\n sleep_validate = False\n \n # check if in sleep period\n elif self.current_time >= self.sleep_start and self.current_time <= pre_midnight:\n sleep_validate = True \n elif self.current_time < self.sleep_stop and self.current_time > midnight:\n sleep_validate = True\n \n return sleep_validate", "def getUpdatePeriod( self, obj=None ):\n if obj is not None:\n period = 
self.getSyndicationInfo(obj).period\n else:\n period = self.period\n return period", "def kind(self):\n return DateValueTypes.PERIOD", "def get_current_fiscal_year(self):\n current_date = datetime.today().date()\n for year in self.fiscal_years.all():\n if year.begin_date < current_date < year.end_date:\n return year\n return None", "def dispatch_interval(self):\n start = self.start\n start_date = start.date()\n\n # add zero padding to make period_id 3 chars long\n period_id = str(self.period_id).zfill(3)\n\n if start.hour < 4 and period_id != \"001\":\n start_date = start_date - timedelta(days=1)\n\n # Concat zero-padded dates and add period id\n dispatch_interval = \"\".join([start_date.strftime(\"%Y%m%d\"), period_id])\n\n return dispatch_interval", "def InactiveNoEndDate(obj):\n if not obj.active_p:\n if not (obj.end_date):\n raise interface.Invalid(\n _(\"If a person is inactive End Date must be set\"), \n \"end_date\", \n \"active_p\")", "def in_grace_period_count(self):\n if \"inGracePeriodCount\" in self._prop_dict:\n return self._prop_dict[\"inGracePeriodCount\"]\n else:\n return None", "def get_employees_born_in_period(cls, start_date, end_date,\n strategy=lazyload):\n cls._check_strategy(strategy)\n\n employees = db.session.query(Employee).options(\n strategy(Employee.department)\n ).filter(\n and_(\n Employee.date_of_birth > start_date,\n Employee.date_of_birth < end_date\n )\n ).all()\n return employees", "def _get_default_period(self, cr, uid, context=None):\n context = context or {}\n if context.get('period_id', False):\n return context['period_id']\n account_period_obj = self.pool.get('account.period')\n ctx = dict(context, account_period_prefer_normal=True)\n ids = account_period_obj.find(cr, uid, context=ctx)\n period_id = False\n if ids:\n period_id = ids[0]\n return period_id", "def validate_check_in_period(check_in_period):\n if not check_in_period:\n check_in_period = 30\n if not isinstance(check_in_period, int):\n try:\n check_in_period = int(check_in_period)\n except ValueError:\n print \"Incorrect check-in period given. Setting to 30.\"\n check_in_period = 30\n\n return check_in_period", "def find_period_below(self, start, end, target, length):\n\n if start > end:\n raise ValueError(\"End needs to be after start!\")\n if length < 0:\n raise ValueError(\"Period length must be larger than zero!\")\n\n period_start = (start if self.get(start) <= target else None)\n\n start_ix = self._trace.bisect_right(start)\n end_ix = self._trace.bisect_left(end)\n for time, lvl in self._trace.items()[start_ix:end_ix]:\n # Period long enough?\n if period_start is not None:\n if time >= period_start + length:\n return period_start\n # Not enough space until end?\n elif time + length > end:\n return None\n # Above target? 
Reset period\n if lvl > target:\n period_start = None\n else:\n if period_start is None:\n period_start = time\n\n # Possible at end?\n if period_start is not None and period_start+length <= end:\n return period_start\n\n # Nothing found\n return None", "def valid_until(self):\n return self._valid_until / 10000000", "def get_period(self, df, key):\n daysFiltered = self.filter_days(df, **self.periods[key]['days'])\n perStart = string_date(self.periods[key]['dates'][0], df.index[0].date().year)\n perEnd = string_date(self.periods[key]['dates'][1], df.index[0].date().year)\n if self.spans_year(key):\n # slice period for beginning of the year\n fall = daysFiltered.ix[perStart:string_date('12/31', df.index[0].date().year)]\n fall = fall.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])\n fall = fall.sort_index()\n # slice period for end of the year\n spring = fall.append(daysFiltered.ix[string_date('1/1', df.index[0].date().year):perEnd])\n spring = spring.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])\n spring = spring.sort_index()\n return(spring.rename_axis({'data': key}, axis=1))\n else:\n df_temp = daysFiltered.ix[perStart:perEnd]\n df_temp = df_temp.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])\n return(df_temp.rename_axis({'data': key}, axis=1))", "def get_debt_state(member, limit_year, limit_month):\n if member.first_payment_year is None:\n # never paid! using registration date to start with\n yearmonths_paid = set()\n year_to_check = member.registration_date.year\n month_to_check = member.registration_date.month\n else:\n # build a set for the year/month of paid quotas\n quotas = Quota.objects.filter(member=member).all()\n yearmonths_paid = {(q.year, q.month) for q in quotas}\n\n year_to_check = member.first_payment_year\n month_to_check = member.first_payment_month\n\n # verify the limit is after member started paying\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n return []\n elif year_to_check > limit_year:\n return []\n\n # build a set of all the year/month the member should have paid up to (including) the limit\n should_have_paid = set()\n while True:\n should_have_paid.add((year_to_check, month_to_check))\n year_to_check, month_to_check = increment_year_month(year_to_check, month_to_check)\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n break\n elif year_to_check > limit_year:\n break\n\n return sorted(should_have_paid - yearmonths_paid)", "def period_check(init_dates, fcst_dates):\n check_dates(init_dates)\n check_dates(fcst_dates)\n \n if max(init_dates) > min(fcst_dates):\n raise ValueError('Forecast date, %s comes before initialisation '\\\n 'date, %s.' 
% (min(fcst_dates), max(init_dates)))\n\n fcst_dates = change_zeroth_hour(fcst_dates)\n return init_dates, fcst_dates", "def period(self):\n return f\"{self.measurement_date.year}/{self.measurement_date.month}\"", "def determine_periods(obj):\n return obj[len(obj)-1][\"period\"]", "def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge 
that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines", "def periods(self) -> localedata.LocaleDataDict:\n try:\n return self._data['day_periods']['stand-alone']['wide']\n except KeyError:\n return localedata.LocaleDataDict({}) # pragma: no cover", "def period_dates(period):\n end = date.today() - timedelta(days=1) # yesterday\n\n if period == LAST_7_DAYS:\n start = end - timedelta(days=7)\n elif period == LAST_30_DAYS:\n start = end - timedelta(days=30)\n elif period == LAST_90_DAYS:\n start = end - timedelta(days=90)\n elif ALL_TIME:\n start = settings.GA_START_DATE\n\n return start, end", "def get_period(year, month):\n first_weekday, days = calendar.monthrange(year, month)\n first = datetime.date(year=year, month=month, day=1)\n last = datetime.date(year=year, month=month, day=days)\n return first, last", "def getReportingPeriod(begin, end, week, month, year, last):\n if begin and end:\n rval = (datetime.strptime(begin, '%Y-%m-%d'),\n datetime.strptime(end, '%Y-%m-%d'))\n elif week or month or year:\n if last:\n rval = 'Last'\n else:\n rval = 'This'\n if week:\n rval += 'Week'\n elif month:\n rval += 'Month'\n else:\n rval += 'Year'\n else:\n rval = 'Yesterday'\n return rval", "def test_is_active_active_not_between(self) -> None:\n today = date.today()\n start_date = today - timedelta(days=today.weekday() - 2)\n end_date = today - timedelta(days=today.weekday() - 1)\n mode = HolidayMode(True, start_date, end_date, 15)\n self.assertFalse(mode.is_applied)", "def _getTimePeriod(self):\n if isinstance(self.period, tuple):\n period = self.soapCustomDateRange % \\\n (self.soapCustomDate % (self.period[1].day,\n self.period[1].month,\n self.period[1].year),\n self.soapCustomDate % (self.period[0].day,\n self.period[0].month,\n self.period[0].year))\n else:\n period = self.soapPredefinedTime % self.period\n self.logger.debug(\"period = %s\", period)\n return period", "def availableWorkersDuringPeriod(self, begin, end):\n availableWorkers = []\n for worker in self._workers:\n if worker.availableInPeriod(begin, end):\n availableWorkers.append(worker)\n return availableWorkers", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def get_time_period(value):\n\t\tfor time_period in 
TimePeriod:\n\t\t\tif time_period.period == value:\n\t\t\t\treturn time_period\n\t\traise ValueError('{} is not a valid TimePeriod'.format(value))", "def two_factor_grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"two_factor_grace_period\")", "def two_factor_grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"two_factor_grace_period\")", "def valid_from(self) -> datetime:\n return self._valid_from", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def limit_date_range_from(self):\n return self._limit_date_range_from" ]
[ "0.82862264", "0.6872075", "0.65232825", "0.6370569", "0.6290718", "0.60823673", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6040515", "0.6018042", "0.6012301", "0.5940794", "0.59097856", "0.5859212", "0.5821298", "0.5755713", "0.57361776", "0.57259804", "0.5627092", "0.5623561", "0.56206757", "0.5603892", "0.55797243", "0.557284", "0.551847", "0.5513599", "0.54543895", "0.5438986", "0.5431954", "0.542451", "0.5408851", "0.5408489", "0.5381044", "0.5378068", "0.5363383", "0.5348973", "0.53457713", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5343646", "0.5300969", "0.5296546", "0.5283699", "0.5280615", "0.5269267", "0.52642983", "0.523902", "0.52314234", "0.522582", "0.5215747", "0.5184535", "0.5178225", "0.5167521", "0.5165441", "0.5136053", "0.510341", "0.5097612", "0.50848705", "0.5083586", "0.50773853", "0.50672716", "0.5067223", "0.5059559", "0.50570977", "0.5055609", "0.5054063", "0.5043461", "0.50415504", "0.5023548", "0.50187474", "0.5013629", "0.5009764", "0.50083405", "0.4998141", "0.4996523", "0.49894312", "0.4981192", "0.49754077", "0.49590197", "0.49590197", "0.49558988", "0.49378783", "0.49378783", "0.49378783", "0.49378783", "0.49354696" ]
0.8423247
0
Returns the accounting period that is currently valid. An accountingPeriod is valid when the current date lies between its begin and end dates.
def get_all_prior_accounting_periods(target_accounting_period):
    accounting_periods = []
    for accounting_period in AccountingPeriod.objects.all():
        if accounting_period.end < target_accounting_period.begin:
            accounting_periods.append(accounting_period)
    if accounting_periods == []:
        raise AccountingPeriodNotFound("Accounting Period does not exist")
    return accounting_periods
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_valid_accounting_period():\n current_valid_accounting_period = None\n for accounting_period in AccountingPeriod.objects.all():\n if accounting_period.begin < date.today() and accounting_period.end > date.today():\n return accounting_period\n if not current_valid_accounting_period:\n raise AccountingPeriodNotFound()", "def getCurrentValidAccountingPeriod():\n currentValidAccountingPeriod = None\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.begin < date.today() and accountingPeriod.end > date.today():\n return accountingPeriod\n if currentValidAccountingPeriod == None:\n raise NoFeasableAccountingPeriodFound()", "def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end", "def current_period(self):\n return self._current_period", "def getAllPriorAccountingPeriods(targetAccountingPeriod):\n currentValidAccountingPeriod = None\n accountingPeriods = []\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.end < targetAccountingPeriod.begin:\n accountingPeriods.append(accountingPeriod)\n if accountingPeriods == []:\n raise NoPriorAccountingPeriodFound()\n return accountingPeriods", "def period(self):\n return self.__period", "def getPeriod(self):\n return StripePeriod(self.base.get(\"period\", []))", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def __get_period(self):\n return self.__period", "def planning_period(self):\n return self._planning_period", "def get_chart_period(self,req):\n now=int(DATE())\n period=INT(req.period) # allow for it having been a string\n if period>9999: # assume it is a month\n if period<(now//100): # a valid complete previous month\n prior=True# this is a previous month\n else:\n period=now//100 # default to current month\n prior=False\n start=period*100+1\n end=self.nextperiod(period)*100+1\n else: # assume it is a year\n if period and (period<(now//10000)): # a prior year\n prior=True# this is a previous year\n else:\n##\n# period=now//100 # default to current month\n# prior=False\n# start=period*100+1\n# end=self.nextperiod(period)*100+1\n##\n period=now//10000 # default to current year\n prior=False\n start=period*10000+101\n end=self.nextperiod(period)*10000+101\n return period,start,end,prior", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "def get_interval(self):\n return self._period", "def get_period(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.PERIOD_INVALID\n res = self._period\n return res", "def get_period_range(self, period, start, end, 
inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # inclusive_start means that the result set will include the whole period\n # containing the start date. Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals", "def get_period_guarantee_faithful_compliance(self):\n return ceiling(self.get_period_faithful_compliance, 3)", "def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0", "def expected_last_period_end(self):\n return self._expected_last_period_end", "def getBeginEnd(self):\n if (self.dr_type == choices.DATE_RANGE_TYPE_FIXED):\n return self.begin, self.end\n\n elif (self.dr_type == choices.DATE_RANGE_TYPE_VARIABLE):\n end = datetime.now()\n\n if (self.unit == choices.TIME_UNIT_DAY):\n begin = end - relativedelta(days=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_WEEK):\n begin = end - relativedelta(weeks=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_MONTH):\n begin = end - relativedelta(months=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_YEAR):\n begin = end - relativedelta(years=self.quantity)\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'unit' must be a numeric\"\n \" value in: {units}.\".format(units=\", \".join([\n \"{const} ({name})\".format(const=unit, name=unit_name)\n for unit, unit_name in choices.TIME_UNIT\n if unit is not None]))\n )\n\n return begin, end\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'dr_type' must be one of:\"\n \" {const_fixed} (fixed range) or {const_dynamic}\"\n \" (dynamic range).\".format(\n const_fixed=choices.DATE_RANGE_TYPE_FIXED,\n const_dynamic=choices.DATE_RANGE_TYPE_VARIABLE\n ))", "def _get_period(self, cr, uid, context={}):\n\n account_period_obj = self.pool.get('account.period')\n ids = account_period_obj.find(\n cr, uid, time.strftime('%Y-%m-%d'), context=context)\n period_id = ids[0]\n return period_id", "def real_period(self):\n return max(\n self.period * 
self.PERIOD_MARGIN_FACTOR -\n (self.max_lag if self.max_lag else self.lag * self.LAG_MARGIN_FACTOR),\n 0.0)", "def checkpoint_period_get(self):\n raise Exception(\"TODO\")", "def current(self) -> Optional['outputs.CommitmentPeriodResponse']:\n return pulumi.get(self, \"current\")", "def renewal_period(self) -> Optional[float]:\n return pulumi.get(self, \"renewal_period\")", "def valid_period(request):\n return request.param", "def valid_until(self) -> datetime:\n return self._valid_until", "def date_validity(self):\n return self._date_validity", "def period(self) -> int:", "def get_first_period(start_record, end_record, start_period, end_period):\n start_record, end_record, start_period, end_period = to_datetime(start_record, end_record, start_period, end_period)\n pspan = end_period - start_period\n delta_year = relativedelta(years=1)\n # what is the first day of year of the start of the period that fits the record?\n start_rec_year = start_record.year\n d = datetime(start_rec_year, start_period.month, start_period.day)\n if d < start_record:\n d = d + delta_year\n delta_years = start_period.year - d.year\n e = end_period + relativedelta(years=-delta_years)\n return (d, e)", "def orbital_period(self):\n return self._orbital_period", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")", "def get_period_guarantee_advance(self):\n return ceiling(self.scheduled_completion, 3)", "def _get_period(self, cr, uid, context=None):\n context = context or {}\n if context.get('period_id', False):\n return context['period_id']\n account_period_obj = self.pool.get('account.period')\n ctx = dict(context, account_period_prefer_normal=True)\n ids = account_period_obj.find(cr, uid, context=ctx)\n period_id = False\n if ids:\n period_id = ids[0]\n return period_id", "def sleepPeriodValidate(self):\n # sleep_validate = False (not in sleep period)\n # sleep_validate = True (in sleep period)\n \n sleep_validate = None\n pre_midnight = '23:59'\n midnight = '00:00'\n \n # check if out of sleep period\n if self.current_time >= self.sleep_stop and self.current_time < self.sleep_start:\n sleep_validate = False\n \n # check if in sleep period\n elif self.current_time >= self.sleep_start and self.current_time <= pre_midnight:\n 
sleep_validate = True \n elif self.current_time < self.sleep_stop and self.current_time > midnight:\n sleep_validate = True\n \n return sleep_validate", "def get_period_budgets(cls, now):\n limits_dict = {}\n strategies = cls.objects_visible.filter(is_distributed_evenly=True)\n strategies = cls.running(strategies)\n\n for strategy in strategies:\n limits_dict[strategy.public_id] = strategy.period_budget(now)\n\n log.info('[SPENDINGS] Period budgets calculated (currency): {0}'.format(limits_dict))\n\n # Cast to budget precision used in Redis\n return {strategy: cast_CPM_to_dbbid(cast_currency_to_CPM(budget)) for strategy, budget in limits_dict.items()}", "def getUpdatePeriod( self, obj=None ):\n if obj is not None:\n period = self.getSyndicationInfo(obj).period\n else:\n period = self.period\n return period", "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def billing_period_start(self): # ISO8601 or timestamp\n return self._safe_value(VAR_BILLINGPERIODSTART, int)", "def period(self):\n return float(self._period) / 1000", "def periodCheck(data):", "def test_list_grading_periods_accounts(self):\r\n account_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_accounts(account_id)", "def between(cls, begin_date: datetime.date, end_date: datetime.date) -> float:\n\n if begin_date > end_date:\n raise ValueError('End date must not be before begin date.')\n if begin_date == end_date:\n return 0\n data = cls.cumulative()\n first = data.get((begin_date.year, begin_date.month), None)\n last = data.get((end_date.year, end_date.month), None)\n if first is None or last is None:\n raise ValidationError(\"Inflation figures don't cover entire period requested: {} - {}\".format(begin_date,\n end_date))\n return (last / first) - 1", "def _period_from_date(self):\n if self.date['year']:\n if self.date['month']:\n if self.date['day']:\n self.period = Period.DAILY\n else:\n self.period = Period.MONTHLY\n else:\n self.period = Period.YEARLY\n else:\n self.period = Period.FULL", "def block_period_consumption(self):\n return self._safe_value(VAR_BLOCKPERIODCONSUMPTION, float)", "def current_effective_deadline(cls) -> float:", "def remaining_days(self):\n if self.trialing or self.trial_ended:\n return self.remaining_trial_days\n else:\n return self.remaining_days_in_current_period", "def get_periods():\n return [\n relativedelta(),\n relativedelta(days=6),\n relativedelta(months=1),\n relativedelta(months=3),\n relativedelta(years=1),\n relativedelta(years=5)\n ]", "def get_last_period(self, status_id, company_payroll_id, config=None):\n if not config:\n config = self.env['ka_hr_payroll.config'].default_config()\n\n date_now = datetime.now().date()\n date_config = datetime.strptime(\"{0}-{1}-{2}\".format(date_now.year, date_now.month, config.date_start),\n DATE_FORMAT)\n date_config_str = date_config.strftime(DATE_FORMAT)\n\n return self.search([\n ('date_start', '<=', fields.Date.today()),\n ('status_id', 'parent_of', status_id),\n ('state', '=', 'done'),\n ('state_rapel', '!=', '1'),\n ('date_done', '<', date_config_str),\n ('company_payroll_id', '=', company_payroll_id)\n ], limit=1, order='date_start desc')", "def kind(self):\n return DateValueTypes.PERIOD", 
"def get_period(self):\n raise NotImplementedError('Agent is an abstract base class')", "def InactiveNoEndDate(obj):\n if not obj.active_p:\n if not (obj.end_date):\n raise interface.Invalid(\n _(\"If a person is inactive End Date must be set\"), \n \"end_date\", \n \"active_p\")", "def availableWorkersDuringPeriod(self, begin, end):\n availableWorkers = []\n for worker in self._workers:\n if worker.availableInPeriod(begin, end):\n availableWorkers.append(worker)\n return availableWorkers", "def find_period_below(self, start, end, target, length):\n\n if start > end:\n raise ValueError(\"End needs to be after start!\")\n if length < 0:\n raise ValueError(\"Period length must be larger than zero!\")\n\n period_start = (start if self.get(start) <= target else None)\n\n start_ix = self._trace.bisect_right(start)\n end_ix = self._trace.bisect_left(end)\n for time, lvl in self._trace.items()[start_ix:end_ix]:\n # Period long enough?\n if period_start is not None:\n if time >= period_start + length:\n return period_start\n # Not enough space until end?\n elif time + length > end:\n return None\n # Above target? Reset period\n if lvl > target:\n period_start = None\n else:\n if period_start is None:\n period_start = time\n\n # Possible at end?\n if period_start is not None and period_start+length <= end:\n return period_start\n\n # Nothing found\n return None", "def valid_until(self):\n return self._valid_until / 10000000", "def get_time_period(value):\n\t\tfor time_period in TimePeriod:\n\t\t\tif time_period.period == value:\n\t\t\t\treturn time_period\n\t\traise ValueError('{} is not a valid TimePeriod'.format(value))", "def get_current_fiscal_year(self):\n current_date = datetime.today().date()\n for year in self.fiscal_years.all():\n if year.begin_date < current_date < year.end_date:\n return year\n return None", "def _getTimePeriod(self):\n if isinstance(self.period, tuple):\n period = self.soapCustomDateRange % \\\n (self.soapCustomDate % (self.period[1].day,\n self.period[1].month,\n self.period[1].year),\n self.soapCustomDate % (self.period[0].day,\n self.period[0].month,\n self.period[0].year))\n else:\n period = self.soapPredefinedTime % self.period\n self.logger.debug(\"period = %s\", period)\n return period", "def getReportingPeriod(begin, end, week, month, year, last):\n if begin and end:\n rval = (datetime.strptime(begin, '%Y-%m-%d'),\n datetime.strptime(end, '%Y-%m-%d'))\n elif week or month or year:\n if last:\n rval = 'Last'\n else:\n rval = 'This'\n if week:\n rval += 'Week'\n elif month:\n rval += 'Month'\n else:\n rval += 'Year'\n else:\n rval = 'Yesterday'\n return rval", "def in_grace_period_count(self):\n if \"inGracePeriodCount\" in self._prop_dict:\n return self._prop_dict[\"inGracePeriodCount\"]\n else:\n return None", "def get_period(self, df, key):\n daysFiltered = self.filter_days(df, **self.periods[key]['days'])\n perStart = string_date(self.periods[key]['dates'][0], df.index[0].date().year)\n perEnd = string_date(self.periods[key]['dates'][1], df.index[0].date().year)\n if self.spans_year(key):\n # slice period for beginning of the year\n fall = daysFiltered.ix[perStart:string_date('12/31', df.index[0].date().year)]\n fall = fall.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])\n fall = fall.sort_index()\n # slice period for end of the year\n spring = fall.append(daysFiltered.ix[string_date('1/1', df.index[0].date().year):perEnd])\n spring = spring.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])\n spring = 
spring.sort_index()\n return(spring.rename_axis({'data': key}, axis=1))\n else:\n df_temp = daysFiltered.ix[perStart:perEnd]\n df_temp = df_temp.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])\n return(df_temp.rename_axis({'data': key}, axis=1))", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def get_employees_born_in_period(cls, start_date, end_date,\n strategy=lazyload):\n cls._check_strategy(strategy)\n\n employees = db.session.query(Employee).options(\n strategy(Employee.department)\n ).filter(\n and_(\n Employee.date_of_birth > start_date,\n Employee.date_of_birth < end_date\n )\n ).all()\n return employees", "def dispatch_interval(self):\n start = self.start\n start_date = start.date()\n\n # add zero padding to make period_id 3 chars long\n period_id = str(self.period_id).zfill(3)\n\n if start.hour < 4 and period_id != \"001\":\n start_date = start_date - timedelta(days=1)\n\n # Concat zero-padded dates and add period id\n dispatch_interval = \"\".join([start_date.strftime(\"%Y%m%d\"), period_id])\n\n return dispatch_interval", "def get_debt_state(member, limit_year, limit_month):\n if member.first_payment_year is None:\n # never paid! using registration date to start with\n yearmonths_paid = set()\n year_to_check = member.registration_date.year\n month_to_check = member.registration_date.month\n else:\n # build a set for the year/month of paid quotas\n quotas = Quota.objects.filter(member=member).all()\n yearmonths_paid = {(q.year, q.month) for q in quotas}\n\n year_to_check = member.first_payment_year\n month_to_check = member.first_payment_month\n\n # verify the limit is after member started paying\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n return []\n elif year_to_check > limit_year:\n return []\n\n # build a set of all the year/month the member should have paid up to (including) the limit\n should_have_paid = set()\n while True:\n should_have_paid.add((year_to_check, month_to_check))\n year_to_check, month_to_check = increment_year_month(year_to_check, month_to_check)\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n break\n elif year_to_check > limit_year:\n break\n\n return sorted(should_have_paid - yearmonths_paid)", "def is_inactive(self):\n now = datetime.datetime.now()\n return not (self.start_date < now < self.end_date)", "def periods(self) -> localedata.LocaleDataDict:\n try:\n return self._data['day_periods']['stand-alone']['wide']\n except KeyError:\n return localedata.LocaleDataDict({}) # pragma: no cover", "def _get_default_period(self, cr, uid, context=None):\n context = context or {}\n if context.get('period_id', False):\n return context['period_id']\n account_period_obj = self.pool.get('account.period')\n ctx = dict(context, account_period_prefer_normal=True)\n ids = account_period_obj.find(cr, uid, context=ctx)\n period_id = False\n if ids:\n period_id = ids[0]\n return period_id", "def valid_from(self) -> datetime:\n return self._valid_from", "def find_suitable_period():\n # The highest acceptable factor will be the 
square root of the size.\n highest_acceptable_factor = int(math.sqrt(SIZE))\n\n # Too high a factor (eg SIZE/2) and the interval is too small, too \n # low (eg 2) and the period is too small.\n # We would prefer it to be lower than the number of VALID_CHARS, but more\n # than say 4.\n starting_point = len(VALID_CHARS) > 14 and len(VALID_CHARS)/2 or 13\n for p in range(starting_point, 7, -1) \\\n + range(highest_acceptable_factor, starting_point+1, -1) \\\n + [6,5,4,3,2]:\n if SIZE % p == 0:\n return p\n raise Exception, \"No valid period could be found for SIZE=%d.\\n\" \\\n \"Try avoiding prime numbers :-)\" % SIZE", "def failing_periods(self) -> 'outputs.DynamicThresholdFailingPeriodsResponse':\n return pulumi.get(self, \"failing_periods\")", "def validate_check_in_period(check_in_period):\n if not check_in_period:\n check_in_period = 30\n if not isinstance(check_in_period, int):\n try:\n check_in_period = int(check_in_period)\n except ValueError:\n print \"Incorrect check-in period given. Setting to 30.\"\n check_in_period = 30\n\n return check_in_period", "def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = 
datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines", "def period(self):\n return f\"{self.measurement_date.year}/{self.measurement_date.month}\"", "def quantaHandledByPeriod(self, period):\n\n start_datetime = self.start_time.to_python_datetime()\n end_datetime = self.end_time.to_python_datetime()\n\n total_quanta = 0\n\n # Iterate through the quanta of the period, while the starting_quanta is less\n # than the ending quanta\n\n quanta_start_time = period.start_time\n while quanta_start_time < period.end_time:\n quanta_end_time = quanta_start_time + timedelta(minutes=granularity_in_minutes())\n\n if start_datetime <= quanta_start_time < end_datetime:\n if start_datetime < quanta_end_time <= end_datetime:\n total_quanta = total_quanta + 1\n\n quanta_start_time = quanta_start_time + timedelta(minutes=granularity_in_minutes())\n\n return total_quanta", "def limit_date_range_from(self):\n return self._limit_date_range_from", "def determine_periods(obj):\n return obj[len(obj)-1][\"period\"]", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def grace_period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"grace_period\")", "def current_quarter_end(start_year:int, start_month:int, logger:lg.Logger = None) -> date:\n if logger:\n logger.info(F\"start year = {start_year}; start month = {start_month}\")\n end_year, end_month = next_quarter_start(start_year, start_month)\n # end date is one day back from the start of the next period\n return date(end_year, end_month, 1) - ONE_DAY", "def last(self) -> 'outputs.CommitmentPeriodResponse':\n return pulumi.get(self, \"last\")" ]
[ "0.84471315", "0.8381506", "0.68428826", "0.6510184", "0.6339413", "0.60962486", "0.60355204", "0.6016154", "0.6016154", "0.6016154", "0.6016154", "0.6016154", "0.6016154", "0.6016154", "0.6016154", "0.6013164", "0.5948316", "0.58576053", "0.58273315", "0.58267355", "0.572815", "0.5672116", "0.5642098", "0.5618393", "0.56165165", "0.549865", "0.54842216", "0.5463866", "0.5452018", "0.544054", "0.540237", "0.53940475", "0.5384737", "0.53807724", "0.536973", "0.53555244", "0.53511566", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.5345926", "0.53408724", "0.5314138", "0.5302331", "0.52880704", "0.52830076", "0.52730215", "0.52422166", "0.52128017", "0.5212548", "0.521189", "0.5178269", "0.51723397", "0.5168015", "0.5166653", "0.5156847", "0.5150747", "0.5137372", "0.5122274", "0.5120129", "0.5113371", "0.5078736", "0.50711745", "0.5070952", "0.5065349", "0.5038819", "0.50291747", "0.50278246", "0.50227565", "0.5016887", "0.5011094", "0.50102353", "0.49985716", "0.49718535", "0.49652025", "0.4963701", "0.49523288", "0.49407074", "0.49304017", "0.49261263", "0.49230066", "0.492173", "0.49210066", "0.4920434", "0.49172437", "0.48874632", "0.48860762", "0.48860762", "0.48860762", "0.48860762", "0.4880182", "0.48799425" ]
0.63429344
4
Transform an mp3 file into wav format by calling bash and using mpg123 or ffmpeg.
def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):
    if encoder == 'mpg123':
        bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]
    else:
        bash_command = ['ffmpeg', '-i', mp3_file, wav_file]
    subprocess.run(bash_command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each of the mp3 files in each subdirectory of data/fma_small\n file_list = glob.glob('./../data/fma_small/*/*.mp3')\n\n # Get the number of files N and initialize a counter\n N = len(file_list)\n counter = 0\n\n # For each file/filepath, convert that file to wav format and save it to data/wavs/*/*.wav (so as a wave file)\n for filepath in file_list:\n\n # Every 100 file conversions, print a progress update\n if counter % 50 == 49 and show_progress:\n progress = str(round(100 * counter / N, 2))\n print('File conversion ' + progress + '% complete.')\n\n # Get the file name from the path and define a new path for the wav file\n file_name = filepath[24:-4]\n new_path = './../data/wavs/' + file_name + '.wav'\n\n # Call the subprocess using ffmpeg to convert the file to wav format (and supress all the output)\n subprocess.call(['ffmpeg', '-i', filepath, new_path], stdout=devnull)\n\n # Increment the counter\n counter += 1", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def convert_to_mp3(self,path, filename):\n\n codec = \"libmp3lame\"\n mp3_filename = filename + \".mp3\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n mp3_filename\n ]\n\n return command", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def extract_audio_from(file, out_dir=''):\n output_filename = f'{os.path.join(out_dir, os.path.basename(file)[:-4])}.wav'\n os.system(f'ffmpeg -i {file} {output_filename}')\n return output_filename", "def analyze_mp3(mp3filespec):\n \n # Make a temporary working directory for storing the wav file\n # that soundstretch should analyze\n wavfilespec = tempfile.NamedTemporaryFile(suffix='.wav') \n \n # Use lame to make a wav representation of the mp3 file to be analyzed\n wav_command = 'sox %s %s' % (mp3filespec, wavfilespec.name)\n subprocess.call([wav_command], shell=True, stderr=open(os.devnull, 'w'))\n \n # Call soundstretch to analyze the wav file\n bpm_command = 'soundstretch %s -bpm' % wavfilespec.name\n p = subprocess.Popen([bpm_command], shell=True,stdout=subprocess.PIPE)\n output = p.communicate()[0]\n \n # Delete temporary working directory and its contents\n #shutil.rmtree(workingdir)\n\n bpm_suggestion = _get_bpm_from_soundstretch(output)\n\n return fit_bpm_in_window(bpm_suggestion)", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # 
codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b", "def extract_audio(file_name, audio_directory):\n basename = os.path.splitext(os.path.basename(file_name))[0]\n audio_file_name = audio_directory + '/' + basename + '.wav'\n subprocess.call(['ffmpeg', '-y', '-i', file_name, '-ac', '1', audio_file_name])\n return audio_file_name", "def mp3_to_wav(song_dir, snip_dir, bird_list_path='bird_list.txt'):\n if os.path.exists(snip_dir):\n shutil.rmtree(snip_dir)\n os.makedirs(snip_dir)\n with open(bird_list_path) as f:\n lines = f.readlines()\n bird_list = [line.rstrip('\\n') for line in lines]\n # Build the bird-labeled subdirectories in 'snip_dir'.\n _make_bird_dirs(snip_dir, birds_list)\n # Populate the subdirectory with recordings converted from .mp3 to .wav.\n for f in os.listdir(song_dir):\n bird = extract_bird_name(f)\n if bird in birds_list:\n index = birds_list.index(bird)\n wav_filename = 
os.path.splitext(f)[0].replace(' ', '_') + '.wav'\n orig = os.path.join(mp3_dir, f)\n new = os.path.join(snip_dir, str(index), wav_filename)\n # MP3-to-WAV conversion requires the ffmpeg package.\n call([\"ffmpeg\", \"-i\", orig, new])", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def convert_mp3_to_ogg(self, filename: str):\n mp3_path = os.path.join(self.directory, filename)\n ogg_path = mp3_path.replace(self.extension_mp3, self.extension_ogg)\n if os.path.isfile(ogg_path):\n # already done\n return\n command = [FFMPEG_BIN, '-i', mp3_path, ogg_path]\n pipe = sp.Popen(command, shell=False, stdout=sp.PIPE)\n pipe.wait()", "def convert_to_wav (filename, name, origpath, wavpath, mono):\n print(\"Converting {0} to .wav...\".format(filename))\n if not re.match(r\".*_\\d+$\",name):\n # If filenames do include video titles\n name = name.rsplit('_',1)[0]\n\n channel, vid_num = name.rsplit('_', 1)\n channel = re.sub(r'[^A-Za-z1-9]', '', channel)\n newname = '_'.join([channel, vid_num])\n\n exportname = newname + \".wav\"\n filepath = path.join(origpath, filename)\n\n if not path.exists(wavpath):\n makedirs(wavpath)\n exportPath = path.join(wavpath, exportname)\n sound = AudioSegment.from_file(filepath,\"mp4\")\n if mono == True:\n sound = sound.set_channels(1)\n sound.export(exportPath, format=\"wav\")", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def play_audio():\n directory = os.fsencode(MINI_PATH)\n print(directory)\n adp= []\n # lst = os.listdir(directory)\n # lst.sort()\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #print(file)\n\n if filename.endswith(\".mp3\"): \n adp.append(MINI_PATH+filename)\n #print(adp)\n adp.sort()\n print(\"ADP: \", adp)\n x = \"|\".join(adp)\n print( f'concat:{x}')\n subprocess.call(['ffmpeg', '-i', f'concat:{x}', '-acodec', 'copy', RESULT_PATH])\n \n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n print(filename)\n if filename.endswith(\".mp3\"):\n os.remove(MINI_PATH+filename)", "def mp3ogg(fname, datas):\n oggname = \"%s.ogg\" % fname[:-4]\n logger.info(\"(mp3ogg) encode [%s]\" % fname)\n logger.debug(\"(mp3ogg) oggenc binary path %s\" % settings.OGGENC)\n logger.debug(\"(mp3ogg) mpg123 binary path %s\" % settings.MPG123)\n\n command = [settings.OGGENC,\n \"--artist\", datas['artist'],\n \"--title\", datas['title'],\n \"--album\", datas['album'],\n \"--genre\", datas['genre'],\n \"--date\", datas['date'],\n \"--tracknum\", datas['tracknumber'],\n \"-o\", oggname,\n \"-\"]\n\n try:\n mpg = subprocess.Popen([settings.MPG123,\n \"-w\",\n \"-\",\n fname],\n stdout=subprocess.PIPE)\n\n ogg = subprocess.Popen(command,\n stdin=mpg.stdout, # pylint: disable-msg=E1101\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (stdout, stderr) = ogg.communicate()\n logger.debug(stdout)\n logger.error(stderr)\n result = oggname\n except:\n logger.error(\"(mp3ogg) subprocess failed on [%s]\" % fname)\n result = None\n\n if result:\n os.unlink(fname)\n\n return result", "def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == 
\"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()", "def convert_to_wav(txt_file, sph_path, target_dir):\n wav_dir = os.path.join(target_dir, 'wav/')\n txt_dir = os.path.join(target_dir, 'txt/')\n os.makedirs(wav_dir, exist_ok=True)\n os.makedirs(txt_dir, exist_ok=True)\n path_to_data = os.path.dirname(txt_file)\n\n def process(x):\n file_path = x[\"audio_file\"]\n text = x[\"transcription\"]\n start_time = x[\"start_time\"]\n duration = x[\"end_time\"] - start_time\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n file_name = str(start_time) + \"_\" + str(duration) + file_name\n text = text.strip().upper()\n with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:\n f.write(text)\n cmd = \"sox -v 0.6 -t wav {} -r {} -b 16 -c 1 -t wav {} trim {} {}\".format(\n os.path.join(path_to_data, file_path),\n args.sample_rate,\n os.path.join(wav_dir, file_name + \".wav\"),\n start_time,\n duration)\n subprocess.call([cmd], shell=True)\n print('Converting wav to wav for {}.'.format(txt_file))\n # generate processed data\n data = read_transcription_file(txt_file, sph_path)\n with ThreadPool(10) as pool:\n pool.map(process, data)", "def check_wav(song, source_folder, temp_folder, encoder='mpg123'):\n # Name of files\n song_name, extension = os.path.splitext(song)\n mp3_file = os.path.join(source_folder, song)\n if '.wav' != extension:\n wav_file = os.path.join(temp_folder, song_name + '.wav')\n try:\n if not os.path.isfile(wav_file):\n mp3_to_wav(\n mp3_file=mp3_file,\n wav_file=wav_file,\n encoder=encoder)\n else:\n pass\n except MemoryError:\n logger.error('MemoryError: %s MP3 couldn\\'t be transformed into WAV', song_name)\n else: # Already a wav file\n copyfile(mp3_file, os.path.join(temp_folder, song_name))", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. 
Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def main(directory, wavelength=16000, replace=True):\n\n if os.path.isdir(directory):\n # get the directory of mp3 files\n mpthree_files = find_directory__files(directory, 'mp3')\n\n # check whether there are mp3 files\n if len(mpthree_files) > 0:\n # converts all the mp3 files to wav files\n map(lambda x: convert_mp3_to_wav(x, replace=replace), mpthree_files.values())\n\n # now get the wav files after conversion(if any)\n wav_files = find_directory__files(directory, 'wav')\n\n # convert\n map(lambda x: convert_wavelength_file(x, wavelength=wavelength, replace=replace), wav_files.values())\n elif os.path.isfile(directory):\n\n # check if it's a wav\n filetype = find_filetype(directory)\n if filetype != 'wav':\n if filetype == 'mp3':\n convert_mp3_to_wav(directory, replace=replace)\n # get the new file name\n directory = directory.replace('mp3', 'wav')\n else:\n raise ValueError(\"Not a supported filetype at this moment\")\n\n # when filetype == wav or after converting from mp3 to wav\n convert_wavelength_file(directory, wavelength, replace=replace)\n else:\n raise ValueError(\"input is wrong\")", "def to_audio(self, _in, _out, bitrate, file_format):\n\n # Default output parameter\n # If not current directory, append '/'\n if os.path.isdir(_out):\n _out = '' if _out == '.' else _out + '/'\n _out += self.get_name_from_path(_in,\n replace=True) + '.' + file_format\n _out = _out.replace('//', '/')\n self.out = _out\n\n # File format unchecked for single inputs\n if not check_is_video(_in):\n msg = \" is not a supported media type\"\n self.abort_conversion(\n self.get_name_from_path(_in) + msg)\n\n \"\"\"\n else:\n base_name = os.path.basename(_out)\n ext = os.path.splitext(base_name)[1]\n _out = _out.replace(ext, '.mp3')\n \"\"\"\n commands = ['ffmpeg', '-i', _in,\n '-vn', '-ar', '44100',\n '-ac', '2', '-ab',\n bitrate, _out]\n try:\n self.run_convert_commands(commands)\n except FileNotFoundError as er:\n res = require_ffmepg()\n\n if not res:\n self.abort_conversion(\"Dependecy not installed.\")", "def encodeMP3(self, wavf: str, dstf: str, cover: str, meta: TrackMeta) -> None:\n FNULL = open(os.devnull, 'w')\n subprocess.call(['lame', '-V2', wavf, dstf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n # tag MP3\n mm = TrackMeta(meta)\n mp3 = MP3(dstf, ID3=ID3)\n mp3[\"TIT2\"] = TIT2(encoding=3, text=mm.title())\n mp3[\"TPE1\"] = TPE1(encoding=3, text=mm.artist())\n mp3[\"TALB\"] = TALB(encoding=3, text=mm.album())\n mp3[\"TPE2\"] = TPE2(encoding=3, text=mm.albumartist())\n if mm.date():\n mp3[\"TDRC\"] = TDRC(encoding=3, text=mm.date())\n mp3[\"TRCK\"] = TRCK(encoding=3,\n text=mm.tracknumber() + \"/\" + mm.tracktotal())\n mp3[\"TPOS\"] = TPOS(encoding=3,\n text=mm.discnumber() + \"/\" + mm.disctotal())\n\n # composer\n if mm.composer():\n mp3[\"TCM\"] = TCM(encoding=3, text=mm.composer())\n\n # cover\n if cover:\n data = open(cover, 'rb').read()\n if cover.endswith('png'):\n mime = 'image/png'\n else:\n mime = 'image/jpeg'\n mp3.tags.add(APIC(encoding=3, mime=mime, type=3, desc=u'Cover', data=data))\n\n # save\n mp3.save()", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if 
os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def mpg_convert(input_path: str, output_path: str, check=True,\n verbose_level=0):\n if verbose_level > 0:\n v = '-v'\n else:\n v = '-q'\n os.system('mpg123 ' + v + ' -w ' + output_path + ' ' + input_path)\n if check:\n if not os.path.isfile(output_path):\n raise RuntimeError('Not able to convert file', input_path,\n output_path)", "def wav_wav(orig, dest, **_kwargs):\n\n # options = kwargs.get(\"tree\").cmd_options.get(\"options\", [])\n\n # first demux it to 16 bit 48khz\n dest_list = []\n for index, orig_elem in enumerate(tools.get_iter(orig)):\n tmp_dest = os.path.join(\n os.path.dirname(dest),\n \"{0}_{1}\".format(index, os.path.basename(dest)))\n cmd = \"ffmpeg -i {orig} -acodec pcm_s16le -ar 48000 {dest}\".format(\n dest=tmp_dest,\n orig=orig_elem)\n logger.debug(cmd)\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n continue\n dest_list.append(tmp_dest)\n\n if len(dest_list) > 1:\n cmd = \"sox {orig} {dest}\".format(\n orig=\" \".join(orig),\n dest=dest)\n logger.debug(cmd)\n try:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n else:\n os.rename(dest_list[0], dest)\n return dest", "def make_wav(text, speed=1.0, emotion='normal', output_file='__temp.wav', output_dir=os.getcwd()):\n open_jtalk = [OPENJTALK_BINPATH + '/open_jtalk']\n mech = ['-x', OPENJTALK_DICPATH]\n htsvoice = ['-m', OPENJTALK_VOICEPATH.format(emotion=emotion)]\n speed = ['-r', str(speed)]\n outwav = ['-ow', os.path.join(output_dir, output_file)]\n cmd = open_jtalk + mech + htsvoice + speed + outwav\n c = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n c.stdin.write(text.encode('utf-8'))\n c.stdin.close()\n c.wait()\n return os.path.join(output_dir, output_file)", "def convert(self):\n #lame --mp3input --silent -h -b BITRATE SOURCE TARGET\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n msg('command', command)\n error = check_call(command)\n if error != 0:\n raise TaskError(subprocess.CalledProcessError)\n self.success = True", "def _to_wav(self):\n self._status = 0\n fname = fm.file2wav(self.get_filename()) \n if fname != self.get_filename(): # can change the name\n self._set_filename(fname) # in case of wave transcoding\n self._status = 1", "def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not 
exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)", "def get_audio(name, n):\n audio_path = os.path.join(args.input_folder, name, \"audio.ogg\")\n if not os.path.exists(audio_path):\n ## Some folders have multiple .ogg files, so we need to first combine them into one file. Example:\n ## |── Universe\n ##  │   ├── aligned.swc\n ##  │   ├── audio1.ogg\n ##  │   ├── audio2.ogg\n ##  │   ├── audio3.ogg\n ##  │   ├── audio4.ogg\n ##  │   ├── audiometa.txt\n ##  │   ├── info.json\n ##  │   ├── wiki.html\n ##  │   ├── wiki.txt\n ##  │   └── wiki.xml\n\n multiple_ogg_files = []\n for i in range(1, 5):\n path = os.path.join(args.input_folder, name, \"audio\" + str(i) + \".ogg\")\n if os.path.exists(path):\n multiple_ogg_files.append(path)\n else:\n break\n if len(multiple_ogg_files) == 0:\n return\n elif len(multiple_ogg_files) == 1:\n os.system(\"cp \\\"\" + multiple_ogg_files[0] + \"\\\" \\\"\" + audio_path + \"\\\"\")\n else:\n tmp_file_name = \"ffmeg_inputs.txt\"\n print(\"tmp_file_name=\", tmp_file_name)\n with open(tmp_file_name, \"w\", encoding=\"utf-8\") as tmp_file:\n for path in multiple_ogg_files:\n tmp_file.write(\"file '\" + path + \"'\\n\")\n cmd = \"ffmpeg -f concat -i \\\"\" + tmp_file_name + \"\\\" -c copy \\\"\" + audio_path + \"\\\"\"\n print(cmd)\n os.system(cmd)\n\n output_audio_path = args.destination_folder + \"/audio/\" + str(n) + \".ogg\"\n os.system(\"cp \\\"\" + audio_path + \"\\\" \" + output_audio_path)", "def create_audio_file():\n # Get the response from boto3\n raw_audio = generate_audio()\n # pull the Audiostream object from the response from boto3\n raw_audio = raw_audio[\"AudioStream\"]\n # create output location\n # process the whole block\n with closing(raw_audio) as audio:\n with open(\"output_audio.mp3\", \"wb\") as file:\n file.write(raw_audio.read())", "def play_sound():\r\n try:\r\n\r\n file_name = r\"sound.mp3\"\r\n playsound.playsound(file_name)\r\n except OSError:\r\n AudioSegment.converter=r\"/Users/russelllamb/Downloads/ffmpeg\"\r\n sound=AudioSegment.from_mp3(r\"sound.mp3\")\r\n play(sound)", "def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res", "def convert(file):\n extension = file[-4:]\n if extension == '.wav':\n return file\n if not exists(file):\n raise IOError('%s file not found' % file)\n if not extension in SUPPORTED_EXTENSION:\n raise IOError('%s file format is not supported' % file)\n if not exists(CONVERTION_DIRECTORY):\n makedirs(CONVERTION_DIRECTORY)\n filename = splitext(basename(file))[0]\n path = join(CONVERTION_DIRECTORY, filename + '.wav')\n if (not exists(path)):\n logging.info(\"Converting file %s\" % file)\n CONVERTERS[extension](file).export(path, format='wav')\n return path", "def extract(path, quality=\"medium\"):\n\n try:\n file = ffmpeg.input(path)\n output_path = path[:-3] + \"ogg\"\n if os.path.exists(output_path):\n print(\n f\"[{colored('#','yellow')}] Audio file {colored(path2title(output_path),'green')} already exists\"\n )\n return output_path\n print(\n f\"\\n[{colored('+','green')}] Extracting audio for file %s\"\n % (colored(path2title(path), \"green\")),\n end=\"\",\n )\n from util import Animation\n\n anim = Animation()\n file.audio.output(\n output_path,\n acodec=\"libvorbis\",\n audio_bitrate=BITRATE * get_multiplier(quality),\n loglevel=0,\n ).run()\n 
anim.complete()\n print(\n f\"[{colored('+','green')}] Extraction completed for file %s\"\n % (colored(path2title(output_path), \"green\"))\n )\n\n except Exception as ex:\n print(\n f\"[{colored('-','red')}] There was an error extracting the audio for path {colored(path2title(output_path),'green')}: \",\n ex,\n )\n sys.exit(-1)\n\n return output_path", "def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. 
\"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def save_mp3(ndarray, sr, feature_name, out_path, x, y, new_labels, mp3_filename=None):\n import soundfile as sf\n\n def _save_mp3(source_path, out_path):\n cmd = [\n 'lame',\n '--preset',\n 'insane',\n str(source_path),\n str(out_path)\n ]\n errno = subprocess.call(cmd)\n if errno:\n print('{} encoding failed with code'.format(source_path), end=' ')\n print(errno)\n print('skipping...')\n return errno\n os.remove(source_path)\n return 0\n\n # this is kind-of standard\n if mp3_filename is None:\n mp3_filename = FeatureExtractor.get_file_name(x, feature_name, 'mp3')\n wav_filename = mp3_filename.replace('mp3', 'wav')\n sf.write(str(out_path / wav_filename), ndarray, sr) # write wav file\n errno = _save_mp3(out_path / wav_filename,\n out_path / mp3_filename) # load wav, encode as mp3 and remove wav file\n if errno:\n # if any error, then keep wav\n filename = wav_filename\n else:\n # non-error clause, then it was successfully exported to mp3\n filename = mp3_filename\n if new_labels is not None:\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def mp4_to_mp3(filepath):\n audio_clip = AudioFileClip(filepath)\n mp3_filename = filepath[:-3] + 'mp3'\n audio_clip.write_audiofile(mp3_filename)\n os.remove(filepath)\n audio_clip.close()", "def enregistre_audio(\n audio: AudioSegment, \n chemin:str = 'animalese.wav',\n format:str ='wav'\n ) -> AudioSegment:\n\n which = pydub.utils.which\n\n if which(\"avconv\"):\n app = \"avconv\"\n elif which(\"ffmpeg\"):\n app = \"ffmpeg\"\n elif format not in {'raw', 'wav'}:\n raise FileNotFoundError(\"ffmpeg/avconv introuvable.\") \n\n return audio.export(chemin, format=format)", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "async def transcribe_wav(args: argparse.Namespace, core: Voice2JsonCore) -> None:\n from rhasspyasr import Transcription\n\n # Make sure profile has been trained\n assert core.check_trained(), \"Not trained\"\n\n # Get speech to text transcriber for profile\n transcriber = core.get_transcriber(open_transcription=args.open, debug=args.debug)\n\n # Directory to report WAV file names relative to\n relative_dir = (\n None if args.relative_directory is None else Path(args.relative_directory)\n )\n\n try:\n if args.wav_file or args.stdin_files:\n # Read WAV file paths\n wav_files = args.wav_file\n if args.stdin_files:\n _LOGGER.debug(\"Reading file paths from stdin\")\n wav_files = itertools.chain(wav_files, sys.stdin)\n\n for wav_path_str in wav_files:\n wav_path_str = wav_path_str.strip()\n\n # Load and convert\n wav_path = Path(wav_path_str)\n _LOGGER.debug(\"Transcribing %s\", wav_path)\n\n wav_data = await core.maybe_convert_wav(wav_path.read_bytes())\n\n # Transcribe\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n if relative_dir 
is None:\n # Add name of WAV file to result\n result[\"wav_name\"] = wav_path.name\n else:\n # Make relative to some directory\n result[\"wav_name\"] = str(\n wav_path.absolute().relative_to(relative_dir.absolute())\n )\n\n print_json(result)\n else:\n # Read WAV data from stdin\n _LOGGER.debug(\"Reading WAV data from stdin\")\n\n if args.input_size:\n # Number of bytes is on separate line\n line = sys.stdin.buffer.readline().strip()\n if not line:\n return\n\n num_bytes = int(line)\n while num_bytes > 0:\n # Read in WAV\n wav_data = sys.stdin.buffer.read(num_bytes)\n while len(wav_data) < num_bytes:\n wav_data = sys.stdin.buffer.read(num_bytes - len(wav_data))\n\n # Transcribe\n wav_data = await core.maybe_convert_wav(wav_data)\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n print_json(result)\n\n # Next WAV\n line = sys.stdin.buffer.readline().strip()\n if not line:\n break\n\n num_bytes = int(line)\n else:\n # Load and convert entire input\n wav_data = await core.maybe_convert_wav(sys.stdin.buffer.read())\n\n # Transcribe\n transcription = (\n transcriber.transcribe_wav(wav_data) or Transcription.empty()\n )\n result = dataclasses.asdict(transcription)\n\n print_json(result)\n finally:\n transcriber.stop()", "def webm_to_wav(webm_file: str):\n wav_file = webm_file.replace(\".webm\", \".wav\")\n wav = AudioSegment.from_file(webm_file)\n wav.export(wav_file, format=\"wav\")\n return wav_file", "def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()", "def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "def 
setup_audio(self):\n\t\t\n\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/setup_audio.sh'\n\t\tsubprocess.call([path_to_file])", "def wav2mfcc(file_path, max_len=44, n_mfcc=20):", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def remux_audio(filename, title):\n dbg(\"starting remux\")\n temp_file = filename + \".\" + str(random.randint(10000, 99999))\n os.rename(filename, temp_file)\n meta = extract_metadata(title)\n metadata = [\"title=%s\" % meta[\"title\"]]\n\n if meta[\"artist\"]:\n metadata = [\"title=%s\" % meta[\"title\"], \"-metadata\",\n \"artist=%s\" % meta[\"artist\"]]\n\n cmd = [g.muxapp, \"-y\", \"-i\", temp_file, \"-acodec\", \"copy\", \"-metadata\"]\n cmd += metadata + [\"-vn\", filename]\n dbg(cmd)\n\n try:\n with open(os.devnull, \"w\") as devnull:\n subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)\n\n except OSError:\n dbg(\"Failed to remux audio using %s\", g.muxapp)\n os.rename(temp_file, filename)\n\n else:\n os.unlink(temp_file)\n dbg(\"remuxed audio file using %s\" % g.muxapp)", "def _process_utterance(lf0_dir, mgc_dir, bap_dir, cmp_dir, linear_dir, basename, wav_path, text, hparams):\n\n\tif hparams.trim_silence:\n\t\ttar_wavfile = wav_path[:-4] + \"_trim.wav\"\n\t\tprint(\"raw wav path:%s\" % wav_path)\n\t\twav_raw, fs = sf.read(wav_path)\n\t\twav_trim = audio.trim_silence(wav_raw, hparams)\n\t\tsf.write(tar_wavfile, wav_trim, fs)\n\n\t\twav_path = tar_wavfile\n\n\tnFFTHalf, alpha, bap_dim = audio.get_config(hparams.sample_rate)\n\n\tmcsize = hparams.num_mgc - 1\n\n\tfilename = basename #os.path.basename(wav_path).split(\".\")[0]\n\n\tprint('extract feats for %s' % wav_path)\n\n\t# extract f0,sp,ap\n\tos.system(\"analysis %s %s/%s.f0 %s/%s.sp %s/%s.bapd\" %\n\t\t\t\t (wav_path, lf0_dir, filename,\n\t\t\t\t mgc_dir, filename, bap_dir, filename)) # get float64???\n\n # interpolate f0\n\tf0 = np.fromfile(\"%s/%s.f0\" % (lf0_dir, filename),dtype=np.float64)\n\tcontinuous_f0 = interp1d(f0, kind=\"slinear\")\n\tcontinuous_f0.tofile(\"%s/%s.f0c\" % (lf0_dir, filename))\n\n\t# convert f0 to lf0\n\tos.system(\"x2x +da %s/%s.f0c > %s/%s.f0a\" % (lf0_dir, filename, lf0_dir, filename))\n\tos.system(\"x2x +af %s/%s.f0a | sopr -magic 0.0 -LN -MAGIC -1.0E+10 > %s/%s.lf0\" % (\n\t\tlf0_dir, filename, lf0_dir, filename))\n\n\t# convert sp to mgc\n\tos.system(\"x2x +df %s/%s.sp | sopr -R -m 32768.0 | \"\n\t\t\t \"mcep -a %f -m %d -l %d -e 1.0E-8 -j 0 -f 0.0 -q 3 \"\n\t\t\t \"> %s/%s.mgc\" % (mgc_dir, filename, alpha, mcsize, nFFTHalf, mgc_dir, filename))\n\n\t# convert ap to bap\n\tos.system(\"x2x +df %s/%s.bapd > %s/%s.bap\" %\n\t\t\t (bap_dir, filename, bap_dir, filename))\n\n\t# 
merge mgc,lf0 and bap to cmp\n\tos.system(\"merge +f -s 0 -l 1 -L %d %s/%s.mgc < %s/%s.lf0 > %s/%s.ml\" %\n\t\t\t((mcsize+1), mgc_dir, filename, lf0_dir, filename, cmp_dir, filename))\n\tos.system(\"merge +f -s 0 -l %d -L %d %s/%s.ml < %s/%s.bap > %s/%s.cmp\" %\n\t\t\t(bap_dim, (mcsize+2), cmp_dir, filename, bap_dir, filename, cmp_dir, filename))\n\n\t#if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length:\n\t#\treturn None\n\n\t#Compute the linear scale spectrogram from the wav\n\twav = audio.load_wav(wav_path, hparams.sample_rate)\n\tlinear_spectrogram = audio.linearspectrogram(wav, hparams).astype(np.float32)\n\tlinear_frames = linear_spectrogram.shape[1]\n\n\t#sanity check\n\t#assert linear_frames == mel_frames\n\n\tlf0 = np.fromfile(\"%s/%s.lf0\" % (lf0_dir, filename), dtype=np.float32)\n\tmgc = np.fromfile(\"%s/%s.mgc\" % (mgc_dir, filename), dtype=np.float32)\n\tbap = np.fromfile(\"%s/%s.bap\" % (bap_dir, filename), dtype=np.float32)\n\tcmp = np.fromfile(\"%s/%s.cmp\" % (cmp_dir, filename), dtype=np.float32)\n\n\tcmp_dim = mcsize + 1 + 1 + bap_dim\n\tcmp_frames = cmp.shape[0] / cmp_dim\n\t#print(f0[:100])\n\t#print(continuous_f0[:100])\n\tprint(lf0.shape)\n\tprint(continuous_f0.shape)\n\tprint(mgc.shape)\n\tprint(bap.shape)\n\tprint(cmp_frames)\n\tprint(continuous_f0.dtype)\n\tprint(mgc.dtype)\n\tprint(bap.dtype)\n\tassert (mgc.shape[0]/(mcsize+1)) == (continuous_f0.shape[0]/1) == (bap.shape[0]/bap_dim) == cmp_frames\n\tassert cmp_dim == hparams.num_mels\n\t#assert len(out) >= cmp_frames * audio.get_hop_size(hparams)\n\n\t#time resolution adjustement\n\t#ensure length of raw audio is multiple of hop size so that we can use\n\t#transposed convolution to upsample\n\t#out = out[:mel_frames * audio.get_hop_size(hparams)]\n\t#assert len(out) % audio.get_hop_size(hparams) == 0\n\t#time_steps = len(out)\n\n\t# Write the spectrogram and audio to disk\n\t#audio_filename = 'audio-{}.npy'.format(index)\n\tcmp_mat = cmp.reshape(-1, cmp_dim)\n\tcmp_filename = 'cmp-{}.npy'.format(basename)\n\tlinear_filename = 'linear-{}.npy'.format(basename)\n\t#np.save(os.path.join(wav_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)\n\tnp.save(os.path.join(cmp_dir, cmp_filename), cmp_mat, allow_pickle=False)\n\tnp.save(os.path.join(linear_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)\n\t# Return a tuple describing this training example\n\treturn (cmp_filename, linear_filename, cmp_frames, text)", "def main():\n\n #If the input is not appropiate abort operations\n\n if not check_input():\n print(\"Inappropiate Input.\")\n return\n\n convert_to_wav(sys.argv[1])", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def get_audio(path):\n return send_from_directory('audio', path)", "def pron(word):\n\n return send_from_directory('prons', word + \".mp3\", mimetype=\"audio/mpeg\")", "def load_jam_audio(\n jam_in, audio_file, validate=True, strict=True, fmt=\"auto\", **kwargs\n):\n\n if isinstance(jam_in, jams.JAMS):\n jam = jam_in\n elif jam_in is None:\n jam = jams.JAMS()\n else:\n jam = jams.load(jam_in, validate=validate, strict=strict, fmt=fmt)\n\n y, sr = librosa.load(audio_file, **kwargs)\n\n if jam.file_metadata.duration is None:\n jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)\n\n return jam_pack(jam, _audio=dict(y=y, 
sr=sr))", "def main():\n\n import os\n import numpy as np\n\n # Re-set FFMPEG\n # ffmpeg = FFMPEG_info()\n # ffmpeg.set()\n\n # Import a file, and play the sound\n # data_dir = r'/home/thomas/Coding/scikit-sound/sksound/tests'\n data_dir = 'tests'\n in_file = 'a1.wav'\n\n full_file = os.path.join(data_dir, in_file)\n try:\n # mySound = Sound(full_file)\n # mySound.play()\n # time.sleep(mySound.duration)\n mySound2 = Sound()\n mySound2.play()\n except NoFFMPEG_Error:\n pass\n\n # Test with self-generated data\n rate = 22050\n dt = 1./rate\n t = np.arange(0,0.5,dt)\n freq = 880\n x = np.sin(2*np.pi*freq*t)\n sounddata = np.int16(x*2**13)\n\n in_sound = Sound(inData=sounddata, inRate=rate)\n in_sound.summary()\n in_sound.play()\n time.sleep(in_sound.duration)\n\n print('hi')\n\n # Test if type conversion works\n in_sound2 = Sound(inData=x, inRate=rate)\n in_sound2.play()\n\n # Test with GUI\n in_sound = Sound()\n in_sound.play()\n print(in_sound.summary())\n out = in_sound.get_info()\n print(out)\n in_sound.write_wav()", "def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)", "def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)", "def stt_google_wav(audio_fname):\n\n print \"Sending \", audio_fname\n #Convert to flac first\n filename = audio_fname\n del_flac = False\n if 'flac' not in filename:\n del_flac = True\n print \"Converting to flac\"\n print FLAC_CONV + filename\n os.system(FLAC_CONV + ' ' + filename)\n filename = filename.split('.')[0] + '.flac'\n\n f = open(filename, 'rb')\n flac_cont = f.read()\n f.close()\n \n req = urllib2.Request(GOOGLE_SPEECH_URL, data=flac_cont, headers={'Content-type': 'audio/x-flac; rate=44100;'})\n\n try:\n\tret = urllib2.urlopen(req)\n except urllib2.URLError:\n print \"Error Transcribing Voicemail\"\n sys.exit(1)\n\n responses=[]\n responses = ret.read()\n #print responses\n text = json.loads(json.dumps(responses))\n\n if del_flac:\n os.remove(filename) # Remove temp file\n\n return text", "def mels_to_audio(\n self, mels: np.ndarray, settings: typing.Optional[SettingsType] = None,\n ) -> np.ndarray:\n pass", "def concatenate_wav(path_master_wav_repo, path_target_wavfile, sox_script_file):\n\n\n # SANITY-CHECK:\n # Renaming the directories appropriately if not renamed already\n replace_keys(path_master_wav_repo, ' ', '_')\n replace_keys(path_master_wav_repo, '\\'', '') # replacing special characters\n replace_keys(path_master_wav_repo, '(', '') # replacing special characters\n replace_keys(path_master_wav_repo, ')', '') # replacing special characters\n replace_keys(path_master_wav_repo, '&', '_') # replacing special characters\n\n sox_script = open(sox_script_file, 'w') # ToDo: do sanity-check if the file exists or not\n\n\n for subdir, dirs, files in os.walk(path_master_wav_repo):\n for file in files:\n if file.endswith('.wav'):\n file_path = os.path.join(subdir, file)\n sox_script.write(file_path + ' \\\\' + '\\n')\n\n sox_script.close()\n subprocess.call(['sort', sox_script_file, '-o', sox_script_file])\n\n sox_script = open(sox_script_file, 'r')\n temp = sox_script.read()\n sox_script.close()\n sox_script = open(sox_script_file, 'w')\n sox_script.write('sox \\\\'+'\\n')\n sox_script.write(temp)\n sox_script.write(path_target_wavfile)\n sox_script.close()\n\n\n subprocess.call(['bash', sox_script_file])\n\n return None", "def avi2mpg(filename):\n assert filename.endswith('.avi')\n ofile = '%s.mpg' % os.path.splitext(filename)[0]\n 
run_shell_cmd('ffmpeg -y -i %s -qscale:v 1 %s' % (filename, ofile), ignore=True)\n return ofile", "def mp4ogg(fname):\n\n logger.info(\"(mp4ogg) encode [%s] with [%s]\" % (fname,\n settings.FFMPEG2THEORA))\n oggname = \"%s.oga\" % fname[:-4]\n\n rescom = subprocess.call([settings.FFMPEG2THEORA, fname])\n if rescom == 0:\n logger.debug(\"(mp4ogg) success on [%s]\" % fname)\n result = oggname\n else:\n logger.warning(\"(mp4ogg) subprocess failed on [%s]\" % fname)\n result = None\n\n if result:\n os.unlink(fname)\n\n return result", "def transcribe_audio_file(filename):\n url = 'https://api.nexiwave.com/SpeechIndexing/file/storage/' + USERNAME +'/recording/?authData.passwd=' + PASSWORD + '&auto-redirect=true&response=application/json'\n\n # To receive transcript in plain text, instead of html format, comment this line out (for SMS, for example)\n #url = url + '&transcriptFormat=html'\n\n\n # Ready to send:\n sys.stderr.write(\"Send audio for transcript with \" + url + \"\\n\")\n r = requests.post(url, files={'mediaFileData': open(filename,'rb')})\n data = r.json()\n transcript = data['text']\n foo = data['text']\n f = open('newf.txt', 'w')\n f.write(foo)\n f.close() \n # Perform your magic here:\n print \"Transcript for \"+filename+\"=\" + transcript", "def output_sound():\n try:\n subprocess.call(['ffplay', '-nodisp', '-autoexit', SOUND_FILE])\n except:\n pass", "def execute(self, **kwargs):\n if \"text\" not in kwargs:\n return ''\n phrase = str(kwargs[\"text\"])\n \n names = {\n \"callie\": \"6.5\",\n \"lawrence\": \"8.5\"\n }\n name = \"callie\"\n\n #TODO find a better way of implementing TTS\n ttsfd, ttsfile = tempfile.mkstemp(\".wav\")\n outfile, outname = tempfile.mkstemp(\".wav\")\n try:\n \n tts = sp.Popen(['/opt/swift/bin/swift', '-o', ttsfile, '-n', name, phrase], stdout=sp.PIPE, stderr=sp.PIPE)\n# cmd = ('/opt/swift/bin/swift \"' + phrase + '\" -o ' + ttsname + ' && sox -V1 ' +\n# tmp + ' -t wav ' + tmp2 + ' trim 8 ;')\n# p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n# out, err = p.communicate()\n# if len(err) > 0:\n# return err\n\n out, err = tts.communicate()\n if not err:\n sox = sp.Popen(['sox', '-V1', ttsfile, '-t', 'wav', outname, 'trim', names[name]], stdout=sp.PIPE, stderr=sp.PIPE)\n out, err = sox.communicate()\n\n player = gst.element_factory_make(\"playbin2\", \"player\")\n bus = player.get_bus()\n bus.add_signal_watch()\n\n mainloop = gobject.MainLoop()\n\n def quit(bus, message):\n mainloop.quit()\n\n bus.connect(\"message::eos\", quit)\n bus.connect(\"message::error\", quit)\n player.set_property(\"uri\", 'file://' + outname)\n player.set_state(gst.STATE_PLAYING)\n\n try:\n mainloop.run()\n finally:\n player.set_state(gst.STATE_NULL)\n\n finally:\n try:\n os.remove(ttsfile)\n except OSError as err:\n print e\n try:\n os.remove(outname)\n except IOError as err:\n print err", "def testRoundTrip(self):\n with self.test_session():\n path = os.path.join(\n resource_loader.get_data_files_path(), 'testdata/mono_10khz.wav')\n with open(path, 'rb') as f:\n original_contents = f.read()\n\n audio_op = ffmpeg.decode_audio(\n original_contents, file_format='wav', samples_per_second=10000,\n channel_count=1)\n encode_op = ffmpeg.encode_audio(\n audio_op, file_format='wav', samples_per_second=10000)\n encoded_contents = encode_op.eval()\n self.assertEqual(original_contents, encoded_contents)", "def audio_with_sox(path, sample_rate, start_time, end_time):\n try:\n with NamedTemporaryFile(suffix=\".wav\") as tar_file:\n tar_filename = tar_file.name\n sox_params = 
\"sox \\\"{}\\\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1\".format(path, sample_rate,\n tar_filename,\n start_time,\n end_time)\n os.system(sox_params)\n y = load_audio(tar_filename)\n except Exception as E:\n y = load_audio(path)\n return y", "def downmixWAV(self, wavf: str) -> None:\n # HACK: https://github.com/jiaaro/pydub/issues/129\n # FIXME: a reliable method to get number of wav channels\n multichannel = True\n try:\n w = wave.open(wavf, 'rb')\n if w.getnchannels() < 3:\n multichannel = False\n w.close()\n except Exception:\n pass\n if multichannel:\n newwavf = wavf[:-4] + \"-stereo.wav\"\n FNULL = open(os.devnull, 'w')\n subprocess.call(['ffmpeg', '-y', '-i', wavf, '-c:a', 'pcm_s24le', '-ac', '2', newwavf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n os.remove(wavf)\n os.rename(newwavf, wavf)", "def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')", "def path_to_audio(path):\n print(path)\n audio, sr = librosa.load(path, sr=SAMPLING_RATE)\n x = audio.shape[0]\n audio = np.reshape(audio, (x, 1))\n audio = mt.array_to_tensor(audio)\n print(audio.shape)\n #audio = tf.io.read_file(path)\n #audio, _ = tf.audio.decode_wav(audio, 1, SAMPLING_RATE)\n return audio", "def load_wav(wav_filepath):\n wv, _ = librosa.load(wav_filepath, sr=44100, mono=False) \n return wv", "def convert_to_m4a(self,path, filename):\n codec = \"aac\"\n m4a_filename = filename + \".m4a\"\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n m4a_filename\n ]\n\n return command", "def test_process_mono_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/mono.wav'\n self.default_kwargs['input_file'] = test_path\n self.default_kwargs['output_file'] = pathlib.Path(self.temp_file.name)\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def play(self):\n\n try:\n if self.source is None:\n # 
If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def audio_codec():\n arguments = map_arguments()\n if arguments['-c'] == 'ogg':\n return '.ogg', 'libvorbis', arguments['-q']\n elif arguments['-c'] == 'mp3':\n return '.mp3', 'libmp3lame', 
arguments['-q']", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None):\n\n try:\n audio = EasyMP3(filename)\n audio.tags = None\n audio[\"artist\"] = artist\n audio[\"title\"] = title\n if year:\n audio[\"date\"] = str(year)\n if album:\n audio[\"album\"] = album\n if track_number:\n audio[\"tracknumber\"] = track_number\n if genre:\n audio[\"genre\"] = genre\n if url: # saves the tag as WOAR\n audio[\"website\"] = url\n audio.save()\n\n if artwork_url:\n\n artwork_url = artwork_url.replace('https', 'http')\n\n mime = 'image/jpeg'\n if '.jpg' in artwork_url:\n mime = 'image/jpeg'\n if '.png' in artwork_url:\n mime = 'image/png'\n\n if '-large' in artwork_url:\n new_artwork_url = artwork_url.replace('-large', '-t500x500')\n try:\n image_data = requests.get(new_artwork_url).content\n except Exception as e:\n # No very large image available.\n image_data = requests.get(artwork_url).content\n else:\n image_data = requests.get(artwork_url).content\n\n audio = MP3(filename, ID3=OldID3)\n audio.tags.add(\n APIC(\n encoding=3, # 3 is for utf-8\n mime=mime,\n type=3, # 3 is for the cover image\n desc='Cover',\n data=image_data\n )\n )\n audio.save()\n\n # because there is software that doesn't seem to use WOAR we save url tag again as WXXX\n if url:\n audio = MP3(filename, ID3=OldID3)\n audio.tags.add(WXXX(encoding=3, url=url))\n audio.save()\n\n return True\n\n except Exception as e:\n puts(colored.red(\"Problem tagging file: \") + colored.white(\"Is this file a WAV?\"))\n return False", "def transcribe_wav( local_file_path, gcp_credentials_path=None, language_code=\"en-US\", client=None ):\n SEGMENT_SIZE = 55 * 1000 # 55 seconds\n OVERLAP_SIZE = 5 * 1000 # 5 seconds\n\n #\n # Instantiate a client\n #\n if client is None:\n client = create_api_client( gcp_credentials_path )\n\n #\n # Build the request. Because we only support WAV, don't need to define encoding\n # or sample rate.\n #\n config = {\n \"model\": \"video\", # premium model, but cost is basically nothing for single user anyway. Works MUCH better.\n \"language_code\": language_code,\n \"enable_word_time_offsets\": True,\n }\n\n #\n # GCP inline audio is restricted to just one minute. To avoid needing to setup\n # a GCP bucket, we'll split any provided audio files into 55-second chunks with\n # 5 seconds of overlap (since we'll probably split a word). 
IE, chunk 1 is from\n # 0:00 to 0:55, two is from 0:50 to 1:45, etc...\n #\n full_text = \"\"\n time_map = []\n full_recording = pydub.AudioSegment.from_file( local_file_path, format=\"wav\" )\n full_duration_ms = len( full_recording )\n offset = 0\n while offset < full_duration_ms:\n\n # If we're splitting into chunks, insert a hint\n if offset > 0:\n full_text += \" \" + SPLICE_STR + \" \"\n time_map.append( ( int( offset / 1000 ), SPLICE_STR ) )\n\n # Segment the clip into a RAM file\n this_clip = full_recording[ offset : min( offset + SEGMENT_SIZE, full_duration_ms ) ]\n segment_wav = io.BytesIO()\n this_clip.export( segment_wav, format=\"wav\" )\n segment_wav.seek(0)\n audio = { \"content\": segment_wav.read() }\n\n #\n # Submit the request & wait synchronously\n #\n operation = client.long_running_recognize( config, audio )\n response = operation.result()\n\n #\n # Process the response. Only take the first alternative.\n #\n for result in response.results:\n if len( result.alternatives ) < 1:\n continue\n best_guess = result.alternatives[0]\n full_text += best_guess.transcript\n time_map.extend( [ ( x.start_time.seconds + int( offset / 1000 ), x.word ) for x in best_guess.words ] )\n\n # Next offset\n offset += ( SEGMENT_SIZE - OVERLAP_SIZE )\n\n return ( full_text, time_map )", "def play(path):\n sound = AudioSegment.from_mp3(path)\n playback.play(sound)", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for 
segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def convert2mel(audio,base_path,fs, n_fft,fmax,n_mels,hop_length_samples, window_lenght,type_training):\n\n path = os.path.join(base_path, audio)\n if type_training != \"train\":\n if os.path.isfile(os.path.join(base_path,\"processed_wavs_train\",audio)):\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_train\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_test\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data, _ = librosa.core.load(path, sr=fs, res_type=\"kaiser_best\")\n data = normalize_amplitude(data)\n\n powSpectrum = np.abs(stft(data+ 0.00001,n_fft,hop_length = hop_length_samples, win_length = window_lenght, window = windowing(window_lenght, sym=False), center=True, pad_mode='reflect'))**2\n\n mels = melspectrogram(y= None,n_fft=n_fft ,sr=fs ,S= powSpectrum, hop_length= hop_length_samples ,n_mels=n_mels,fmax=fmax , fmin = 0.0).T\n mels = librosa.core.power_to_db(mels, ref=np.min(mels))\n mels = mels / np.max(mels)\n\n return mels.T", "def convert_and_move_file (filename, origpath, wavpath, mp4path, mono):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav (filename, name, origpath, wavpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n oldlocation = path.join(origpath, filename)\n newlocation = path.join(mp4path, filename)\n shutil.move(oldlocation, 
newlocation)", "def convert_text(self):\n if msg.askyesno(message=\"Do you want to save audio file?\"):\n text = self.textbox.get(\"1.0\", tk.END)\n self.file.text = text\n files = [('Sound', '*.mp3')]\n mp3_file = asksaveasfile(title=\"Save your mp3 file\", filetypes=files, defaultextension=files)\n if mp3_file is not None:\n self.file.convert_text_to_mp3(languages[self.language.get()], mp3_file.name)\n msg.showinfo(title=\"Text to audio\", message=\"Done\")", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, 
(margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def text_to_mp3(client: texttospeech.TextToSpeechClient,\n voice: texttospeech.VoiceSelectionParams,\n audio_config: texttospeech.AudioConfig,\n text: str,\n output_file_path: Path) -> None:\n lines = text.splitlines()\n\n logger.info(f'Synthesising {len(lines)} lines ...')\n\n output_file_log = output_file_path.parent / (output_file_path.stem + '_log.json')\n\n with output_file_path.open(mode='wb') as output_file:\n for (i, text_chunk) in enumerate(lines):\n # skip empty lines\n if len(text_chunk) > 0:\n input_text = texttospeech.SynthesisInput(text=text_chunk)\n try:\n logger.info(f'Synthesising speech for chunk `{i}`, size: `{len(text_chunk)}`')\n response = client.synthesize_speech(input=input_text, voice=voice, audio_config=audio_config)\n except Exception as e:\n # If a line could not be synthesised properly, return it along with the error message\n # It is possible that textract could not extract the text properly.\n logger.error(f'Speech synthesising failed! 
Chunk text: `{input_text}`\\nError: {e}\\n')\n _error_log = {\n 'chunk_number': i,\n 'chunk_length': len(text_chunk),\n 'chunk_text': str(text_chunk),\n 'Error message': traceback.format_exc()\n }\n with open(f'{output_file_log}', 'w') as log_out:\n json.dump(_error_log, log_out)\n continue\n output_file.write(response.audio_content)\n logger.info(f'Audio content written to `{output_file_path}`!')\n\n logger.info(f'Output saved to `{output_file_path}`')\n logger.info(f'logs at `{output_file_log}`')", "def save(filename_audio, filename_jam, jam, strict=True, fmt=\"auto\", **kwargs):\n\n y = jam.sandbox.muda._audio[\"y\"]\n sr = jam.sandbox.muda._audio[\"sr\"]\n\n # First, dump the audio file\n psf.write(filename_audio, y, sr, **kwargs)\n\n # Then dump the jam\n jam.save(filename_jam, strict=strict, fmt=fmt)", "def encode_audio(in_file, out_file):\r\n # construct the encoder\r\n autoencoder = keras.models.load_model(\"audio_autoencoder.model\")\r\n in_layer = keras.layers.Input(shape=(416, 1))\r\n encode = autoencoder.layers[1](in_layer)\r\n encode = autoencoder.layers[2](encode)\r\n encode = autoencoder.layers[3](encode)\r\n encode = autoencoder.layers[4](encode)\r\n encode = autoencoder.layers[5](encode)\r\n encode = autoencoder.layers[6](encode)\r\n encode = autoencoder.layers[7](encode)\r\n encode = autoencoder.layers[8](encode)\r\n encode = autoencoder.layers[9](encode)\r\n encode = autoencoder.layers[10](encode)\r\n encode = autoencoder.layers[11](encode)\r\n encode = autoencoder.layers[12](encode)\r\n encoder = keras.models.Model(in_layer, encode)\r\n\r\n # Read the file\r\n samp_rate, data = wavfile.read(in_file)\r\n # check if the file is mono or stereo\r\n if len(data.shape) == 2:\r\n data = np.concatenate(data)\r\n chans = 2\r\n else:\r\n chans = 1\r\n\r\n # Rescale integer samples over range [-32768,32767] to floats over range [0.0,1.0]\r\n data = data.astype('float32') / float(pow(2, 15))\r\n data += 1.0\r\n data = data / 2.0\r\n\r\n # Pad the samples with zeroes, if needed, to make the last encoding frame full\r\n padded = np.pad(data, (0, 416 - (len(data) % 416)), 'constant')\r\n\r\n # Construct input layer\r\n inputs = padded.reshape(len(padded) // 416, 416, 1)\r\n\r\n # Encode the data\r\n encoded = encoder.predict(inputs)\r\n\r\n # Save the encoded data, as well as the important parameters\r\n np.savez_compressed(out_file, data=encoded, rate=samp_rate, Type=1, channels=chans)", "def wav_to_raw(path, log=False):\n rate, data = wavfile.read(path)\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n try:\n if data.shape[1] == 2:\n # If stereo (2-channel), take the average of the two channels.\n data = 0.5 * (data[:, 0] + data[:, 1])\n if log:\n logging.info('Stereo audio')\n except IndexError:\n if log:\n logging.info('Mono audio')\n return rate, data" ]
[ "0.73525465", "0.7225452", "0.7119044", "0.70570403", "0.67239785", "0.6696764", "0.6579686", "0.6547333", "0.6539131", "0.6505683", "0.6481485", "0.64209044", "0.6362076", "0.6354701", "0.632691", "0.6275299", "0.62359387", "0.6216278", "0.6215746", "0.61728823", "0.6155799", "0.6154977", "0.61340415", "0.6125287", "0.61050206", "0.60945886", "0.6072155", "0.60684216", "0.60684216", "0.60557544", "0.6053467", "0.604538", "0.60409653", "0.6027764", "0.5987558", "0.59873974", "0.59644055", "0.5963298", "0.59589297", "0.59359926", "0.5931909", "0.5915822", "0.58688855", "0.58459353", "0.58223444", "0.58203864", "0.58193636", "0.58147824", "0.5812108", "0.5777394", "0.5773733", "0.57648647", "0.57339704", "0.57298374", "0.5728567", "0.5728271", "0.5708801", "0.56893677", "0.5678529", "0.5664465", "0.566363", "0.5662083", "0.56588143", "0.56450695", "0.5641505", "0.5635191", "0.5629698", "0.5610478", "0.5598876", "0.55977076", "0.5597488", "0.55618393", "0.5556827", "0.5553541", "0.55526215", "0.55238634", "0.55171514", "0.5505199", "0.55034196", "0.54997575", "0.54846406", "0.5481474", "0.5480482", "0.547962", "0.5478493", "0.54772556", "0.5474178", "0.54688716", "0.54688287", "0.5467219", "0.5467049", "0.54613817", "0.5459846", "0.54512024", "0.5448733", "0.5447393", "0.5444183", "0.5442604", "0.5438985", "0.5438186" ]
0.8660163
0