Dataset columns:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
metadata: dict
negatives: list (30 items per row)
negative_scores: list (30 items per row)
document_score: string (length 4 to 10)
document_rank: string (2 classes)
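Each row below pairs a docstring-style query with a positive code document, 30 hard-negative code snippets, their similarity scores, and the score and rank of the positive document; the metadata field marks the training objective as a (query, document, negatives) triplet. As a minimal sketch of how a dataset with this layout could be loaded and inspected with the Hugging Face datasets library (the dataset identifier below is a placeholder, not the real path, and the field access simply assumes the column types listed above):

from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path.
ds = load_dataset("org-name/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                  # natural-language / docstring query
print(row["document"])               # positive code snippet
print(len(row["negatives"]))         # 30 hard-negative code snippets
print(row["negative_scores"][:3])    # one retrieval score per negative
print(row["document_score"], row["document_rank"])
print(row["metadata"]["objective"])  # e.g. {"triplet": [["query", "document", "negatives"]], ...}

The triplet entry in the objective suggests these rows are intended for contrastive training of a text-to-code retrieval model, with document_score and negative_scores recording how the positive and the mined negatives were ranked.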
Sets the player_a_rating_adjustment of this Game.
def player_a_rating_adjustment(self, player_a_rating_adjustment): self._player_a_rating_adjustment = player_a_rating_adjustment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def player_a_rating(self, player_a_rating):\n\n self._player_a_rating = player_a_rating", "def player_b_rating_adjustment(self, player_b_rating_adjustment):\n\n self._player_b_rating_adjustment = player_b_rating_adjustment", "def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating", "def a_rate(self, a_rate):\n\n self._a_rate = a_rate", "def adjust(self, rating, series):\n return series[0] - self.expect(rating, series[1])", "def adjust(self, rating, series):\n return series[0] - self.expect(rating, series[1])", "def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass", "def updateScore(self, player: int) -> None:\n\n if player == 1:\n self._score[0] += 1\n elif player == 2:\n self._score[1] += 1\n\n # logging\n logger.info(\"Player {winner} has scored a goal. Score: {score}\", winner=player, score=str(self._score))", "def adjust_score(self):\n self.score += game.temporary_score", "def objective_player_score(self, objective_player_score):\n\n self._objective_player_score = objective_player_score", "def _adjust_score(self, my_choice, their_choice):\n self._score += p.params['score_matrix'][my_choice][their_choice]\n self._score -= p.params['loss_per_tick']", "def combat_player_score(self, combat_player_score):\n\n self._combat_player_score = combat_player_score", "def setAssaultStrength(self, ratio):\n assaultStrength = int(float(self.maxAssault) * ratio)\n if self.isAssault == 1:\n self.assaultStrength = assaultStrength\n else:\n self.assaultStrength = assaultStrength + self.getPersonStrength()", "def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return", "def player_a_points(self, player_a_points):\n\n self._player_a_points = player_a_points", "def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id", "def increase(self,player):\n\n if self.level is not None:\n increase_roll = (random.randint(0,player.level))\n\n if skill.level < (player.level/2):\n bonus_threshold = .5\n else:\n bonus_threshold = .75\n\n if increase_roll/player.level >= bonus_threshold:\n skill.level +=2\n else:\n skill.level +=1\n\n return skill.level\n\n else:\n return None", "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games", "def setArmor(self, armor):\n self.av = armor", "def setA(self, a):\n\t\tself.a = int(a)", "def player_a_name(self, player_a_name):\n\n self._player_a_name = player_a_name", "def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)", "def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)", "def rating(self, rating):\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating > 5): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value less than or equal to `5`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating < 1): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value greater than or equal to 
`1`\") # noqa: E501\n\n self._rating = rating", "def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()", "def set_player(self, player):\n\n self._player = player", "def adjust_ace(person):\n while person.total > GOAL_TOTAL() and person.aceCount != 0:\n person.total -= ACE_MODIFIER()\n person.aceCount -= 1\n print(f\"\\nAn Ace value in this player's hand has been adjusted to \"\n f\"1 for a new total of {person.total}\")", "def set_adaptive_padding(self, adaptive_padding):\n if adaptive_padding<4 and adaptive_padding != -1:\n raise ValueError(\"may confused with channel, adaptive padding must bigger than 4\")\n self.adaptive_padding = adaptive_padding" ]
[ "0.81710166", "0.7350537", "0.6238224", "0.5933093", "0.5733358", "0.5733358", "0.5576456", "0.55176663", "0.54827565", "0.54711294", "0.54251564", "0.539781", "0.53230685", "0.5264612", "0.52562875", "0.52448636", "0.52171713", "0.5096457", "0.5096457", "0.5091329", "0.5078149", "0.5059322", "0.49906597", "0.49848524", "0.49710244", "0.49532282", "0.49476796", "0.49376494", "0.48455402", "0.48432893" ]
0.9040026
0
Sets the player_b_rating_adjustment of this Game.
def player_b_rating_adjustment(self, player_b_rating_adjustment): self._player_b_rating_adjustment = player_b_rating_adjustment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating", "def player_a_rating_adjustment(self, player_a_rating_adjustment):\n\n self._player_a_rating_adjustment = player_a_rating_adjustment", "def player_a_rating(self, player_a_rating):\n\n self._player_a_rating = player_a_rating", "def setB(self, b):\n\t\tself.b = int(b)", "def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points", "def _set_bet_limit(self) -> None:\n for i, ratio in enumerate(BET_LIMIT_RATIOS):\n self._bet_limits[i] = self._treasury_min.get() // ratio", "def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games", "def percent_b(self, percent_b: float):\n\n self._percent_b = percent_b", "def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id", "def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name", "def customize_weapon_bay_size(self, current_gameboard, new_size):\n current_gameboard['weapon_bay_size'] = new_size", "def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass", "def set_boost(self, boost):\r\n self._boost = float(boost)\r\n return self", "def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)", "def update_bias(self):\n self._bias = self._bias + self.update_bias_value\n self.bias_clipping()", "def _adjustBlock(self, b):\n pinTypeBlockConverters.adjustSmearDensity(b, self.value)", "def binarize(self):\n # Loop through the ratings and binarize based on overall average rating\n rating_sum = np.sum(self.ratings)\n rating_count = np.count_nonzero(self.ratings)\n rating_avg = (1.0 * rating_sum) / rating_count\n\n def binary_transform(x, rating_avg):\n if x == 0.0:\n return 0.0\n elif x >= rating_avg:\n return 1.0\n else:\n return -1.0\n\n btransform = np.vectorize(binary_transform, otypes=[np.float])\n if self.is_turbo:\n self.ratings = btransform(self.ratings, rating_avg)", "def buying_rate(self, buying_rate):\n\n self._buying_rate = buying_rate", "def __set_bias_score(self, row, best_score):\n if row.productid == best_score:\n result = 1\n else:\n result = row.final_score\n\n return result", "def b(self, b):\n\n self._b = b", "def setBigBlindBetAmount(self):\n\t\tif sum(self.currentBet) < self.bigBlind:\n\t\t\tif len(self.pots) > 1:\n\t\t\t\tnewbet = self.bigBlind - sum(self.currentBet)\n\t\t\telse:\n\t\t\t\tnewbet = self.bigBlind\n\t\t\tself.currentBet[-1] = newbet", "def update_average_book_rating(self, isbn):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating = total_rating_score / num_ratings WHERE \n ISBN=%s\"\"\", (isbn,))\n self.db.commit()", "def update_bias(self):\n self._bias = self._bias + self.update_bias_value", "def bid_percentage(self, bid_percentage):\n\n self._bid_percentage = bid_percentage", "def _bowl_params(self):\n self.vars['bowl_strength'] = self.bowl.strength + \\\n self.vars['beta_min_offset']\n self.vars['q_init'] = self.vars['bowl_strength']\n if self.vars['bowl_strength'] <= self.vars['beta_min_offset']:\n print(\n f\"Bowl overflow -- Set to the minimum value : {self.vars['beta_min_offset']}\")\n # raise ValueError(\"Bowl overflow... strength lower than set tolerance. 
Modify the tolerance or fix the bug!\")\n self.vars['bowl_strength'] = self.vars['beta_min_offset']\n if self.vars['bowl_strength'] > self.vars['q_max']:\n self.vars['bowl_strength'] = self.vars['q_max']\n\n self.vars['zeta_bowl'] = self.toNeural(self.bowl.center)\n print(f\"Value for Q set to {self.vars['bowl_strength']}\")", "def bias(self, value):\n self.mbmod.bias = value", "def update_b(self, theta, force=False):\n self.b = self.eval_b(self.theta)\n self.b_eval_cnt += 1", "def percent_b(self) -> float:\n return self._percent_b", "def update_balance(self, multiplier: int) -> int:\n self.user.balance += DEFAULT_BET * multiplier\n return self.user.balance", "def _onSetParameterBIgnoreBounds(self, value):\n self._parameters['b'] = value\n self._logger.info(\"Parameter 'b' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())" ]
[ "0.84605795", "0.6785584", "0.6118386", "0.5667898", "0.5614027", "0.56034553", "0.5589638", "0.54016906", "0.5335416", "0.52572376", "0.50731677", "0.505252", "0.50342023", "0.50341153", "0.5001537", "0.49912506", "0.49864346", "0.4957764", "0.49305597", "0.49127644", "0.4888302", "0.4884609", "0.4882224", "0.48661798", "0.48546243", "0.48471144", "0.48370516", "0.4835793", "0.48318675", "0.48304996" ]
0.903613
0
Sets the match_id of this Game.
def match_id(self, match_id): self._match_id = match_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_id(self, player_id):\n pass", "def set_match_id(match_id):\n conn = get_connect()\n conn.execute(\"UPDATE match SET isSearched = 1 WHERE matchId = \" + str(match_id))\n conn.commit()\n conn.close()\n print(\"matchId \" + str(match_id) + \" has been searched\")\n return", "def addMatch(self, id, match):\n self._matches[id] = match", "def set_player_id(self, player_id):\n self.player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def game_id(self, game_id):\n\n self._game_id = game_id", "def game_id(self, game_id):\n\n self._game_id = game_id", "def game_id(self, game_id):\n\n self._game_id = game_id", "def match_node_id(self, id_, match):\n pass", "def match_score(self, match_score):\n\n self._match_score = match_score", "def getMatchId(self):\n return None", "async def handle_set_group(self, match: Match[str], payload: str) -> None:\n groupid = match.group(1)\n\n try:\n group = self._bridge.groups[groupid]\n state = GroupSetState(**json.loads(payload))\n LOGGER.info(f\"Updating group {group.name}\")\n await group.set_action(**state.dict())\n except IndexError:\n LOGGER.warning(f\"Unknown group id: {groupid}\")\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")", "def match_id(self):\n return self._id", "def set_game_id(self, game_name):\n dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}\n dic = dic[self.league]\n dic = {(''.join(filter(str.isalpha,key))):v for key,v in dic.items()}\n self.game_id = dic[game_name][0]\n self.game_time = dic[game_name][1]", "def set_id(self, id_):\n\n self.id_ = id_", "def set_id(self, refobj, identifier):\n cmds.setAttr(\"%s.identifier\" %refobj, identifier)", "def scoring_play_id(self, scoring_play_id):\n\n self._scoring_play_id = scoring_play_id", "def set_id(self, id):\n self.__id = id", "def id(self, _id):\n self._id = _id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def _play_match(self, team, opponent, point, mode, match_id):", "def SetId(self, new_id):\r\n\r\n self.id = new_id", "async def handle_set_light(self, match: Match[str], payload: str) -> None:\n uniqueid = match.group(1)\n\n # Find the light with that uniqueid\n for light_id in self._bridge.lights:\n light = self._bridge.lights[light_id]\n if light.uniqueid == uniqueid:\n try:\n state = LightSetState(**json.loads(payload))\n LOGGER.info(f\"Updating {light.name}\")\n await light.set_state(**state.dict())\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")\n return\n LOGGER.warning(f\"Unknown light uniqueid: {uniqueid}\")", "def set_id(self, id):\n self.data['id'] = id", "def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))", "def id(self, id):\n self._id = id" ]
[ "0.6941635", "0.68668336", "0.66586006", "0.66515535", "0.6378987", "0.6378987", "0.62718457", "0.62718457", "0.62718457", "0.6133261", "0.61036193", "0.59934545", "0.5909548", "0.588252", "0.5878106", "0.58763146", "0.5867918", "0.5850785", "0.5738993", "0.570567", "0.56536347", "0.56536347", "0.56536347", "0.56536347", "0.5645559", "0.5620524", "0.5617478", "0.5593509", "0.5581472", "0.5581146" ]
0.84743714
0
Adds a list of cytoband objects to database
def add_cytobands(self, cytobands):
    LOG.debug(f"Inserting {len(cytobands)} cytoband intervals into database")
    result = self.cytoband_collection.insert_many(cytobands)
    LOG.debug(f"Number of inserted documents:{len(result.inserted_ids)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_to_db(car_list: list) -> dict:\n db = CarsDb() # pylint: disable=invalid-name\n db.create_table()\n for _cars in car_list:\n for car in _cars:\n db.add_cars(car)\n db.commit()\n db.close()", "def extend (self, db_list):\n for db in db_list:\n self.append(db)", "def add_to_database(session, objects):\n if isinstance(objects, list):\n session.add_all(objects)\n else:\n session.add(objects)\n session.commit()", "def add_to_db(ark_obj):\n session = Session()\n session.add(ark_obj)\n session.commit()\n session.close()", "def add_anime(utoa_list):\n for utoa in utoa_list:\n db.session.add(utoa)\n\n db.session.commit()", "def addrow_from_list(self, list):\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n con = database.get_connection()\n cur = con.cursor()\n arg = \"(?\"\n for i in xrange(len(list) - 1):\n arg += \", ?\"\n arg += \")\"\n sql = \"INSERT INTO \\'%s\\' VALUES %s\" % (self.name, arg)\n cur.execute(sql, tuple(list))\n con.commit()", "def add_all(self, objects):\n self.lock.acquire()\n self.__Session.add_all(objects)\n self.__Session.commit()\n self.lock.release()", "def insert_data_category_into_bdd(self):\n for category in constant.LIST_CATEGORIES:\n data = Category(name=category)\n data.save()\n print(\"the category \" + str(category) + \" has been created\")", "async def insert_many(self, models):\n\n pass", "def addAll(self, objs):\n self.getSession().add_all(objs)\n self.commit() # paranoially\n return objs", "def add_models(dummy_request):\n for entry in ENTRIES:\n row = Entries(title=entry[\"title\"], creation_date=entry[\"creation_date\"], body=entry[\"body\"])\n dummy_request.dbsession.add(row)", "def _add_bus_stops(dbsession):\n import json\n from shapely.geometry import shape\n\n file = open(os.path.join(os.path.dirname(__file__),\n '..',\n 'data',\n 'osm-lausanne-bus-stops.geojson'))\n geojson = json.load(file)\n file.close()\n\n bus_stops = []\n for feature in geojson['features']:\n id = feature['id'].replace('node/', '')\n geometry = shape(feature['geometry'])\n name = feature['properties']['name'] \\\n if 'name' in feature['properties'] else ''\n bus_stop = BusStop(\n id=int(float(id)),\n geom='SRID=4326;' + geometry.wkt,\n name=name)\n bus_stops.append(bus_stop)\n\n dbsession.add_all(bus_stops)", "def bulk_create(cls, raw_list):\n\t\tresource_list = [cls(**item) for item in raw_list]\n\t\tdb.session.add_all(resource_list)\n\t\tdb.session.commit()\n\n\t\treturn resource_list", "def save_all(self, objects):\n self.session.add_all(objects)\n self.session.commit()", "def save_all(self, obj_list):\n\n for obj in obj_list:\n self.save(obj)", "def add_models(dummy_request):\n for entry in ENTRIES:\n row = MyModel(\n title=entry['title'],\n body=entry['body'],\n creation_date=datetime.datetime.strptime(entry['creation_date'],\n '%b %d, %Y')\n )\n\n dummy_request.dbsession.add(row)", "def import_aa_data(anime_list):\n for anime, atog in anime_list:\n db.session.add(anime)\n for genre in atog:\n db.session.add(genre)\n\n db.session.commit()", "def _add_to_businesses(params):\n print params\n if not Business.query.filter_by(yelp_id=params['yelp_id']).first():\n business = Business()\n cat_list = []\n for key in params:\n # adds elements in category lists to category table if they don't already exist\n if key == \"categories\":\n for cat in params[key]:\n cat_list.append(cat)\n if not Category.query.filter_by(category_name=cat).first():\n category = Category(category_name=cat)\n db.session.add(category)\n # THROUGH LINE 40 REPLACED 
BY 30-34\n # for group in params[key]:\n # print type(group)\n # for subtype in group:\n # print type(subtype)\n # if not Category.query.filter_by(category_name=subtype).first():\n # category = Category(category_name=subtype)\n # db.session.add(category)\n # cat_list.append(subtype)\n # print cat_list\n elif key == \"yelp_id\":\n business.yelp_id = params[key]\n elif key == \"name\":\n business.name = params[key]\n elif key == \"address_line_1\":\n business.address_line_1 = params[key]\n elif key == \"address_line_2\":\n business.address_line_2 = params[key]\n elif key == \"city\":\n business.city = params[key]\n elif key == \"state\":\n business.state = params[key]\n elif key == \"zipcode\":\n business.zipcode = params[key]\n elif key == \"phone\":\n business.phone = params[key]\n elif key == \"latitude\":\n business.latitude = params[key]\n elif key == \"longitude\":\n business.longitude = params[key]\n try:\n db.session.add(business)\n db.session.commit()\n except:\n db.session.rollback()\n print business.name, \"has insufficient information, skipping.\"\n return None\n # creates rows in reference table\n for cat in cat_list:\n # creates row in reference table\n business = Business.query.filter_by(yelp_id=params['yelp_id']).first()\n catbus = BusinessCategory()\n print business.business_id\n catbus.business_id = business.business_id\n cat_object = Category.query.filter_by(category_name=cat).first()\n print cat_object.category_name\n catbus.category_id = cat_object.category_id\n\n if not BusinessCategory.query.filter_by(business_id=catbus.business_id,\n category_id=catbus.category_id).first():\n db.session.add(catbus)\n db.session.commit()\n\n print \"added \" + business.name + \" to db\"\n\n else:\n print \"Already in Dictionary\"\n return None", "def add_to_database(dataframe):\r\n \r\n # df.shape returns the number of columns and rows in a dataframe\r\n # So using the first value returned, we can cycle through each row in the dataframe (where each row has information on a specific station)\r\n for i in range(0, (dataframe.shape[0]-1)):\r\n data = dataframe.iloc[i] # df.iloc[] just allows us to access elements via normal indexing of a pandas dataframe\r\n date_time = pd.to_datetime(data[\"last_update\"]/1000,unit='s')\r\n day = str(date_time.day_name())\r\n hour = str(date_time.hour)\r\n # Store all the information from the dataframe in a list\r\n elements = [data.get(\"address\"), int(data.get(\"available_bike_stands\")), int(data.get(\"available_bikes\")), int(data.get(\"banking\")), int(data.get(\"bike_stands\")), int(data.get(\"bonus\")), data.get(\"contract_name\"), float(data.get(\"last_update\")), data.get(\"name\"), int(data.get(\"number\")), data.get(\"position\").get(\"lat\"), data.get(\"position\").get(\"lng\"), data.get(\"status\"),day,str(date_time),str(hour)]\r\n \r\n # Add each of these elements to the table in our database\r\n cursor.execute(\"INSERT INTO seviBikes VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\", elements)\r\n conn.commit()", "def insert_test_data(client):\n businesses = get_businesses()\n for business_dict in businesses:\n save_business_to_db(business_dict)\n\n collection = Business.objects()\n assert len(collection) == 12", "def add_listings_to_db(all_listings: List[Listing]) -> int:\n for listing in all_listings:\n session.add(CraigslistHousing(\n id=listing.listing_id,\n price=listing.price,\n url=listing.listing_url\n ))\n try:\n session.commit()\n return 1\n except Exception as e:\n raise e", "def add_events_to_database(self):\n # Adding events 
sequentially deals with the case where duplicate\n # events exist inside the _event_list field.\n for i in range(0, len(self._event_index_list), 1):\n e = self._event_list[i]\n e_ind = self._event_index_list[i]\n if not(SimpleDeduplicator.is_duplicate(e)):\n e.save()\n self.gdc.write_id_nth_event(e_ind, e.id)\n self._event_id_list.append(e.id)\n # Add categories whether it is a duplicate or not.\n # ManyToMany relationships work like sets, so there won't be a\n # problem with categories appearing more than once if added twice.\n c_cat_list = self.gdc.get_categories_nth_element(e_ind)\n for cat in c_cat_list:\n assert isinstance(cat, Category)\n e.category.add(cat)", "def _from_db_object_list(db_objects, cls, context):\n return [Boar._from_db_object(cls(context), obj)\n for obj in db_objects]", "def save_all(objs: List[ModelBase], DataClass: ModelBase):\n dicts = map(lambda x: x.to_dict(), objs)\n with db.atomic():\n if driver is Driver.POSTGRESQL:\n for bar in dicts:\n DataClass.insert(bar).on_conflict(\n update=bar,\n conflict_target=(\n DataClass.stock_id,\n DataClass.date,\n ),\n ).execute()\n else:\n i = 1\n num = 5000\n for c in chunked(dicts, num):\n sta = time.time()\n print(f\"Insert data to database {DataClass.__name__}: {i}-{i + num - 1}\")\n DataClass.insert_many(c).on_conflict_replace().execute()\n print(time.time() - sta)\n i += num", "def add_boid(self, new_boid):\r\n self.collection.append(new_boid)", "def add_causas(Base, session):\r\n # pdb.set_trace()\r\n Causa = Base.classes.causa\r\n file_causas = os.path.realpath(\r\n \"../dashboard-operativo-transito/static/data/causas.json\")\r\n with open(file_causas) as causas_data:\r\n causas = json.load(causas_data)\r\n for causa in causas['causas']:\r\n if not session.query(Causa).filter(Causa.id == causa['id']).count():\r\n session.add(\r\n Causa(descripcion=causa['descripcion'].encode('utf-8'), id=causa['id']))\r\n session.commit()\r\n session.close()", "def add_phosphosites_to_db(phosphosites, db_cursor):\n\n for phosphosite in phosphosites:\n residue = phosphosite.get_residue()\n position = phosphosite.get_position()\n uniprotid = phosphosite.get_uniprotid()\n fold_change = phosphosite.get_fold_change()\n db_cursor.execute(\\\n \"INSERT INTO phosphositetb (residue,position,uniprotid,foldchange) VALUES(?,?,?,?);\"\\\n ,(residue,position,uniprotid,fold_change))", "def addAll(self,*args, **kwargs):\n pass", "def add_darters(darters):\n\n values = []\n for darter in darters:\n darter += (CURRENT_EPOCH_TIME, CURRENT_EPOCH_TIME)\n values.append(darter)\n\n query = \"INSERT INTO darters(slack_name,slack_id,real_name, created_at, updated_at) VALUES(%s, %s, %s, %s, %s)\"\n\n db.insert(query, values)", "def save_all(objs: List[\"DbFactorGroupData\"]):\n dicts = [i.to_dict() for i in objs]\n with db.atomic():\n if driver is Driver.POSTGRESQL:\n for bar in dicts:\n DbFactorGroupData.insert(bar).on_conflict(\n update=bar,\n conflict_target=(\n DbFactorGroupData.stock_id,\n DbFactorGroupData.date,\n ),\n ).execute()\n else:\n for c in chunked(dicts, 5000):\n DbFactorGroupData.insert_many(c).on_conflict_replace().execute()" ]
[ "0.65280896", "0.6474675", "0.6398069", "0.60381", "0.59929323", "0.59694403", "0.59540313", "0.5905814", "0.58230734", "0.58040357", "0.5773995", "0.57596177", "0.5729488", "0.57210636", "0.5661529", "0.56500214", "0.5644303", "0.56313354", "0.5625418", "0.5623507", "0.5602209", "0.55952954", "0.5569171", "0.5567341", "0.5563918", "0.55581796", "0.5547212", "0.5532273", "0.5526164", "0.5517483" ]
0.69417334
0
Returns a dictionary of cytobands with chromosomes as keys
def cytoband_by_chrom(self, build="37"):
    if "38" in str(build):
        build = "38"
    else:
        build = "37"
    match = {"$match": {"build": build}}
    group = {
        "$group": {
            "_id": "$chrom",
            "cytobands": {
                "$push": {
                    "band": "$band",
                    "chrom": "$chrom",
                    "start": "$start",
                    "stop": "$stop",
                }
            },
        }
    }
    sort = {"$sort": {"start": pymongo.ASCENDING}}
    result = self.cytoband_collection.aggregate([match, group, sort])
    cytobands_by_chrom = {each.pop("_id"): each for each in result}
    return cytobands_by_chrom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps", "def get_cytobands(bt, cyto_bed):\n\n chrom = bt[0].chrom\n bands = sorted(list(set([x[-2] for x in bt.intersect(cyto_bed, wb=True)])))\n if len(bands) > 1:\n bandrange = '{}{}-{}'.format(chrom, bands[0], bands[-1])\n else:\n bandrange = chrom + bands[0]\n\n return bandrange", "def bandlcs(self):\n lc = self.lc\n bandgroups = lc.group_by('band')\n return bandgroups", "def get_cytoband(request, genome, chrom):\n logger.debug(\"annotation_server.get_cytoband called for genome: %s chromosome: %s\" % (genome, chrom)) \n \n if genome in SUPPORTED_GENOMES:\n current_table = eval(genome+ \"_CytoBand\")\n curr_vals = current_table.objects.filter(chrom__iexact=chrom).values('chrom', 'chromStart', 'chromEnd', 'name', 'gieStain')\n data = ValuesQuerySetToDict(curr_vals)\n return HttpResponse(data, 'application/json')\n else:\n return HttpResponse(status=400)\n \n #cursor = connection.cursor() \n #query = \"\"\"SELECT s.name as chrom, #<region as start, #>region as end, region_name from dm3.cytobandideo c join dm3.sequence s on s.id = c.seq_id where s.name ilike '%s' order by region;\"\"\" % (chrom)\n #cursor.execute(query)\n #return HttpResponse(cursor_to_json(cursor), 'application/javascript')", "def getbandlcs(lc):\n bandgroups = lc.group_by('band')\n\n return bandgroups", "def getCoveragePerBpVariable(wigFileName):\n\n d = {}\n\n for line in open(wigFileName):\n if line.startswith(\"track\") or line.startswith(\"browser\"):\n continue\n\n [chr, start, stop, level] = line.split()\n chr = chr.split(\"|\")[1].replace(\"MAL\", \"chr\")\n level = int(level)\n start = int(start)\n stop = int(stop)\n\n if not d.has_key(chr):\n d[chr] = {}\n\n for i in range(start, stop+1):\n d[chr][i] = level\n\n return d", "def qiskit_circuit_measurement_map(c: QiskitCircuit) -> Dict[int, int]:\n measurements = [x for x in c.data if x[0].name == 'measure']\n return {\n c.qubits.index(x[1][0]): c.clbits.index(x[2][0])\n for x in measurements\n }", "def band_color_dict(self):\n return self._bandColorDict", "def set_chrom_dict():\n chrom_dict = {\n str(i):'chr' + str(i) for i in range(1, MAXCHROM)\n }\n chrom_dict.update({\n 'X':'chr23',\n 'Y':'chr24',\n 'XY':'chr25',\n 'M':'chr26',\n 'MT':'chr26',\n 'chrX':'chr23',\n 'chrY':'chr24',\n 'chrXY':'chr25',\n 'chrM':'chr26',\n 'chrMT':'chr26'\n })\n return chrom_dict, MAXCHROM", "def get_chroms(chromfile):\n chroms = {}\n with open(chromfile) as c:\n for line in c:\n try:\n chrom, length = line.strip().split()\n chroms[chrom] = length\n except ValueError:\n chroms[line.strip()] = 1\n return chroms", "def samples_with_multiple_barcodes(self):\n # Look for samples with multiple barcodes\n multiple_barcodes = {}\n for project,sample in self.walk():\n if len(sample.barcode_seqs) > 1:\n multiple_barcodes[sample.sample_id] = \\\n [s for s in sample.barcode_seqs]\n return multiple_barcodes", "def __init__(self, chromosome, starts, ends, labels, gstrands):\n \n if 1 <= chromosome <= 24:\n self._chromosome = chromosome\n else:\n raise ValueError('wrong chromosome number %d' % chromosome)\n\n # Sort bands by starting base\n sorted_bands = sorted(zip(labels, starts, ends, gstrands),\n key=op.itemgetter(1))\n\n self._band_keys = dict((k[0], i) for i, k in enumerate(sorted_bands))\n 
self._bands = tuple(ChromosomeBand(self._chromosome, *band)\n for band in sorted_bands)", "def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)", "def get_dict(self):\r\n return self.cmap", "def createChromosomes(self) -> ChromList:\n raise NotImplementedError", "def bands(self):\n\t\treturn self._bands", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def csc():\n endcaps = [1,2]\n disks = [1,2,3,4]\n rings = {1:[1,2,3], # different rings for different disks\n 2:[1,2], \n 3:[1,2],\n 4:[1,2]}\n\n csc_info = {\n \"endcaps\":endcaps,\n \"disks\": disks,\n \"rings\": rings}\n\n return csc_info", "def get_covered_regions_dict(experiment_df):\n covered_regions_dict = {}\n benchmarks = experiment_df.benchmark.unique()\n for benchmark in benchmarks:\n benchmark_df = experiment_df[experiment_df.benchmark == benchmark]\n fuzzers = benchmark_df.fuzzer.unique()\n for fuzzer in fuzzers:\n fuzzer_covered_regions = get_fuzzer_covered_regions(\n benchmark_df, benchmark, fuzzer)\n key = get_fuzzer_benchmark_key(fuzzer, benchmark)\n covered_regions_dict[key] = fuzzer_covered_regions\n\n return covered_regions_dict", "def byte_strobes(self):\n strb = {}\n first = self.lsb // 8\n last = self.msb // 8\n for i in range(first, last + 1):\n # per every byte strobe\n wdata_lsb = self.lsb if i == first else i * 8\n wdata_msb = (i + 1) * 8 - 1 if ((i + 1) * 8 - 1 - self.msb) < 0 else self.msb\n bf_lsb = wdata_lsb - self.lsb\n bf_msb = wdata_msb - self.lsb\n strb[i] = {'bf_lsb': bf_lsb, 'bf_msb': bf_msb,\n 'wdata_lsb': wdata_lsb, 'wdata_msb': wdata_msb}\n return strb", "def bands(self):\n return self._bands", "def get_cds_regions(annotations):\n # Determine locations of CDS regions for each chromosome\n cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate over genes and add CDS coordinates\n for gene in genes:\n coords = (int(gene.location.start), int(gene.location.end))\n cds_regions[chr_id][gene.location.strand].append(coords)\n\n return cds_regions", "def getBandnames(self,cx,cy):\n bandnames = []\n\n for bandname in self.cells:\n if [cx,cy] in 
self.cells[bandname]:\n bandnames.append(bandname)\n\n return bandnames", "def addCytogenicBands(merged_data, cytogenicBandingFile):\n\n # make dictionary of the cytogenic bands\n cytogenic_bands = {}\n for line in open(cytogenicBandingFile):\n chrom, start, end, _, stain = [x.strip() for x in line.split(\"\\t\")]\n chrom = chrom.replace('chr','').lower()\n if chrom not in cytogenic_bands:\n cytogenic_bands[chrom] = [[int(start),int(end),stain]]\n else:\n cytogenic_bands[chrom].append([int(start),int(end),stain])\n\n import ast\n staining_column = [] \n for index, row in merged_data.iterrows():\n guide_cutsites = ast.literal_eval(row['Sorted Cut-Sites'])\n #guide_cutsites = [x.strip() for x in ast.literal_eval(row['Sorted Cut-Sites'])]\n chrom = row['Chromosome (+ strand)']\n staining_column.append(get_staining_overlap(chrom, guide_cutsites, cytogenic_bands))\n \n merged_data['Staining Overlap'] = staining_column", "def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}", "def get_benchmark_cov_dict(coverage_dict, benchmark):\n benchmark_cov_dict = {}\n for key_pair, covered_regions in coverage_dict.items():\n current_fuzzer, current_benchmark = key_pair.split()\n if current_benchmark == benchmark:\n covered_regions_in_set = set()\n for region in covered_regions:\n covered_regions_in_set.add(tuple(region))\n benchmark_cov_dict[current_fuzzer] = covered_regions_in_set\n return benchmark_cov_dict", "def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def get_raw_band_paths(self, **kwargs) -> dict:\n raw_band_paths = {}\n for band in self.get_existing_bands():\n raw_band_paths[band] = self._get_tile_path(band=band, **kwargs)\n return raw_band_paths", "def get_gene_symbols(self):\n # TODO: could be made much nicer with join in DB via SQL Alchemy\n bins = binning.containing_bins(self.start - 1, self.end)\n gene_intervals = list(\n GeneInterval.objects.filter(\n database=\"ensembl\",\n release=self.release,\n chromosome=self.chromosome,\n bin__in=bins,\n start__lte=self.end,\n end__gte=self.start,\n )\n )\n gene_ids = [itv.gene_id for itv in gene_intervals]\n symbols1 = {\n o.gene_symbol for o in EnsemblToGeneSymbol.objects.filter(ensembl_gene_id__in=gene_ids)\n }\n symbols2 = {o.symbol for o in Hgnc.objects.filter(ensembl_gene_id__in=gene_ids)}\n return sorted(symbols1 | symbols2)", "def get_gene_symbols(self):\n # TODO: could be made much nicer with join in DB via SQL Alchemy\n bins = binning.containing_bins(self.start - 1, self.end)\n gene_intervals = list(\n GeneInterval.objects.filter(\n database=\"ensembl\",\n release=self.release,\n chromosome=self.chromosome,\n bin__in=bins,\n start__lte=self.end,\n end__gte=self.start,\n )\n )\n gene_ids = [itv.gene_id for itv in gene_intervals]\n symbols1 = {\n o.gene_symbol for o in EnsemblToGeneSymbol.objects.filter(ensembl_gene_id__in=gene_ids)\n }\n symbols2 = {o.symbol for o in Hgnc.objects.filter(ensembl_gene_id__in=gene_ids)}\n return sorted(symbols1 | symbols2)" ]
[ "0.63961923", "0.6316996", "0.6242813", "0.6224416", "0.6127217", "0.60912484", "0.60851306", "0.6077323", "0.6038242", "0.5990606", "0.59787744", "0.5711337", "0.5703827", "0.5694839", "0.5685335", "0.5667714", "0.56494695", "0.56059813", "0.5526039", "0.5510321", "0.5509816", "0.54849726", "0.5480938", "0.54723674", "0.54386884", "0.5423994", "0.5413957", "0.53923935", "0.53842866", "0.53842866" ]
0.6918466
0
returns `imgaug BoundingBoxesOnImage` object which can be used to boxes on the image
def get_bounding_boxes_on_image(self, image_shape): return BoundingBoxesOnImage(self.bboxes, image_shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]", "def return_bbox_image(self, image, bboxes, label, color):\n if bboxes:\n for obj in bboxes:\n image = self.draw_single_bbox(image, obj.position_xywh, label=label, color=color)\n\n return image", "def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])", "def draw_bounding_boxes(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n bboxes = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n self.get_instance_bounding_box(img, bboxes, uni)\n\n cv.namedWindow('building bounding boxes', cv.WINDOW_NORMAL)\n cv.imshow('building bounding boxes', bboxes)\n cv.waitKey(0)\n cv.destroyAllWindows()", "def bbox(img):\n a = np.where(img != 0)\n bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n return bbox", "def to_imgaug(self, image_shape):\n image_height, image_width, _ = image_shape\n\n # Create ia bounding boxes from json\n regions = []\n for region in self.regions:\n regions.append(region.to_imgaug(image_width, image_height))\n bbs = BoundingBoxesOnImage(regions, shape=image_shape)\n\n return bbs", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def _get_bounding_boxes(self, imgs, summed_viz, threshold_value=.7):\n self.viz = summed_viz # for debug\n viz = summed_viz\n n_batchs = viz.shape[ 0]\n n_classes = viz.shape[-1]\n \n # viz.shape (100,14,14,20) => (14,14,100,20)\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Normalize <viz>, image per image (to be in range [-1,1])\n viz = viz / np.max(np.abs(viz), axis=(0,1))\n viz = (viz+1)/2 # range[0,1]\n \n # Resize each summed_viz to its original size (size of input image)\n if viz.shape[:2] != imgs.shape[1:3]:\n viz = np.array(\n [ skimage.transform.resize(viz[:,:,idx], imgs[idx].shape[:2])\n for idx in range(len(imgs))\n if viz.shape[0] != imgs.shape[1]\n ] )\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Threshold <viz>s to keep values over 70% of its max values\n m_max = threshold_value * viz.max(axis=(0,1))\n viz = viz * (m_max < viz)\n \n # We want a 2d boundind box, so project threshold in xs and ys\n xxs = viz.sum(axis=0)\n yys = viz.sum(axis=1)\n \n # Get some non-thresholded values (left, top... 
of bounding boxes)\n get_lefts = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][ 0]\n get_tops = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][-1]\n get_rights = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][-1]\n get_bottoms = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][ 0]\n\n # Debug\n # def get_lefts (b_id, c_idx): \n # print xxs[:,b_id,c_idx].nonzero()\n # xxs[:,b_id,c_idx].nonzero()[0][ 0]\n \n # Build the 2d array with first or lasts positions of zeros\n # INNER FUNCTION\n def _get_border_array(f_border=get_lefts):\n return np.array(\n [ map(f_border, [b_idx]*n_classes, range(n_classes))\n for b_idx in range(n_batchs) ]\n )\n \n lefts = _get_border_array(get_lefts)\n tops = _get_border_array(get_tops)\n rights = _get_border_array(get_rights)\n bottoms = _get_border_array(get_bottoms)\n \n return lefts, tops, rights, bottoms", "def get_bboxes(self, image_path: str, img_pipeline=None):\n pass", "def get_boxes(image: np.ndarray) -> tuple((Any, list)):\n image = _correct_rotation(image)\n invert = _get_inverted(image)\n combined_lines = _combine_lines(\n _get_vertical_lines(image, invert), _get_horizontal_lines(image, invert)\n )\n boxes = _get_bounding_boxes(combined_lines)\n boxes = _check_boxes(image, boxes)\n boxes = _sort_bounding_boxes(boxes)\n return (_overlay_lines(image, combined_lines), _get_final_boxes(boxes))", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def boundingbox(self, *args, **kwargs):\n return _image.image_boundingbox(self, *args, **kwargs)", "def draw_boxes_on_image(img, bboxes, color=(0, 0, 1), thick=6):\n imcopy = np.copy(img)\n\n for bbox in bboxes:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n\n return imcopy", "def draw_bounding_boxes(image, boxes):\n num_boxes = boxes.shape[0]\n gt_boxes_new = boxes.copy()\n draw_image = Image.fromarray(np.uint8(image))\n for i in range(num_boxes):\n draw_image = _draw_single_box(image=draw_image,\n quad=gt_boxes_new[i,:],\n font=FONT)\n\n image = np.array(draw_image, dtype=np.float32)\n return image", "def getbbox(self):\r\n img_ = (self._instance > 0)\r\n rows = np.any(img_, axis=1)\r\n cols = np.any(img_, axis=0)\r\n rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))\r\n cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))\r\n return (rmin, rmax, cmin, cmax)", "def vis_bbox(image: Image, bbox, color=_GREEN, thick=1) -> Image:\n image = image.astype(np.uint8)\n bbox = list(map(int, bbox))\n x0, y0, x1, y1 = bbox\n cv2.rectangle(image, (x0, y0), (x1, y1), color, thickness=thick)\n return image", "def draw_bounding_boxes_on_image_array(image, boxes, color=[], thickness=5):\n\n draw_bounding_boxes_on_image(image, boxes, color, thickness)\n\n return image", "def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n imcopy = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return imcopy", "def draw_boxes(image, bboxes, color=(0., 0., 1.0), thick=6):\n # make a copy of the image\n draw_img = 
np.copy(image)\n # draw each bounding box on your image copy using cv2.rectangle()\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return draw_img", "def get_instance_bounding_box(img, bounding_boxes, instance):\n mask = np.zeros(img.shape, dtype=np.uint16)\n mask[img == instance] = 1\n ret, threshed = cv.threshold(mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n x, y, w, h = cv.boundingRect(contours[0])\n cv.rectangle(bounding_boxes, (x, y), (x + w, y + h), (randint(25, 255), randint(25, 255), randint(25, 255)), 3)\n img2 = contours = hierarchy = mask = None", "def draw_boundingbox(image, infer_output, image_width, image_height, conf_thresh):\n\n out_image = image.copy()\n logger.debug(' - input image: [width] %d, [height] %d' % (image.shape[1], image.shape[0]))\n\n def check_valid_range(val, max_val):\n \"\"\" check the coordinate of bbox is inside of an image\"\"\"\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val\n\n valid_obj_num = 0\n valid_obj_bbox = []\n\n for obj_info in infer_output:\n conf = obj_info['conf']\n # filter by the confidence\n if conf >= conf_thresh:\n # calculate bbox coordinate\n xmin = int(obj_info['x_min'] * image_width)\n ymin = int(obj_info['y_min'] * image_height)\n xmax = int(obj_info['x_max'] * image_width)\n ymax = int(obj_info['y_max'] * image_height)\n\n # round up into valid range\n xmin = check_valid_range(xmin, image_width)\n ymin = check_valid_range(ymin, image_height)\n xmax = check_valid_range(xmax, image_width)\n ymax = check_valid_range(ymax, image_height)\n\n # draw bbox\n cv2.rectangle(out_image, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)\n\n valid_obj_num += 1\n valid_obj_bbox.append((xmin, ymin, xmax, ymax))\n logger.debug(' - draw bbox [%d, %d, %d, %d] confidence: %f' % (xmin,ymin,xmax,ymax,conf))\n\n return out_image, valid_obj_num", "def from_imagaug(image_width, image_height, imgaug_bounding_boxes):\n\n data = ImageData()\n for bb in imgaug_bounding_boxes:\n # Convert from imgaug bounding boxes to ImageDataRegion\n data.regions.append(ImageDataRegion.from_imgaug(bb, image_width, image_height))\n\n return data", "def draw_image_bboxes_opencv(image, gt_candidate, detection_candidate):\n for candidate in detection_candidate:\n minc, minr, maxc, maxr = candidate\n cv2.rectangle(image, (minc, minr), (maxc, maxr), (0, 0, 255), 8) # Red\n\n for candidate in gt_candidate:\n minc, minr, maxc, maxr = candidate\n cv2.rectangle(image, (minc, minr), (maxc, maxr), (0, 255, 0), 5) # Green\n\n return image", "def bbox_img(img, bbox):\n if len(bbox) == 4:\n return img[bbox[1]:bbox[3], bbox[0]:bbox[2]]\n else:\n return img", "def imageBoundingBox(img, M):\n #TODO 8\n #TODO-BLOCK-BEGIN\n \n M = M/M[2][2]\n\n farRight = img.shape[1]-1\n farDown = img.shape[0]-1\n\n matrixtl = np.zeros((3,1))\n matrixtl[2][0] = 1\n\n matrixtr = np.zeros((3,1))\n matrixtr[0][0] = farRight\n matrixtr[2][0] = 1\n\n matrixbl = np.zeros((3,1))\n matrixbl[1][0] = farDown\n matrixbl[2][0] = 1\n\n matrixbr = np.zeros((3,1))\n matrixbr[0][0] = farRight\n matrixbr[1][0] = farDown\n matrixbr[2][0] = 1\n\n topLeft = np.dot(M, matrixtl)\n topRight = np.dot(M, matrixtr)\n bottomLeft = np.dot(M, matrixbl)\n bottomRight = np.dot(M, matrixbr)\n\n xlist = [topLeft[0], topRight[0], bottomLeft[0], bottomRight[0]]\n 
ylist = [topLeft[1], topRight[1], bottomLeft[1], bottomRight[1]]\n\n minX = min(xlist)\n maxX = max(xlist)\n minY = min(ylist)\n maxY = max(ylist)\n #TODO-BLOCK-END\n return int(minX), int(minY), int(maxX), int(maxY)", "def draw_bbox(image, im_id, catid2name, bboxes, threshold):\n draw = ImageDraw.Draw(image)\n\n catid2color = {}\n color_list = colormap(rgb=True)[:40]\n for dt in np.array(bboxes):\n if im_id != dt['image_id']:\n continue\n catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']\n if score < threshold:\n continue\n\n xmin, ymin, w, h = bbox\n xmax = xmin + w\n ymax = ymin + h\n\n if catid not in catid2color:\n idx = np.random.randint(len(color_list))\n catid2color[catid] = color_list[idx]\n color = tuple(catid2color[catid])\n\n # draw bbox\n draw.line(\n [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),\n (xmin, ymin)],\n width=2,\n fill=color)\n\n # draw label\n text = \"{} {:.2f}\".format(catid2name[catid], score)\n tw, th = draw.textsize(text)\n draw.rectangle(\n [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)\n draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))\n\n return image", "def show_boxes(img, boundary_boxes, gt_boxes=None):\n\n for (x_tl, y_tl, x_br, y_br) in boundary_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 0, 255), 2)\n\n if gt_boxes is not None:\n for (x_tl, y_tl, x_br, y_br) in gt_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 255, 0), 2)\n\n cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def draw_bboxes(img, bboxes, color=(0, 0, 255), thick=6):\n draw_img = np.copy(img)\n # Draw rectangles given bbox coordinates as opposing coordinates\n # bboxes = opposing coordinates: (x1,y1), (x2,y2)\n [cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick) for bbox in bboxes]\n return draw_img", "def show_bboxes(img, bounding_boxes=None, facial_landmarks=[]):\n\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n# for b in bounding_boxes:\n# draw.rectangle([\n# (b[0], b[1]), (b[2], b[3])\n# ], outline='white')\n\n for p in facial_landmarks:\n for i in range(106):\n draw.ellipse([\n (p[i*2] - 1.0, p[2*i + 1] - 1.0),\n (p[i*2] + 1.0, p[2*i+1] + 1.0)\n ], outline='blue')\n font = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text([p[2*i], p[2*i+1]], str(i), font=font)\n\n return img_copy", "def get_bounding_boxes(frame):\n\n blob = cv2.dnn.blobFromImage(frame,1/255,(320,320),(0,0,0),1,crop=False)\n net.setInput(blob)\n\n output_layer_names = net.getUnconnectedOutLayersNames()\n layer_outputs = net.forward(output_layer_names)\n\n all_boxes, confidences = get_all_boxes(layer_outputs)\n\n indexes=cv2.dnn.NMSBoxes(all_boxes,confidences,0.5,0.3)\n\n return indexes, confidences, all_boxes" ]
[ "0.73006785", "0.7061073", "0.7016004", "0.6997015", "0.6996555", "0.6985397", "0.6981905", "0.6970928", "0.6970363", "0.68584704", "0.6788558", "0.67682064", "0.67322797", "0.6707081", "0.6704685", "0.6648175", "0.6627646", "0.6615074", "0.6561027", "0.65533084", "0.65444976", "0.6535674", "0.6495153", "0.649212", "0.64812297", "0.64782214", "0.64700055", "0.6431612", "0.6389743", "0.6352283" ]
0.7467911
0
Given a list of values, return the number of times high is broken
>>> arr = np.array([11,12,9,8,13])
>>> list(high_count(arr))
[0, 1, 1, 1, 2]
def high_count(values):
    length = len(values)
    arr = np.zeros(length, dtype=np.int16)
    count = 0
    max_val = values[0]
    for i in np.arange(1, length):
        if values[i] > max_val:
            max_val = values[i]
            count += 1
        arr[i] = count
    return arr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def low_count(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int16)\n count = 0\n min_val = values[0]\n for i in np.arange(1, length):\n if values[i] < min_val:\n min_val = values[i]\n count += 1\n arr[i] = count\n return arr", "def last_high(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int32)\n max_val = values[0]\n counter = 0\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n counter = i\n arr[i] = counter\n return arr", "def num_larger(threshold, values):\n num = sum([1 for n in values if (n>threshold)])\n return num", "def countingSort(arr):\n freq = [0] * 100\n for el in arr:\n freq[el] += 1\n return freq", "def count(array, value):\n count = 0\n for i in range (len(array)):\n if (array[i] == value):\n count += 1\n return count", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def countElements(self, nums):\n import sys\n max_n = -sys.maxint\n min_n = sys.maxint\n\n for n in nums:\n max_n = max(n, max_n)\n min_n = min(n, min_n)\n\n count = 0\n for n in nums:\n if min_n < n < max_n:\n count += 1\n return count", "def count(A,target):\n\n def rcount(lo, hi, target):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1].\"\"\"\n if lo == hi:\n return 1 if A[lo] == target else 0\n\n mid = (lo+hi)//2\n left = rcount(lo, mid, target)\n right = rcount(mid+1, hi, target)\n return left + right\n\n return rcount(0, len(A)-1, target)", "def score(hand):\n occurrences = [] \n for die in hand:\n if die > len(occurrences):\n occurrences.extend([0 for dummy_idx in range(len(occurrences) ,die)]) \n occurrences[die - 1] += 1\n maxi = 0\n for idx in range(len(occurrences)):\n if (idx+1) * occurrences[idx] > maxi:\n maxi = (idx + 1) * occurrences[idx]\n return maxi", "def fast_histogram(input_array: np.ndarray, nr_values: int) -> np.ndarray:\n output_array = np.empty(nr_values, dtype=int)\n for i in range(nr_values):\n output_array[i] = (input_array == i).sum()\n return output_array", "def score(hand):\n max_score = []\n for die in hand:\n max_score.append(hand.count(die) * die)\n return max(max_score)", "def solution(n, array):\n\n counters = [0] * n\n\n # Current greatest value calculated so far\n max_count = 0\n\n for i in range(len(array)):\n if array[i] == n + 1:\n # max_count = max(counters)\n counters = [max_count] * n\n else:\n counters[array[i] - 1] += 1\n\n # To avoid calculating max(), we update the max value at each step\n if counters[array[i] - 1] > max_count:\n max_count = counters[array[i] - 1]\n\n return counters", "def count_interval_peak(arr1d, lower, upper):\n from scipy.signal import find_peaks\n peaks = find_peaks(arr1d, height=(lower, upper))\n return len(peaks[0])", "def numberOfOccurences(arr=[], find=0):\n idxMin = binarySearch_first(arr, find)\n idxMax = binarySearch_last(arr, find)\n if idxMin is not None and idxMax is not None:\n print \"number of occurences of {} is {}\".format(find, idxMax - idxMin + 1)\n else:\n print \"didn't find any occurences of {}\".format(find)", "def counting_sort(arr):\n\n # No need to sort\n if arr is None:\n return arr\n\n n = len(arr)\n if n <= 1:\n return arr\n\n # find the counting scope, i.e., the max value\n max_value = arr[0]\n for i in range(1, n):\n if arr[i] > max_value:\n max_value = arr[i]\n\n # init the counting array via list comprehension\n count_arr = [0 for _ in range(max_value + 1)]\n\n # update the counting number\n for i in arr:\n count_arr[i] += 1\n\n # update the total counting number\n for i in range(1, max_value 
+ 1):\n count_arr[i] += count_arr[i - 1]\n\n # store sorted result in a temp array, why scan inversely?\n # note reverse-scanning can guarantee the sort result is stable\n tmp_arr = [0 for _ in range(n)]\n for i in range(n - 1, -1, -1):\n idx = count_arr[arr[i]] - 1\n tmp_arr[idx] = arr[i]\n count_arr[arr[i]] -= 1\n\n # copy result back to original array\n for i in range(n):\n arr[i] = tmp_arr[i]", "def get_frequency(arr):\n freq = {}\n for i in arr:\n if(i not in freq):\n freq[i] = 0\n freq[i] += 1\n \n return freq", "def score(hand):\r\n \r\n max_score = []\r\n \r\n for dice in hand:\r\n max_score.append(hand.count(dice) * dice)\r\n \r\n return max(max_score)", "def count_abs_index(arr1d, threshold):\n count = 0\n for ele in arr1d:\n if ele <= threshold:\n count = count + 1\n return count", "def countZeroes(arr):\n counter = 0\n #sort the array\n arr.sort(reverse=True)\n print(arr)\n n = len(arr)\n print(n)\n\n # Find index of first zero in given array\n first = firstZero(arr, 0, n - 1)\n \n # If 0 is not present at all, return 0\n if (first == -1):\n return 0\n\n for i in range(first,len(arr)):\n if (arr[i] == 0):\n counter += 1\n else:\n break\n\n return counter", "def score(hand):\n counted = []\n scores = []\n for element in hand:\n if element not in counted:\n scores.append(hand.count(element)*element)\n counted.append(element)\n return max(scores)", "def integer_hist(a, int_range=None, open_range=False, relative=False):\n data = np.round(a).flatten()\n if int_range:\n values = np.arange(int(int_range[0]), int(int_range[1])+1)\n else:\n values = np.arange(int(data.min()), int(data.max())+1)\n N = values.size\n if relative:\n count = np.empty(N, 'd')\n else:\n count = np.empty(N, 'l')\n for bin, c in enumerate(values):\n if open_range and bin == N - 1:\n count[bin] = (data >= c).sum()\n else:\n count[bin] = (data == c).sum()\n if relative:\n count /= count.sum()\n return values, count", "def n50_counter(input_list):\n input_list.sort()\n half_tot = sum(input_list) / 2\n\n current_count = 0\n for num in input_list:\n current_count += num\n if current_count >= half_tot:\n return num", "def birthdayCakeCandles(n, ar):\n\n tallest = max(ar)\n return ar.count(tallest)", "def _counts(data):\n table = collections.Counter(iter(data)).most_common()\n if not table:\n return table\n maxfreq = table[0][1]\n for i in range(1, len(table)):\n if table[i][1] != maxfreq:\n table = table[:i]\n break\n return table", "def of_a_kind_size(dice_list):\n return max([dice_list.count(value) for value in range(1,7)])", "def _get_majority_def(array: List[int]) -> Optional[int]:\n if len(array) == 0:\n return None\n counter = dict()\n for item in array:\n if item in counter:\n counter[item] += 1\n else:\n counter[item] = 1\n majority = max(counter, key=counter.get)\n if counter[majority] > len(array) // 2:\n return majority\n else:\n return None", "def dice_roll_count(dice_list, value_to_count):\n return dice_list.count(value_to_count)", "def count_within(values, intervals):\n rvals = np.reshape(values, [-1, 1])\n intervals_m = np.array(intervals)\n rlow = intervals_m[:, 0].reshape(1, -1)\n rhigh = intervals_m[:, 1].reshape(1, -1)\n\n flags = (rlow <= rvals) * (rvals < rhigh)\n return np.sum(flags, axis=0)", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def countingsort(values):\n\n output = []\n \n n = len(values)\n\n # O(n) to find maximum value in input to map range.\n k = max(values)\n\n # we're could leverage a hashtable for this instead of an array, 
the C\n # version will use an array.\n counts = []\n\n for i in range(k+1):\n counts.append(0)\n\n # there are a few ways to implement this; i've chosen one, but there is \n # another with which I am familiar but is super pythonic and I wanted \n # something more general\n # the other variation just loads the entire thing into the counts as a\n # list, and then dumps out.\n\n # get the counts. \n for v in values:\n counts[v] += 1\n output.append(0) # to make it an array of the same size as input\n\n # get the totals so you have counts[i] is total <= i instead of == i.\n # prefix sums.\n total = 0\n for i in range(k+1):\n total += counts[i]\n counts[i] = total\n\n # start, stop, step\n for i in range(n-1, -1, -1):\n l = values[i] # the value\n output[counts[l]-1] = l\n counts[l] -= 1\n\n return output" ]
[ "0.71308786", "0.67923504", "0.6276579", "0.6243201", "0.61764824", "0.61432064", "0.5935874", "0.590811", "0.5905983", "0.58521324", "0.5787228", "0.5758916", "0.57581323", "0.5749915", "0.57492715", "0.57201385", "0.5710379", "0.5701776", "0.57005394", "0.5690722", "0.5673694", "0.5671456", "0.5663454", "0.56616515", "0.5648744", "0.56361514", "0.56207335", "0.56103957", "0.5584793", "0.55646205" ]
0.79292583
0
Given a list of values, return the number of times low is broken
>>> arr = np.array([13,14,12,11,9,10])
>>> list(low_count(arr))
[0, 0, 1, 2, 3, 3]
def low_count(values):
    length = len(values)
    arr = np.zeros(length, dtype=np.int16)
    count = 0
    min_val = values[0]
    for i in np.arange(1, length):
        if values[i] < min_val:
            min_val = values[i]
            count += 1
        arr[i] = count
    return arr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def high_count(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int16)\n count = 0\n max_val = values[0]\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n count += 1\n arr[i] = count\n return arr", "async def _get_num_open_high_low_trades(self) -> float:\n\n low = 0\n high = 0\n\n for pair in self.trades:\n current_value = self.market.adjusted_close_values[pair][-1]\n for trade in self.trades[pair]['open']:\n fees = config['trade_fee_percent'] * trade['open_value'] + config['trade_fee_percent'] * current_value\n if current_value - fees > trade['open_value']:\n high += 1\n else:\n low += 1\n\n return (low, high)", "def countZeroes(arr):\n counter = 0\n #sort the array\n arr.sort(reverse=True)\n print(arr)\n n = len(arr)\n print(n)\n\n # Find index of first zero in given array\n first = firstZero(arr, 0, n - 1)\n \n # If 0 is not present at all, return 0\n if (first == -1):\n return 0\n\n for i in range(first,len(arr)):\n if (arr[i] == 0):\n counter += 1\n else:\n break\n\n return counter", "def num_larger(threshold, values):\n num = sum([1 for n in values if (n>threshold)])\n return num", "def countElements(self, nums):\n import sys\n max_n = -sys.maxint\n min_n = sys.maxint\n\n for n in nums:\n max_n = max(n, max_n)\n min_n = min(n, min_n)\n\n count = 0\n for n in nums:\n if min_n < n < max_n:\n count += 1\n return count", "def count_interval_peak(arr1d, lower, upper):\n from scipy.signal import find_peaks\n peaks = find_peaks(arr1d, height=(lower, upper))\n return len(peaks[0])", "def countSmaller(self, nums: List[int]) -> List[int]:\n self.index2num = {index:num for index, num in enumerate(nums)}\n self.indexes = [i for i in range(len(nums))]\n self.counts = Counter()\n\n self.mergeSort(self.indexes)\n\n return [self.counts[i] for i in range(len(nums))]", "def countingSort(arr):\n freq = [0] * 100\n for el in arr:\n freq[el] += 1\n return freq", "def min_counts(self) -> int:\n return self._min_counts", "def min_counts(self) -> int:\n return self._min_counts", "def last_high(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int32)\n max_val = values[0]\n counter = 0\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n counter = i\n arr[i] = counter\n return arr", "def count(array, value):\n count = 0\n for i in range (len(array)):\n if (array[i] == value):\n count += 1\n return count", "def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter", "def count_abs_index(arr1d, threshold):\n count = 0\n for ele in arr1d:\n if ele <= threshold:\n count = count + 1\n return count", "def firstMissingPositive(self, nums):\n nums.sort()\n res = 1\n for num in nums:\n if num == res:\n res += 1\n return res", "def count_interval_index(arr1d, lower, upper):\n count = 0\n for ele in arr1d:\n if ele >= lower and ele <= upper:\n count = count + 1\n return count", "def count_from_top(img):\n pixel_count = 0\n for row in img:\n unique_pixel_vals = np.unique(row)\n if 255 not in unique_pixel_vals: # ignore shading (values between 0-255)\n pixel_count += 1\n else:\n return pixel_count", "def min_count_bins(data, min_step, min_count, minimum=None, maximum=None):\n if minimum is None:\n minimum = data.min()\n if maximum is None:\n maximum = data.max()\n bins = np.concatenate((np.arange(minimum, maximum, min_step), [maximum]))\n histogram, _ = np.histogram(data, bins)\n sub_sum = 0\n result = [bins[0]]\n for i in range(len(histogram)):\n sub_sum += 
histogram[i]\n if sub_sum > min_count:\n sub_sum = 0\n result.append(bins[i + 1])\n result[-1] = bins[-1]\n return result", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def get_average_in_range(list, low, high):\n track = 0\n val = 0\n for num in list:\n if num >= low and num < high:\n val += num\n track += 1\n if track == 0:\n return 0\n return val / track", "def counting_sort_o(arr: List[int]) -> List[int]:\n ar_min = min(arr)\n ar_max = max(arr)\n count_arr = [0] * (ar_max - ar_min + 1)\n res = [0] * len(arr)\n for el in arr:\n count_arr[el - ar_min] += 1\n for i in range(1, ar_max - ar_min + 1):\n count_arr[i] += count_arr[i - 1]\n for i in range(len(arr) - 1, -1, -1):\n res[count_arr[arr[i] - ar_min] - 1] = arr[i]\n count_arr[arr[i] - ar_min] -= 1\n return res", "def count_less(count, a):\n if len(a[0]) < len(a[1]):\n return count + 1\n return count", "def numberOfOccurences(arr=[], find=0):\n idxMin = binarySearch_first(arr, find)\n idxMax = binarySearch_last(arr, find)\n if idxMin is not None and idxMax is not None:\n print \"number of occurences of {} is {}\".format(find, idxMax - idxMin + 1)\n else:\n print \"didn't find any occurences of {}\".format(find)", "def count_ge_one(array):\r\n return numpy.count_nonzero(array >= 1)", "def integer_hist(a, int_range=None, open_range=False, relative=False):\n data = np.round(a).flatten()\n if int_range:\n values = np.arange(int(int_range[0]), int(int_range[1])+1)\n else:\n values = np.arange(int(data.min()), int(data.max())+1)\n N = values.size\n if relative:\n count = np.empty(N, 'd')\n else:\n count = np.empty(N, 'l')\n for bin, c in enumerate(values):\n if open_range and bin == N - 1:\n count[bin] = (data >= c).sum()\n else:\n count[bin] = (data == c).sum()\n if relative:\n count /= count.sum()\n return values, count", "def count(pred, l):\n nl = [i for i in range(0,len(l)) if pred(l[i])]\n\n return len(nl)", "def n_peaks_valleys(x):\n diff_sign = np.sign(x[1:] - x[:-1])\n return np.count_nonzero(diff_sign[1:] != diff_sign[:-1])", "def minpeaks(sig):\n diff_sig = np.diff(sig)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd]<0 and diff_sig[nd + 1]>0)])", "def smallerNumbersThanCurrent(nums: List[int]) -> List[int]:\n i, count = 0, 0\n arr = []\n for j in range(len(nums)):\n if nums[i] > nums[j]:\n count += 1\n arr.append(count)\n return arr", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')" ]
[ "0.6687898", "0.59021145", "0.5875652", "0.58746266", "0.5855694", "0.58536065", "0.5841549", "0.5823681", "0.5800267", "0.5800267", "0.5797332", "0.5706409", "0.57043064", "0.5671021", "0.5630919", "0.56085753", "0.5604887", "0.5599267", "0.5597285", "0.55907637", "0.554958", "0.5532444", "0.55147636", "0.55107886", "0.5499484", "0.54761857", "0.54681593", "0.545271", "0.5426367", "0.54235506" ]
0.7902696
0
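The loop in low_count can also be expressed with NumPy's accumulation primitives. The sketch below is an illustrative alternative, not taken from the source; it assumes only that NumPy is importable as np and reproduces the doctest values shown above.

import numpy as np

def low_count_vectorized(values):
    # running minimum of everything seen up to and including index i-1
    prev_min = np.minimum.accumulate(values)[:-1]
    # True wherever a new low is set; index 0 can never break a low
    new_low = values[1:] < prev_min
    return np.concatenate(([0], np.cumsum(new_low))).astype(np.int16)

arr = np.array([13, 14, 12, 11, 9, 10])
print(low_count_vectorized(arr).tolist())  # [0, 0, 1, 2, 3, 3]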
Given a list of values, return an array with the index of the corresponding last highs
Note index starts at zero
>>> arr = np.array([12,14,11,12,13,18])
>>> list(last_high(arr))
[0, 1, 1, 1, 1, 5]
def last_high(values):
    length = len(values)
    arr = np.zeros(length, dtype=np.int32)
    max_val = values[0]
    counter = 0
    for i in np.arange(1, length):
        if values[i] > max_val:
            max_val = values[i]
            counter = i
        arr[i] = counter
    return arr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_high(self):\n return self.data.last('1D').high.iat[0]", "def binarySearch_last(arr=[], find=0):\n min = 0\n max = len(arr) - 1\n result = None\n while min <= max:\n mid = (min + max) / 2\n if arr[mid] == find:\n result = mid\n min = mid + 1 # look right for last occurence\n elif arr[mid] < find:\n min = mid + 1\n else:\n max = mid - 1\n return result", "def high(self):\n return self.high_array", "def binary_search_find_last(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n\n if value < mid_value:\n end = mid - 1\n elif value > mid_value:\n start = mid + 1\n else:\n if mid == len(arr) - 1 or arr[mid + 1] != value:\n return mid\n else:\n start = mid + 1\n\n return -1", "def last_index_of(my_list, my_value):\n return len(my_list) - my_list[::-1].index(my_value)", "def Find_the_last_index(A, target):\n if len(A) == 0:\n return -1\n begin = 0\n end = len(A) - 1\n while end - begin > 1:\n mid = begin + (end - begin >> 1)\n if target < A[mid]:\n end = mid\n else:\n begin = mid\n if A[end] == target:\n return end\n elif A[begin] == target:\n return begin\n else:\n return -1", "def hi(lows_arr: np.array, highs_arr: np.array, idx_start: int = 0):\n high = lows_arr[idx_start]\n high_idx = idx_start\n\n for idx in range(idx_start + 1, len(highs_arr)):\n act_high = highs_arr[idx]\n if act_high > high:\n high = act_high\n high_idx = idx\n else:\n return high, high_idx\n\n return high, high_idx", "def last2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][-1]", "def find_index_of_max(array):\n\n max_value = -abs(np.amax(array))\n\n max_index_hour = 0\n\n for k in range(len(array)):\n if array[k] > max_value:\n max_value = array[k]\n max_index_hour = k\n\n return max_index_hour", "def latest(scores: list) -> int:\n return scores[-1]", "def last_low(self):\n return self.data.last('1D').low.iat[0]", "def find_max(list):\n return find_value_at(list, 0)", "def indexOfMax(list):\n max = -np.Infinity\n index = 0\n i = 0\n for value in list:\n if value > max:\n max = value\n index = i\n i += 1\n return index", "def next_hi(lows_arr: np.array, highs_arr: np.array, idx_start: int = 0, prev_high: float = 0):\n\n high = lows_arr[idx_start]\n high_idx = None\n\n prev_high_reached = False\n for idx in range(idx_start + 1, len(highs_arr)):\n\n act_high = highs_arr[idx]\n\n if act_high < prev_high and not prev_high_reached:\n continue\n\n elif act_high > prev_high and not prev_high_reached:\n prev_high_reached = True\n high = act_high\n high_idx = idx\n\n elif act_high > high:\n high = act_high\n high_idx = idx\n\n else:\n return high, high_idx\n\n return None, None", "def first_and_last_index(arr, number):\n\n # TODO: Write your first_and_last function here\n # Note that you may want to write helper functions to find the start\n # index and the end index\n bin_occurence = find_number_recursively(arr, number, 0, len(arr) - 1)\n print(bin_occurence)\n first = find_first_index_recursively(arr, number, bin_occurence)\n last = find_last_index_recursively(arr, number, bin_occurence)\n print([first, last])\n return [first, last]", "def highestCurrent(requestContext, seriesList, n):\n return sorted( seriesList, key=safeLast )[-n:]", "def nonzero_last(arr, *, axis):\n def nonzero_last_1d(arr):\n try:\n return np.nonzero(arr)[0][-1]\n except IndexError:\n return -1\n return 
np.apply_along_axis(nonzero_last_1d, axis, arr)", "def find_peak(list_of_integers):\n\n x = list_of_integers\n\n high = None\n\n for index, number in enumerate(x):\n if index == 0 or x[index - 1] < number:\n left = True\n else:\n left = False\n if index == len(x) - 1 or x[index + 1] < number:\n right = True\n else:\n right = False\n if right and left:\n return number\n if high is None or number > high:\n high = number\n\n return high", "def latest(scores):\n return scores[-1]", "def latest(scores):\n return scores[-1]", "def get_peak_ind(discrete_array):\n\n indexes = [j for j in range(discrete_array.size) if discrete_array[j-1]==0 and\\\n discrete_array[j]==1]\n\n return indexes", "def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)", "def peak_index(A):\n peak = 0\n for idx in range(1, len(A)-1):\n if A[idx] > A[idx-1]:\n peak = idx\n return peak", "def maximo(arr):\n maxVal = float('-inf')\n maxIdx = -1\n\n for i in range(len(arr)):\n if arr[i] > maxVal:\n maxVal = arr[i]\n maxIdx = i\n\n return maxVal, maxIdx", "def last_index(list_, value):\n\n found = None\n for index, val in enumerate(list_):\n if val == value:\n found = index\n if found is None:\n raise ValueError(\"{} is not in list {}\".format(value, list_))\n return found", "def index_largest(seq):\n assert len(seq) > 0\n x, greatest, index = len(seq), seq[0], 0\n for elem in range(1, x):\n if seq[elem] > greatest:\n greatest = seq[elem]\n index = elem\n return index", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def index_of_max_change(vals):\n i_vals = zip(range(len(vals)), vals)\n vals = [v for i, v in i_vals]\n vals_diff = [abs(v1 - v0) for v0, v1 in zip(vals[:-1], vals[1:])]\n return i_vals[vals_diff.index(max(vals_diff))][0]", "def find_peak(list_of_integers):\n if list_of_integers == []:\n return None\n\n list_of_integers.sort()\n return list_of_integers[-1]", "def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)" ]
[ "0.7242109", "0.65918887", "0.6544895", "0.6467938", "0.6450139", "0.62479717", "0.6244132", "0.6187111", "0.6116131", "0.6113819", "0.61057824", "0.6069469", "0.60632396", "0.6051137", "0.59798074", "0.5966679", "0.5830544", "0.58028775", "0.57961994", "0.57961994", "0.5773162", "0.5769177", "0.5759071", "0.57154244", "0.5713744", "0.56856483", "0.56826395", "0.56809586", "0.5680131", "0.56463444" ]
0.79293066
0
Internal implementation of the image downloading. Opens the URLs file and iterates over each URL.
def _download_images(self, url_file, destination_dir, log_file):
    logger = self.setup_log(log_file)
    logger.info(config.LOG_INITIAL_MESSAGE % (url_file, destination_dir))

    with open(url_file) as urls:
        for i, l in enumerate(urls):
            pass
    bar = progressbar.ProgressBar(i + 1)

    download_count = 0

    # opening the url file and reading the urls
    with open(url_file, 'r') as urls:
        for i, url in enumerate(urls):
            bar.set(i)

            url = url.strip()
            components = urllib.parse.urlparse(url)
            if not (components.scheme and components.netloc and components.path):
                logger.error('%s: "%s"' % (config.LOG_URL_INVALID, self.truncate_middle(url, config.MAX_URL)))
                continue

            # check whether the robots.txt allows us to crawl this URL
            try:
                can_fetch = self.download_allowed(url, components.scheme, components.netloc)
            except (AttributeError, urllib.error.URLError, ValueError):
                logger.error('%s: %s' % (config.LOG_ERROR_ROBOTS, self.truncate_middle(url, config.MAX_URL)))
                continue

            # log that image download is disallowed
            if not can_fetch:
                logger.error('%s: %s' % (config.LOG_DISALLOWED, self.truncate_middle(url, config.MAX_URL)))
                continue

            # open image url
            try:
                url_response = urllib.request.urlopen(url)
            except urllib.error.URLError as error:
                logger.error('%s: %s' % (config.LOG_ERROR_OPENING, self.truncate_middle(url, config.MAX_URL)))
                continue

            # check whether the URL content is an image
            if url_response.info().get_content_maintype().lower() != config.IMAGE_MIMETYPE:
                logger.error('%s: %s' % (config.LOG_NOT_AN_IMAGE, self.truncate_middle(url, config.MAX_URL)))
                continue

            # retrieve the content and store in the destination directory
            os.makedirs(destination_dir, exist_ok=True)
            image_name = '%s_%s' % (download_count + 1, os.path.basename(url))
            with open(os.path.join(destination_dir, image_name), 'wb') as image_file:
                try:
                    image_file.write(url_response.read())
                except urllib.error.URLError as error:
                    logger.error('%s: %s' % (config.LOG_ERROR_DOWNLOADING, self.truncate_middle(url, config.MAX_URL)))
                    continue

            # log download and increment the counter
            logger.info('%s %s, url: %s' % (config.LOG_DOWNLOADED, self.truncate_middle(image_name, config.MAX_FILE_NAME), self.truncate_middle(url, config.MAX_URL)))
            download_count += 1

    # set the progress bar to 100 percent and print a comment and new line for the returning prompt
    bar.complete('completed')

    # release the logger handles
    self.shutdown_log(logger)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. \n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def _download_images(self, image_urls: typing.List[str], save_dir: str) -> typing.List[str]:\n\n\t\timage_paths = []\n\n\t\tfor i, url in enumerate(image_urls):\n\t\t\timage = self.send_request_image(url)\n\n\t\t\timage_ext = url.split(\".\")[-1]\n\n\t\t\timage_dst_path = os.path.join(save_dir, f\"{i}.{image_ext}\")\n\n\t\t\tif image is not None:\n\t\t\t\twith open(image_dst_path, \"wb\") as fh:\n\n\t\t\t\t\t# Magic boolean which makes it work\n\t\t\t\t\timage.raw.decode_content = True\n\n\t\t\t\t\t# noinspection PyBroadException\n\n\t\t\t\t\t# Attempt to download the image from the URL\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfileobj(image.raw, fh)\n\n\t\t\t\t\t# We should reduce the scope\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\t# We downloaded the image without any errors\n\t\t\t\t\telse:\n\t\t\t\t\t\timage_paths.append(image_dst_path)\n\n\t\treturn image_paths", "def create_image_urls(self):\n self._image_urls = []\n while True:\n image_url = self._create_random_url()\n request = urllib2.Request(image_url)\n opener = urllib2.build_opener(NoRedirection)\n try:\n response = opener.open(request)\n code = response.code\n except urllib2.HTTPError as error:\n code = error.code\n if code == 200:\n print \"Found a successful url!\"\n self._image_urls.append(image_url)\n if len(self._image_urls) > 100:\n break\n print self._image_urls\n image_url_file = open(self._image_urls_file_name, 'w')\n for image_url in self._image_urls:\n image_url_file.write(image_url + '\\n')\n image_url_file.close()", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_images(urlList):\n fileNumber = 1;\n fileName = \"\"\n\n # urlList[0] is just titles, so we start at 1\n for url in urlList[1:]:\n sys.stdout.write(\"\\rFile number %i of %i \" % (fileNumber+1, len(urlList)))\n\n sys.stdout.flush()\n\n try:\n fileName = str(fileNumber) + \".png\"\n # Download the file from `url` and save it locally under `fileName`:\n # I append png to the end of the file to \"make it\" png, but there's definitely a better way\n with urllib.request.urlopen(url) as response, open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n sys.stdout.flush()\n print(\"\\r %s is not a downloadable image. 
Skipping to next url...\" % url)\n \n fileNumber += 1;\n\n sys.stdout.write(\"\\r\\nDone!\")\n sys.stdout.flush()\n sys.stdout.write(\"\\r\\n\")", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched", "def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "def download_images(main_keyword, supplemented_keywords, download_dir): \n image_links = set()\n print('Process {0} Main keyword: {1}'.format(os.getpid(), main_keyword))\n\n # create a directory for a main keyword\n img_dir = download_dir + main_keyword + '/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n for j in range(len(supplemented_keywords)):\n print('Process {0} supplemented keyword: {1}'.format(os.getpid(), supplemented_keywords[j]))\n search_query = quote(main_keyword + ' ' + supplemented_keywords[j])\n # url = 'https://www.google.com/search?q=' + search_query + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'\n url = 'https://www.google.com/search?q=' + search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n print('Process {0} get {1} links so far'.format(os.getpid(), len(image_links)))\n time.sleep(2)\n print (\"Process {0} get totally {1} links\".format(os.getpid(), len(image_links)))\n\n print (\"Start downloading...\")\n count = 1\n for link in image_links:\n try:\n req = urllib.request.Request(link, headers = {\"User-Agent\": generate_user_agent()})\n response = urllib.request.urlopen(req)\n data = response.read()\n file_path = img_dir + '{0}.jpg'.format(count)\n with open(file_path,'wb') as wf:\n wf.write(data)\n print('Process {0} fininsh image {1}/{2}.jpg'.format(os.getpid(), main_keyword, count))\n count += 1\n except urllib.error.URLError as e:\n logging.error('URLError while downloading image {0}\\nreason:{1}'.format(link, e.reason))\n continue\n except urllib.error.HTTPError as e:\n logging.error('HTTPError while downloading image {0}\\nhttp code {1}, reason:{2}'.format(link, e.code, e.reason))\n continue\n except Exception as e:\n logging.error('Unexpeted error while downloading image {0}\\nerror type:{1}, args:{2}'.format(link, type(e), e.args))\n continue\n\n print(\"Finish downloading, total {0} 
errors\".format(len(image_links) - count))", "def download_images(self, url_file, destination_dir, log_file):\n try:\n self._download_images(url_file, destination_dir, log_file)\n except IOError as error:\n sys.stderr.write(str(error))\n sys.exit(error.errno)\n except Exception as error:\n sys.stderr.write('[Unknown error] %s' % str(error))\n sys.exit(1)", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def download_images(img_urls, dest_dir):\n if len(img_urls) > 0 :\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n # save each images file name\n image_names = []\n # Iterate over each image url, downloading the image to a local file\n img_ctr = 0\n for url in img_urls :\n file_name = 'img' + str(img_ctr) + '.jpg'\n image_names.append(file_name)\n full_name = dest_dir + '/' + file_name\n print('Writing file: %s from %s' % (full_name, url) )\n # When calling the SSLContext constructor directly, CERT_NONE is the default.\n # Since it does not authenticate the other peer it can be insecure\n # Beyond the scope of this exercise (emoji holding my nose)\n unsecure_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n with urllib.request.urlopen(url, context=unsecure_context) as response, open(full_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n img_ctr += 1\n return image_names", "def download_images(img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n # If the directory doesn't exist, create it\n os.mkdir(dest_dir)\n count = 0\n img_string = ''\n # Copies each file from the url provided to the directory provided\n for file in img_urls:\n new_filename = '{}/img{}.jpg'.format(dest_dir, count)\n print \"Retrieving {}\".format(file)\n urllib.urlretrieve(file, new_filename)\n img_string += \"<img src = 'img{}.jpg'>\".format(count)\n count += 1\n print \"Retrieved {} files\".format(count)\n # Creates an html file to display the completed image\n with open('{}/index.html'.format(dest_dir), 'w') as f:\n f.write(\n '<html>\\n<body>\\n{}\\n</body>\\n</html>'.format(img_string)\n )\n pass", "def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths", "def _read_image_urls(self):\n if not os.path.isfile(self._image_urls_file_name):\n raise IOError, \"'%s' is not found\" % self._image_urls_file_name\n if 
os.path.getsize(self._image_urls_file_name) == 0:\n raise IOError, \"'%s' is empty\" % self._image_urls_file_name\n for line in open(self._image_urls_file_name, 'r'):\n self._image_urls.append(line.strip())", "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_images(img_urls, dest_dir):\n # Creating the directory if the directory does not already exist\n if not os.path.exists(str(dest_dir)):\n os.mkdir(dest_dir)\n print ('Retrieving...')\n with open(str(dest_dir) + '/index.html', 'w') as f:\n f.write(\"<html>\\n<body>\\n\")\n for index, url in enumerate(img_urls):\n img_name = 'img' + str(index + 1)\n urllib.urlretrieve(\"https://code.google.com\" + url, filename=str(dest_dir) + '/'\n + img_name +'.jpg')\n print ('Downloaded ' + url[-10:] + \": \" + \\\n str(index + 1) + \" images downloaded\")\n\n f.write(\"<img src=\" + '\"' + img_name +\".jpg\" +'\">')\n f.write(\"\\n</html>\\n</body>\")\n print ('Download Complete!')\n pass", "def collect_images_from_urls(url_filepath, target_folder, image_class_name):\n\n def get_img_from_url(index, url):\n \"\"\"Closure function invoked by each running downloading Thread\"\"\"\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass\n\n image_filename_prefix = '{name}_{counter}.{ext}'\n list_of_urls = list()\n with open(url_filepath) as url_file:\n for url in url_file:\n url = url.strip()\n list_of_urls.append(url)\n\n print('Collected {} total URLS'.format(len(list_of_urls)))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as thread_pool:\n for idx, url in enumerate(list_of_urls):\n thread_pool.submit(get_img_from_url, idx, url)", "def download_images(img_urls, dest_dir):\n # +++your code here+++\n (errcode, statusmsg) = check_create_dir(dest_dir)\n if errcode:\n print statusmsg\n sys.exit(errcode)\n else: print statusmsg\n # retrieve images and generate html code for files\n html_str = '<html>\\n<body>\\n' # opening html file tags\n i = 0\n for img in img_urls:\n img_filename = 'img' + str(i)\n full_filepath = os.path.join(dest_dir, img_filename) \n print 'Retrievieng ' + img + ' to ' + full_filepath + ' file..'\n urllib.urlretrieve(img, full_filepath)\n html_str += '<img src=\\\"' + img_filename + '\\\">'\n i += 1\n html_str += '\\n</html>\\n</body>' # closing html file tags\n # create html file\n html_filename = os.path.join(dest_dir, 'index.html')\n f = open(html_filename, 'w')\n f.write(html_str) \n f.close()\n print 'File ' + html_filename + ' was created.'", "def __urlImageGenerator(cls, link):\n\n try:\n a = 
Article(url=link)\n a.download()\n a.parse()\n a.fetch_images()\n\n for img in a.imgs:\n yield img\n except Exception:\n pass", "def download_images(pages):\n try:\n pool = Pool(conf.MAX_PROCESS)\n pool.map_async(get_image_from_page, pages)\n pool.close()\n pool.join()\n except:\n pool.close()\n pool.join()", "def read_from_server(url_base=\"http://10.200.102.18/\", url_dir=\"G179-dataset/\"):\n\n all_images = urllib2.urlopen(url_base + url_dir).read()\n\n parser = ImagesHTMLParser()\n parser.feed(all_images)\n data = parser.data\n imgs = []\n\n print(\"Found %d images!\" % len(data))\n print(\"Started Download!\")\n i = 1\n\n for d in data:\n print(\"\\rProgress: %d/%d \" % (i, len(data)), end='')\n dl_img = urllib2.urlopen(url_base + url_dir + d).read()\n asd = cStringIO.StringIO(dl_img)\n img = Image.open(asd)\n imgs.append(np.array(img))\n i = i + 1\n\n return imgs", "def download_images(img_urls, dest_dir, base_url=\"http://code.google.com\"):\n create_dir(dest_dir)\n img_tags = fetch_call(img_urls, dest_dir)\n create_html(dest_dir, img_tags)", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_files(self):", "def download_pics(pic_urls, directory):\r\n print(\"downloading pictures...\")\r\n for url in pic_urls:\r\n name = url.split(\"/\")[-1]\r\n if len(name) >= 20:\r\n name = name[len(name)-20:]\r\n \r\n print('from:', url)\r\n pic_path = directory + name\r\n if not os.path.exists(pic_path):\r\n print(\"downloading ->\", pic_path)\r\n try:\r\n urllib.request.urlretrieve(url, pic_path)\r\n except ValueError:\r\n # 'http://' missing from link\r\n urllib.request.urlretrieve(\"http://\" + url, pic_path)\r\n except urllib.error.HTTPError:\r\n # access forbidden\r\n # ex: http://puu.sh/n2zPL/2491975ef3.jpg\r\n print(\"URL skipped due to HTTPError\", url)\r\n else:\r\n print(\"already downloaded ->\", pic_path)\r\n print(\"Downloads Finished\")", "def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def download_all(self):\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n for website in self.website_list:\r\n self.download(website['id'])" ]
[ "0.78803545", "0.741669", "0.72933877", "0.7226717", "0.71155745", "0.711161", "0.7077615", "0.69910926", "0.6969075", "0.68928254", "0.68679714", "0.68267655", "0.68218905", "0.6775458", "0.6758594", "0.6738965", "0.67308867", "0.6698974", "0.66714567", "0.6645975", "0.66432416", "0.65874064", "0.65798855", "0.6559124", "0.65373915", "0.65052253", "0.6504822", "0.6483421", "0.6470945", "0.6459745" ]
0.7687683
1
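The _download_images method above delegates the robots.txt check to a self.download_allowed helper that is not shown in this record. Purely as an illustration, a helper of that shape could be built on the standard library's urllib.robotparser; the function name, signature, and default user agent below are assumptions, not the original implementation.

import urllib.robotparser

def download_allowed(url, scheme, netloc, user_agent='*'):
    # fetch and parse <scheme>://<netloc>/robots.txt, then ask whether user_agent may fetch url
    parser = urllib.robotparser.RobotFileParser()
    parser.set_url('%s://%s/robots.txt' % (scheme, netloc))
    parser.read()
    return parser.can_fetch(user_agent, url)

# example (performs a network request):
# print(download_allowed('https://www.python.org/images/python-logo.gif', 'https', 'www.python.org'))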
Downloads images from URLs given by the url_file, stores them into the directory destination_dir, and logs the progress in the log_file.
def download_images(self, url_file, destination_dir, log_file):
    try:
        self._download_images(url_file, destination_dir, log_file)
    except IOError as error:
        sys.stderr.write(str(error))
        sys.exit(error.errno)
    except Exception as error:
        sys.stderr.write('[Unknown error] %s' % str(error))
        sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _download_images(self, url_file, destination_dir, log_file):\n logger = self.setup_log(log_file)\n logger.info(config.LOG_INITIAL_MESSAGE % (url_file, destination_dir))\n\n with open(url_file) as urls:\n for i, l in enumerate(urls):\n pass\n bar = progressbar.ProgressBar(i + 1)\n\n download_count = 0\n\n # opening the url file and reading the urls\n with open(url_file, 'r') as urls:\n for i, url in enumerate(urls):\n bar.set(i)\n\n url = url.strip()\n components = urllib.parse.urlparse(url)\n if not (components.scheme and components.netloc and components.path):\n logger.error('%s: \"%s\"' % (config.LOG_URL_INVALID, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # check whether the robots.txt allows us to crawl this URL\n try:\n can_fetch = self.download_allowed(url, components.scheme, components.netloc)\n except (AttributeError, urllib.error.URLError, ValueError):\n logger.error('%s: %s' % (config.LOG_ERROR_ROBOTS, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # log that image download is disallowed\n if not can_fetch:\n logger.error('%s: %s' % (config.LOG_DISALLOWED, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # open image url\n try:\n url_response = urllib.request.urlopen(url)\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_OPENING, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # check whether the URL content is an image \n if url_response.info().get_content_maintype().lower() != config.IMAGE_MIMETYPE:\n logger.error('%s: %s' % (config.LOG_NOT_AN_IMAGE, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # retrieve the content and store in the destination directory\n os.makedirs(destination_dir, exist_ok=True) \n image_name = '%s_%s' % (download_count + 1, os.path.basename(url))\n with open(os.path.join(destination_dir, image_name), 'wb') as image_file:\n try:\n image_file.write(url_response.read())\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_DOWNLOADING, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # log download and increment the counter\n logger.info('%s %s, url: %s' % (config.LOG_DOWNLOADED, self.truncate_middle(image_name, config.MAX_FILE_NAME), self.truncate_middle(url, config.MAX_URL)))\n download_count += 1\n\n # set the progress bar to 100 percent and print a comment and new line for the returning prompt\n bar.complete('completed')\n\n # release the logger handles\n self.shutdown_log(logger)", "def collect_images_from_urls(url_filepath, target_folder, image_class_name):\n\n def get_img_from_url(index, url):\n \"\"\"Closure function invoked by each running downloading Thread\"\"\"\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass\n\n image_filename_prefix = '{name}_{counter}.{ext}'\n list_of_urls = list()\n with open(url_filepath) as url_file:\n for url in url_file:\n url = url.strip()\n list_of_urls.append(url)\n\n print('Collected {} total URLS'.format(len(list_of_urls)))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as thread_pool:\n for idx, url in 
enumerate(list_of_urls):\n thread_pool.submit(get_img_from_url, idx, url)", "def download_images(img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n # If the directory doesn't exist, create it\n os.mkdir(dest_dir)\n count = 0\n img_string = ''\n # Copies each file from the url provided to the directory provided\n for file in img_urls:\n new_filename = '{}/img{}.jpg'.format(dest_dir, count)\n print \"Retrieving {}\".format(file)\n urllib.urlretrieve(file, new_filename)\n img_string += \"<img src = 'img{}.jpg'>\".format(count)\n count += 1\n print \"Retrieved {} files\".format(count)\n # Creates an html file to display the completed image\n with open('{}/index.html'.format(dest_dir), 'w') as f:\n f.write(\n '<html>\\n<body>\\n{}\\n</body>\\n</html>'.format(img_string)\n )\n pass", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def download_images(urlList):\n fileNumber = 1;\n fileName = \"\"\n\n # urlList[0] is just titles, so we start at 1\n for url in urlList[1:]:\n sys.stdout.write(\"\\rFile number %i of %i \" % (fileNumber+1, len(urlList)))\n\n sys.stdout.flush()\n\n try:\n fileName = str(fileNumber) + \".png\"\n # Download the file from `url` and save it locally under `fileName`:\n # I append png to the end of the file to \"make it\" png, but there's definitely a better way\n with urllib.request.urlopen(url) as response, open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n sys.stdout.flush()\n print(\"\\r %s is not a downloadable image. 
Skipping to next url...\" % url)\n \n fileNumber += 1;\n\n sys.stdout.write(\"\\r\\nDone!\")\n sys.stdout.flush()\n sys.stdout.write(\"\\r\\n\")", "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_images(img_urls, dest_dir, base_url=\"http://code.google.com\"):\n create_dir(dest_dir)\n img_tags = fetch_call(img_urls, dest_dir)\n create_html(dest_dir, img_tags)", "def download(\n images_url,\n filename,\n buffer_size=_DEFAULT_BUFFER_SIZE,\n print_progress=False,\n download_limit=None\n ):\n download_limit = download_limit or _DEFAULT_DOWNLOAD_LIMIT\n log(\"TRACE\", \"Downloading {} to {}\".format(images_url, filename))\n try:\n resp = requests.get(images_url, stream=True, proxies=_PROXIES,\n headers={'User-Agent': 'UHD Images Downloader'})\n except TypeError:\n # requests library versions pre-4c3b9df6091b65d8c72763222bd5fdefb7231149\n # (Dec.'12) workaround\n resp = requests.get(images_url, prefetch=False, proxies=_PROXIES,\n headers={'User-Agent': 'UHD Images Downloader'})\n if resp.status_code != 200:\n raise RuntimeError(\"URL does not exist: {}\".format(images_url))\n filesize = float(resp.headers['content-length'])\n if filesize > download_limit:\n if not ask_permission(\n \"The file size for this target ({:.1f} MiB) exceeds the \"\n \"download limit ({:.1f} MiB). Continue downloading?\".format(\n filesize/1024**2, download_limit/1024**2)):\n return 0, 0, \"\"\n filesize_dl = 0\n base_filename = os.path.basename(filename)\n if print_progress and not sys.stdout.isatty():\n print_progress = False\n log(\"INFO\", \"Downloading {}, total size: {} kB\".format(\n base_filename, filesize/1000))\n with open(filename, \"wb\") as temp_file:\n sha256_sum = hashlib.sha256()\n for buff in resp.iter_content(chunk_size=buffer_size):\n if buff:\n temp_file.write(buff)\n filesize_dl += len(buff)\n sha256_sum.update(buff)\n if print_progress:\n status = r\"%05d kB / %05d kB (%03d%%) %s\" % (\n int(math.ceil(filesize_dl / 1000.)), int(math.ceil(filesize / 1000.)),\n int(math.ceil(filesize_dl * 100.) 
/ filesize),\n base_filename)\n if os.name == \"nt\":\n status += chr(8) * (len(status) + 1)\n else:\n sys.stdout.write(\"\\x1b[2K\\r\") # Clear previous line\n sys.stdout.write(status)\n sys.stdout.flush()\n if print_progress:\n print('')\n return filesize, filesize_dl, sha256_sum.hexdigest()", "def download_image_and_save(image_url, destination):\n response = requests.get(image_url, stream=True)\n with open(destination, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response", "def download_images(img_urls, dest_dir):\n # Creating the directory if the directory does not already exist\n if not os.path.exists(str(dest_dir)):\n os.mkdir(dest_dir)\n print ('Retrieving...')\n with open(str(dest_dir) + '/index.html', 'w') as f:\n f.write(\"<html>\\n<body>\\n\")\n for index, url in enumerate(img_urls):\n img_name = 'img' + str(index + 1)\n urllib.urlretrieve(\"https://code.google.com\" + url, filename=str(dest_dir) + '/'\n + img_name +'.jpg')\n print ('Downloaded ' + url[-10:] + \": \" + \\\n str(index + 1) + \" images downloaded\")\n\n f.write(\"<img src=\" + '\"' + img_name +\".jpg\" +'\">')\n f.write(\"\\n</html>\\n</body>\")\n print ('Download Complete!')\n pass", "def download(self, url, destination):\n fileDownloader = utils.HttpFileDownloader(url, destination)\n fileDownloader.download()", "def download_images(img_urls, dest_dir):\n # +++your code here+++\n (errcode, statusmsg) = check_create_dir(dest_dir)\n if errcode:\n print statusmsg\n sys.exit(errcode)\n else: print statusmsg\n # retrieve images and generate html code for files\n html_str = '<html>\\n<body>\\n' # opening html file tags\n i = 0\n for img in img_urls:\n img_filename = 'img' + str(i)\n full_filepath = os.path.join(dest_dir, img_filename) \n print 'Retrievieng ' + img + ' to ' + full_filepath + ' file..'\n urllib.urlretrieve(img, full_filepath)\n html_str += '<img src=\\\"' + img_filename + '\\\">'\n i += 1\n html_str += '\\n</html>\\n</body>' # closing html file tags\n # create html file\n html_filename = os.path.join(dest_dir, 'index.html')\n f = open(html_filename, 'w')\n f.write(html_str) \n f.close()\n print 'File ' + html_filename + ' was created.'", "def download_pics(pic_urls, directory):\r\n print(\"downloading pictures...\")\r\n for url in pic_urls:\r\n name = url.split(\"/\")[-1]\r\n if len(name) >= 20:\r\n name = name[len(name)-20:]\r\n \r\n print('from:', url)\r\n pic_path = directory + name\r\n if not os.path.exists(pic_path):\r\n print(\"downloading ->\", pic_path)\r\n try:\r\n urllib.request.urlretrieve(url, pic_path)\r\n except ValueError:\r\n # 'http://' missing from link\r\n urllib.request.urlretrieve(\"http://\" + url, pic_path)\r\n except urllib.error.HTTPError:\r\n # access forbidden\r\n # ex: http://puu.sh/n2zPL/2491975ef3.jpg\r\n print(\"URL skipped due to HTTPError\", url)\r\n else:\r\n print(\"already downloaded ->\", pic_path)\r\n print(\"Downloads Finished\")", "def download(self, url):\n req = self.request(url)\n inputfile, outputfile = BytesIO(urlopen(req).read()), BytesIO()\n\n img = Image.open(inputfile)\n img = img.convert(\"RGB\") if img.mode != \"RGB\" else img\n img.thumbnail((192, 192), Image.ANTIALIAS)\n img.save(outputfile, \"JPEG\")\n\n self.image.save(os.path.basename(\n self._clean_url(url)),\n ContentFile(outputfile.getvalue()),\n save=False,\n )", "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. 
\n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def image_downloader(url, file_path, file_name):\n response = requests.get(url, stream=True)\n with open(file_path + \"/\" + file_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)", "def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)", "def download_images(img_urls, dest_dir):\n if len(img_urls) > 0 :\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n # save each images file name\n image_names = []\n # Iterate over each image url, downloading the image to a local file\n img_ctr = 0\n for url in img_urls :\n file_name = 'img' + str(img_ctr) + '.jpg'\n image_names.append(file_name)\n full_name = dest_dir + '/' + file_name\n print('Writing file: %s from %s' % (full_name, url) )\n # When calling the SSLContext constructor directly, CERT_NONE is the default.\n # Since it does not authenticate the other peer it can be insecure\n # Beyond the scope of this exercise (emoji holding my nose)\n unsecure_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n with urllib.request.urlopen(url, context=unsecure_context) as response, open(full_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n img_ctr += 1\n return image_names", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def download_image(url, dest):\n logging.info('Downloading {} into {}'.format(url, dest))\n dest = um.join_paths(dest, url.split('/')[-1])\n response = requests.get(url)\n if um.is_image_response(response):\n with open(dest, 'wb') as f:\n f.write(response.content)", "def _download_images(self, image_urls: typing.List[str], save_dir: str) -> typing.List[str]:\n\n\t\timage_paths = []\n\n\t\tfor i, url in enumerate(image_urls):\n\t\t\timage = self.send_request_image(url)\n\n\t\t\timage_ext = url.split(\".\")[-1]\n\n\t\t\timage_dst_path = os.path.join(save_dir, f\"{i}.{image_ext}\")\n\n\t\t\tif image is not None:\n\t\t\t\twith open(image_dst_path, \"wb\") as fh:\n\n\t\t\t\t\t# Magic boolean which makes it work\n\t\t\t\t\timage.raw.decode_content = True\n\n\t\t\t\t\t# noinspection PyBroadException\n\n\t\t\t\t\t# Attempt to download the image from the URL\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfileobj(image.raw, fh)\n\n\t\t\t\t\t# We should reduce the scope\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\t# We downloaded the image without any errors\n\t\t\t\t\telse:\n\t\t\t\t\t\timage_paths.append(image_dst_path)\n\n\t\treturn image_paths", "def save_image(filename: str, img_url: str) -> None:\n\n if not 
(os.path.isfile(filename)): # Check if the file already exists\n print('Downloading image {}...'.format(img_url))\n res = requests.get(img_url) # Download the image.\n res.raise_for_status()\n\n # Save the image\n image_file = open(filename, 'wb')\n for chunk in res.iter_content(100000):\n image_file.write(chunk)\n image_file.close()", "def save_image(self, url_and_filename: List[str]) -> None:\n\n res_jpg = requests.get(url_and_filename[0])\n res_jpg.raise_for_status()\n os.makedirs(os.path.join(THIS_DIR, 'comics'), exist_ok=True)\n open(os.path.join(THIS_DIR, f'comics/{url_and_filename[1]}'), \"wb\").write(res_jpg.content)", "def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched", "def download_url(url, destination_filename=None, progress_updater=None,\\\n force_download=False, quiet=True):\n \n # This is not intended to guarantee uniqueness, we just know it happens to guarantee\n # uniqueness for this application.\n if destination_filename is None:\n url_as_filename = url.replace('://', '_').replace('/', '_') \n destination_filename = \\\n os.path.join(temp_dir,url_as_filename)\n if (not force_download) and (os.path.isfile(destination_filename)):\n if not quiet:\n print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url)))\n return destination_filename\n print('Downloading file {} to {}'.format(os.path.basename(url),destination_filename),end='')\n urllib.request.urlretrieve(url, destination_filename, progress_updater) \n assert(os.path.isfile(destination_filename))\n nBytes = os.path.getsize(destination_filename)\n print('...done, {} bytes.'.format(nBytes))\n return destination_filename", "def download(self, url, path_to_dir):\n\n if not os.path.exists(path_to_dir):\n os.makedirs(path_to_dir)\n\n raw_data = self.__class__.get_raw_data(url)\n path_to_image = os.path.join(path_to_dir, url.split('/')[-1].split('?')[0])\n with open(path_to_image, 'wb') as f:\n self.__class__.copy_to(raw_data, f)\n\n return path_to_image", "def main(file_path, urls):\n # format urls input\n with open(urls, 'r') as file:\n urls = file.read().replace('\\n', '')\n\n urls = urls.strip('[]')\n urls = re.findall(r'\\([^\\)\\(]*\\)', urls)\n\n for file in urls:\n\n file_name, url = tuple(file.strip('()').split(', '))\n\n # check if file is already downloaded\n if os.path.exists(os.path.join(file_path, file_name)):\n print(\"%s already exists.\\n\" % file_name)\n continue\n else:\n print(\"Starting download for %s...\\n\" % file_name)\n\n # Create the data subdirectory if it doesn't exist\n os.makedirs(file_path, 
exist_ok=True)\n\n # create response object\n r = requests.get(url, stream=True)\n widgets = [\"Progress: \",\n progressbar.DataSize(), \"| \",\n progressbar.Timer()]\n bar = progressbar.ProgressBar(widgets=widgets,\n max_value=progressbar.UnknownLength)\n value = 0\n # download started\n with open(os.path.join(file_path, file_name), 'wb') as f:\n for chunk in r.iter_content(chunk_size=64*1024):\n if chunk:\n f.write(chunk)\n value += len(chunk)\n bar.update(value)\n\n print(\"\\n%s downloaded!\\n\" % file_name)\n\n print(\"All files downloaded!\")", "def download_data_files(self, dest_directory):\n\t\tif not os.path.exists(dest_directory):\n\t\t\tos.makedirs(dest_directory)\n\t\tfilename = DATA_URL.split('/')[-1]\n\t\tfilepath = os.path.join(dest_directory, filename)\n\t\tif not os.path.exists(filepath):\n\t\t\tdef _progress(count, block_size, total_size):\n\t\t\t\tsys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n\t\t\t\t\t\tfloat(count * block_size) / float(total_size) * 100.0))\n\t\t\t\tsys.stdout.flush()\n\t\t\tfilepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\t\t\tprint()\n\t\t\tstatinfo = os.stat(filepath)\n\t\t\tprint('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\t\textracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n\t\tif not os.path.exists(extracted_dir_path):\n\t\t\ttarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download_and_save_image(imgurl, save_dir, num_retries=5, retry_interval=10):\n parse_result = urlparse(imgurl)\n img_name = os.path.basename(parse_result.path)\n img_id = img_name.split(\".\")[0]\n img_data = url_fetch(imgurl, attempt=0, num_retries=num_retries, retry_interval=retry_interval)\n save_name = os.path.join(save_dir, img_name)\n with open(save_name, \"wb\") as f:\n f.write(img_data)\n return {\"path\": save_name, \"img_id\": img_id}" ]
[ "0.87778664", "0.7013445", "0.69618165", "0.6846969", "0.6748923", "0.66912276", "0.6671626", "0.66698825", "0.6666592", "0.66486555", "0.66068697", "0.6581736", "0.6539759", "0.65313685", "0.65105104", "0.65020895", "0.648967", "0.64734596", "0.64350826", "0.6407587", "0.63675845", "0.63257915", "0.6298046", "0.6284764", "0.62831986", "0.62517875", "0.6217284", "0.6215705", "0.6215376", "0.620613" ]
0.8535847
1
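The wrapper above maps an IOError to its errno as the process exit code and any other exception to exit code 1. The standalone sketch below demonstrates the same pattern outside the class; the function name and the commented example call are illustrative assumptions, not part of the record.

import sys

def run_with_exit_codes(task, *args):
    # mirrors the wrapper: IOError exits with its errno, anything else exits with code 1
    try:
        task(*args)
    except IOError as error:
        sys.stderr.write(str(error))
        sys.exit(error.errno)
    except Exception as error:
        sys.stderr.write('[Unknown error] %s' % str(error))
        sys.exit(1)

# example: exits with errno 2 (ENOENT) when the file does not exist
# run_with_exit_codes(lambda: open('/nonexistent/urls.txt'))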
Convert from a psmeca Sm format string to a SymMT object
def psmeca2SymMT( string ):

    # convert first 12 columns into array
    mtline = NP.fromstring( string, count=12, sep =' ', dtype=float )

    # get the location part
    c = mtline[0:3]  # assume lon/lat/depth are centroid
    h = NP.array( [mtline[10], mtline[11], mtline[2]] )  # assume second lon/lat are hypocenter

    # get moment tensor part, voigt notation
    # [mrr mtt mpp mrt mrp mtp] -> [mrr mtt mpp mtp mrp mrt]
    mhat = NP.r_[mtline[3:6], mtline[8], mtline[7], mtline[6]]
    exp = mtline[9]

    # remove the norm, take care with off diagonals
    norm = NP.sqrt( NP.sum( mhat**2 ) + NP.sum( mhat[3:]**2 ) )
    mhat /= norm

    # get the true norm
    norm *= 10**exp

    # make the moment tensor object
    return SymMT( mhat=mhat, Norm=norm, c=c, h=h )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def psmeca2EigMT( string ): \n \n # convert first 12 columns into array\n mtline = NP.fromstring( string, count=12, sep =' ', dtype=float )\n \n # get the location part\n c = mtline[0:3] # assume lon/lat/depth are centroid\n h = NP.array( [mtline[10], mtline[11], mtline[2]] ) # assume second lon/lat are hypocenter\n\n # get moment tensor part, voigt notation\n exp = mtline[9]\n # [mrr mtt mpp mrt mrp mtp] -> [mrr mtt mpp mtp mrp mrt]\n m = ( NP.r_[mtline[3:6], mtline[8], mtline[7], mtline[6]] )* 10**exp\n\n # make the moment tensor object\n return EigMT( m=m, c=c, h=h )", "def SM2m(sm):\n return sm * 1609.344", "def stringToSymMod(string):\n string = string.strip() #delete all surrounding whitespaces\n i = 0\n symbol = \"\"\n # read the symbol\n while i < len(string) and string[i] != \"(\":\n symbol = symbol + string[i]\n i = i + 1\n # if parameters are present, get them\n if i< len(string) and string[i] == \"(\": # If true then parameters will follow, else we are done\n i = i + 1 # skip the opening bracket\n params = string[i:(len(string) - 1)].split(\",\")\n for i in range(0,len(params)):\n params[i] = params[i].strip()\n return(Module(symbol,params))\n else:\n return(Module(symbol,[]))", "def convert_to_semanticsymbol(cls, elem):\r\n if (len(elem) == 0):\r\n return None\r\n\r\n elem_content = io.StringIO(elem) # treat the string as if a file\r\n root = xml.etree.ElementTree.parse(elem_content).getroot()\r\n\r\n return SemanticSymbol.parse_from_mathml(root)", "def galactic_to_MS():\n return MS_MATRIX", "def stringToMod(string):\n string = string.strip() \n i = 0\n symbol = \"\"\n # read the symbol\n while i < len(string) and string[i] != \"(\":\n symbol = symbol + string[i]\n i = i + 1\n # if parameters are present, get them\n if i< len(string) and string[i] == \"(\": \n i = i + 1 # skip the opening bracket\n params = string[i:(len(string) - 1)].split(\",\")\n for i in range(0,len(params)):\n params[i] = float(params[i].strip())\n return(Module(symbol,params))\n else:\n return(Module(symbol,[]))", "def stringToSymModWithExpr(string):\n parser = Parser()\n string = string.strip() #delete all surrounding whitespaces\n i = 0\n symbol = \"\"\n # read the symbol\n while i < len(string) and string[i] != \"(\":\n symbol = symbol + string[i]\n i = i + 1\n # if parameters are present, get them\n if i < len(string) and string[i] == \"(\": # If true then parameters will follow, else we are done\n i = i + 1 # skip the opening bracket\n params = string[i:(len(string) - 1)].split(\",\")\n for i in range(0,len(params)):\n params[i] = parser.parse(params[i].strip())\n return(Module(symbol,params))\n else:\n return(Module(symbol,[]))", "def test_psi4_qm_2g():\n subject = copy.deepcopy(subject2)\n subject[1] = \"\"\"@Ne_{CN}_O 2 4 6\"\"\"\n subject = '\\n--\\n'.join(subject)\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def read_psmecalist( istream , isEig=False ):\n\n mtlist=[] # this will be the output list\n\n # read everything\n alltxt = NP.genfromtxt( istream, delimiter='\\n' , dtype=str)\n try: \n istream.close()\n except:\n tmp=1\n\n # loop through all tensors\n n = len(alltxt)\n\n # check for desired output type\n if isEig:\n for i in range(0,n):\n mtlist.append( psmeca2EigMT( alltxt[i] ) )\n else:\n for i in range(0,n):\n mtlist.append( psmeca2SymMT( alltxt[i] ) )\n\n \n return mtlist, alltxt", "def test_psi4_qm_2c():\n subject = copy.deepcopy(subject2)\n subject.insert(0, '1 3\\n1 3')\n subject 
= '\\n--\\n'.join(subject)\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def test_xyzp_qm_7b():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='xyz')", "def test_xyzp_qm_7a():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='psi4')", "def mc2ms(mc,eta):\n root = np.sqrt(0.25-eta)\n fraction = (0.5+root) / (0.5-root)\n invfraction = 1/fraction\n\n m2= mc * np.power((1+fraction),0.2) / np.power(fraction,0.6)\n\n m1= mc* np.power(1+invfraction,0.2) / np.power(invfraction,0.6)\n return (m1,m2)", "def _convert_meta(m):\n # Decode Pascal style string with 4 bytes length field\n l = struct.unpack(\"<I\", m[:4])[0]\n return m[4:4+l]", "def smi2patt(smi):\n # By default, RDKit won't recognize \"C=[NH]=C\",\n # setting sanitize=F recitify this\n _m = Chem.MolFromSmiles(smi) #, sanitize=False)\n zs = []; cns = []; repls = []; atypes = []\n for ai in _m.GetAtoms():\n zi = ai.GetAtomicNum()\n cni = ai.GetTotalDegree()\n # here we use '<' & '>' instead of '[' & ']'\n # is due to the fact that we need to sequentially\n # replace the content within [] by `repl\n repls.append( '<#%d;X%d>'%(zi,cni) )\n atypes.append( '%02d;X%d'%(zi,cni) )\n zs.append( zi )\n cns.append( cni )\n zs = np.array(zs,dtype=int)\n na = len(zs)\n assert np.all(zs>1), '#ERROR: found H?'\n for bi in _m.GetBonds():\n bi.SetBondType(bo2bt['1.0'])\n # The line below is necessary! If not, we may end up with\n # smarts like '[#6]12:[#6]:[#6]:[#6]=1:[#6]:[#6]:2', originating\n # from input SMILES 'C12C=CC=1C=C2'\n bi.SetIsAromatic(False)\n sma = Chem.MolToSmarts(_m)\n #print ' repls = ', repls\n for i in range(na):\n sma = re.sub('\\[.*?\\]', repls[i], sma, count=1)\n #print ' sma = ', sma\n patts = [ '<', '>', '-'] #'-\\[', '\\]-' ]\n repls = [ '[', ']', '~'] #'~[', ']~' ]\n n = len(patts)\n for i in range(n):\n sma = re.sub(patts[i], repls[i], sma)\n return atypes, sma", "def mathmode(strng):\n mathexp = (\n # forma: a^{b}\n re.compile(r\"([\\^])[{]([^}\\$]+)[}]\"), re.compile(r\"([_])[{]([^}$]+)[}]\"),\n # forma: a^\\beta\n re.compile(r\"([\\^])(\\\\[\\w]+)\"), re.compile(r\"([_])(\\\\[\\w]+)\"),\n # forma: a^b\n re.compile(r\"([\\^])([^\\{\\\\\\$])\"), re.compile(r\"([_])([^\\$\\{\\\\])\")\n )\n for i in mathexp:\n strng = i.sub(r\"$\\1{\\2}$\", strng)\n return strng", "def lab2symm(sgnum='001',label='a'):\r\n wyck_dic=np.load('support/WyckoffSG_dict.npy').item()['wycksym']\r\n return wyck_dic[sgnum].get(label)", "def NM2m(NM):\n return NM * 1852", "def fromString(cls, string):\n # From SAM specification v1.5, slightly adapted for single-token parsing\n pattern = r\"^[0-9]+[MIDNSHPX=]\" \n string = string.strip()\n if string == '*':\n return CIGAR.fromList(['*'])\n parsed = []\n s = string\n # Parse string token (e.g. 14M) by token, re.findall is not enough,\n # because non-matching subsequences between (e.g. \"14Mblabla3D4M\") would\n # go unnoticed! 
Also it would be good to abort as early as possible if\n # an invalid string is found to avoid parsing possibly very long strings\n while s != '':\n r = re.match(pattern, s)\n if not r:\n raise ValueError('Invalid CIGAR string: \"'+string+'\"')\n g = r.group(0)\n parsed.append(g)\n s = s[len(g):]\n \n parsed = [(int(p[:-1]), p[-1:]) for p in parsed]\n\n return CIGAR.fromList(parsed)", "def bld(cls, adapter, mtms_str):\n mtms = super(MTMS, cls)._bld(adapter)\n mtm, sn = mtms_str.split('*', 1)\n mt, md = mtm.split('-', 1)\n\n # Assignment order is significant\n mtms.machine_type = mt\n mtms.model = md\n mtms.serial = sn\n return mtms", "def parse(s: str) -> StateFormula:\n tree = PCTL_PARSER.parse(s.replace(\" \", \"\"))\n return PCTLTransformer.transform(tree)", "def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]", "def _reconstruct_matrix(serial_string):\n from_hex = bytes.fromhex(serial_string)\n f = BytesIO(from_hex)\n matrix = spsp.load_npz(f)\n return matrix", "def get_symmetrized_pmg(pmg, tol=1e-3, a_tol=5.0, style='pyxtal', hn=None):\n\n pmg = symmetrize(pmg, tol, a_tol=a_tol, style=style, hn=hn)\n s = sga(pmg, symprec=tol, angle_tolerance=a_tol)\n # make sure that the coordinates are in standard setting\n if hn is None:\n hn = Hall(s._space_group_data['number'], style=style).hall_default\n if hn != s._space_group_data[\"hall_number\"]:\n s._space_group_data = get_symmetry_dataset(s._cell, tol,\n angle_tolerance=a_tol,\n hall_number=hn)\n return s.get_symmetrized_structure(), s.get_space_group_number()", "def parsePresentationMathMLSymbol(xml):\r\n tag = gettag(xml)\r\n if tag == 'mn':\r\n return xml.text\r\n elif tag == 'mi':\r\n return xml.text\r\n elif tag == 'msub':\r\n return '_'.join([parsePresentationMathMLSymbol(y) for y in xml])\r\n elif tag == 'msup':\r\n return '^'.join([parsePresentationMathMLSymbol(y) for y in xml])\r\n raise Exception('[parsePresentationMathMLSymbol] unknown tag %s' % tag)", "def readMathMLFromString(*args):\n return _libsbml.readMathMLFromString(*args)", "def decode(self, s):", "def decode(self, s):", "def get_mmt():\r\n # M^T\r\n MT = np.array([[-1, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [1, -1, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 1, -1, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 1, -1, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 1, -1, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 1, -1, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 1, -1, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 1, -1, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 1, -1],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 1]])\r\n\r\n M = np.transpose(MT)\r\n return M, MT", "def matrixstr(inputstr, converter=proper):\n ys = basicformat(inputstr).split(\"\\n\")\n for x in xrange(0,len(ys)):\n ys[x] = basicformat(ys[x])[1:-1].split(\",\")\n for z in xrange(0, len(ys[x])):\n ys[x][z] = converter(ys[x][z])\n return matrixlist(ys, converter)" ]
[ "0.64555776", "0.6072237", "0.5941381", "0.5923099", "0.5500192", "0.54345137", "0.5423918", "0.539085", "0.5381176", "0.5208619", "0.51720643", "0.5168524", "0.51670635", "0.51506907", "0.5129304", "0.51136523", "0.5053754", "0.50467676", "0.50440377", "0.501633", "0.49815646", "0.4930912", "0.4924693", "0.49050373", "0.49044228", "0.4895741", "0.4885979", "0.4885979", "0.48842925", "0.48728433" ]
0.80408126
0
Convert from a psmeca Sm format string to an EigMT object
def psmeca2EigMT( string ):

    # convert first 12 columns into array
    mtline = NP.fromstring( string, count=12, sep =' ', dtype=float )

    # get the location part
    c = mtline[0:3] # assume lon/lat/depth are centroid
    h = NP.array( [mtline[10], mtline[11], mtline[2]] ) # assume second lon/lat are hypocenter

    # get moment tensor part, voigt notation
    exp = mtline[9]
    # [mrr mtt mpp mrt mrp mtp] -> [mrr mtt mpp mtp mrp mrt]
    m = ( NP.r_[mtline[3:6], mtline[8], mtline[7], mtline[6]] )* 10**exp

    # make the moment tensor object
    return EigMT( m=m, c=c, h=h )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def psmeca2SymMT( string ): \n\n # convert first 12 columns into array\n mtline = NP.fromstring( string, count=12, sep =' ', dtype=float )\n \n # get the location part\n c = mtline[0:3] # assume lon/lat/depth are centroid\n h = NP.array( [mtline[10], mtline[11], mtline[2]] ) # assume second lon/lat are hypocenter\n\n # get moment tensor part, voigt notation \n # [mrr mtt mpp mrt mrp mtp] -> [mrr mtt mpp mtp mrp mrt]\n mhat = NP.r_[mtline[3:6], mtline[8], mtline[7], mtline[6]] \n exp = mtline[9]\n \n # remove the norm, take care with off diagonals\n norm = NP.sqrt( NP.sum( mhat**2 ) + NP.sum( mhat[3:]**2 ) )\n mhat /= norm\n\n # get the true norm\n norm *= 10**exp\n\n # make the moment tensor object\n return SymMT( mhat=mhat, Norm=norm, c=c, h=h )", "def read_psmecalist( istream , isEig=False ):\n\n mtlist=[] # this will be the output list\n\n # read everything\n alltxt = NP.genfromtxt( istream, delimiter='\\n' , dtype=str)\n try: \n istream.close()\n except:\n tmp=1\n\n # loop through all tensors\n n = len(alltxt)\n\n # check for desired output type\n if isEig:\n for i in range(0,n):\n mtlist.append( psmeca2EigMT( alltxt[i] ) )\n else:\n for i in range(0,n):\n mtlist.append( psmeca2SymMT( alltxt[i] ) )\n\n \n return mtlist, alltxt", "def test_psi4_qm_2g():\n subject = copy.deepcopy(subject2)\n subject[1] = \"\"\"@Ne_{CN}_O 2 4 6\"\"\"\n subject = '\\n--\\n'.join(subject)\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def test_psi4_efp_5c():\n subject = subject5 + '\\nno_com\\nfix_orientation\\nsymmetry c1'\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def test_xyzp_qm_7b():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='xyz')", "def test_xyzp_qm_7a():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='psi4')", "def mse_converter( fname ):\n lines = []\n with open( fname ) as fh:\n for line in fh.readlines():\n if len(line) > 1: # avoid empty lines\n if line.startswith('m'):\n continue\n # strip off \\n and split on tabs\n line = line.strip().split( '\\t' )\n lines.append( ( float(line[0]), float(line[1]) ) )\n return numpy.array( lines )", "def parse_engineering( string, unit = \"\" ):\n if not string.endswith(unit):\n raise ValueError(\"string '%s' is missing the unit '%s'\" % (string, unit))\n if unit:\n string = string[:-len(unit)]\n\n m = re.match(r\"\\s*([\\+\\-]?[.0-9]+)\\s*([a-zA-Z]*)\\s*\", string)\n if not m:\n raise ValueError(\"string '%s' cannot be parsed\" % string)\n x = m.group(1)\n mod = m.group(2)\n conv = {'a':1e-18, 'f':1e-15, 'p':1e-12, 'n':1e-9, 'u':1e-6,\n 'm':1e-3 , 'c':1e-2 , 'd':1e-1 , '':1.0 , 'k':1e3 ,\n 'M':1e6 , 'G':1e9 , 'T':1e12 , 'P':1e15, 'E':1e18}\n return float(x) * conv[mod]", "def convert_mev_inv_cm(toto):\n hb=1.05458e-34\n ev=1.60218e-19\n c= 3e8\n return toto*ev/(1e5*hb*2*np.pi*c)", "def intf_ENTPGRAM(E):\n # !! Need to check for some eids being TRIs. 
Filter that out.\n if ( not inc.entid_or_LST_of_entids(E.The,3) or \n not inc.point_formatted_LST(E.The,2) or\n not inc.point_formatted_LST(E.The,1) ):\n print(\"Input Error: pgram\")\n print(intf_ENTPGRAM.__doc__)\n return # Without doing much of anything.\n oB= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n oA= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n neweidlist= []\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n src_ent= MMEL.El[myeid]\n new_ent= src_ent.duplicate()\n new_ent.translate([ oB[0]-oA[0], oB[1]-oA[1], oB[2]-oA[2] ])\n As= mm.Entity.allplist.PLdict[ src_ent.epts[0] ]\n Ae= mm.Entity.allplist.PLdict[ src_ent.epts[1] ]\n Bs= mm.Entity.allplist.PLdict[ new_ent.epts[0] ]\n Be= mm.Entity.allplist.PLdict[ new_ent.epts[1] ]\n neweidlist.append(new_ent.eid)\n MMEL.add_ent(new_ent)\n line_entS= mm.Line_Entity( [As,Bs] )\n neweidlist.append(line_entS.eid)\n MMEL.add_ent(line_entS)\n line_entE= mm.Line_Entity( [Ae,Be] )\n neweidlist.append(line_entE.eid)\n MMEL.add_ent(line_entE)\n tri_entA= mm.Tri_Entity( [As, Ae, Bs] )\n neweidlist.append(tri_entA.eid)\n MMEL.add_ent(tri_entA)\n tri_entB= mm.Tri_Entity( [Bs, Be, Ae] )\n neweidlist.append(tri_entB.eid)\n MMEL.add_ent(tri_entB)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n if neweidlist:\n neweids= objectifier.StackOB_LST( [objectifier.StackOB_VAL(x) for x in neweidlist] )\n E.The.StackPush(neweids)\n OUT.default(MMEL,E) # AUTODUMP ", "def test_psi4_qmefpformat_error_6c():\n\n subject = subject6.replace(' efp h2O', '0 1\\n efp h2O')\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def convert_eV_cmm3(toto):\n hb=1.05458e-34\n ev=1.60218e-19\n eps0 = 8.85e-12\n mel = 9.109e-31\n return ((toto*ev/(2*np.pi*hb))/ev)**2*eps0*mel/1e6", "def intf_MMLIST(E):\n print(MMEL.__repr__())\n #print(OUT.default(MMEL,E))", "def _convert_meta(m):\n # Decode Pascal style string with 4 bytes length field\n l = struct.unpack(\"<I\", m[:4])[0]\n return m[4:4+l]", "def test_psi4_qm_2c():\n subject = copy.deepcopy(subject2)\n subject.insert(0, '1 3\\n1 3')\n subject = '\\n--\\n'.join(subject)\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def parse_email(msg):\n eml_dict = {}\n psr = Parser()\n parsed_eml = psr.parsestr(msg)\n eml_dict.update(parsed_eml)\n eml_dict['Body'] = parsed_eml.get_payload()\n return eml_dict", "def parse_emission_matrix(emissions_matrix_path):\r\n f = open(emissions_matrix_path)\r\n f.readline() # remove first line\r\n lines = f.readlines()\r\n k_counter = len(lines)\r\n emissions_mat = np.zeros([k_counter + NOT_MOTIF_STATES, ALPHABET_LEN])\r\n # B start\r\n emissions_mat[0, 0] = 1\r\n # B end\r\n emissions_mat[-1, -1] = 1\r\n # B_1\r\n emissions_mat[1, 1:-1] = UNIFORM_PROB\r\n # B_2\r\n emissions_mat[-2, 1:-1] = UNIFORM_PROB\r\n for k, line in enumerate(lines, 2): # go over every line\r\n emissions = line.split('\t')\r\n for letter in range(len(alphabet)): # create emissions for every S_i\r\n emissions_mat[k, letter + 1] = float(emissions[letter])\r\n return wrap_log(emissions_mat), k_counter", 
"def parse_msp(msp_entry,tic_normalization=True,min_perc=False,windowed_mode=False,top=10,window_size=100):\n\t\n\tidentifier = \"\"\n\tmz_list = []\n\tintensity_list = []\n\tif tic_normalization: tot_tic = 0.0\n\t\n\t#Iterate over the lines in the MSP entry and record the identifiers, m/z and intensities\n\tfor line in msp_entry:\n\t\tline = line.rstrip()\n\t\tif line == \"\": continue\n\t\tif line.startswith(\"Name: \"):\n\t\t\tidentifier = line.lstrip(\"Name: \")\n\t\t\tcontinue\n\t\tif \":\" in line: continue\n\t\t\n\t\tsplitline = line.split(\"\\t\")\n\n\t\tmz_list.append(float(splitline[0]))\n\t\tintensity_list.append(float(splitline[1]))\n\t\tif tic_normalization: tot_tic += intensity_list[-1]\n\t\n\t#In the case of tic normalization iterate over the values and divide by total intensity\n\tif tic_normalization:\n\t\tfor index,intens in enumerate(intensity_list):\n\t\t\tintensity_list[index] = intens/tot_tic\n\t\n\t#Filter based on the top intensities\n\tgr_mz_list,gr_intensity_list = get_top_spec(mz_list,\n\t\t\t\t\t\t\t\t\t\t\t\tintensity_list,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_perc=min_perc,\n\t\t\t\t\t\t\t\t\t\t\t\twindowed_mode=windowed_mode,\n\t\t\t\t\t\t\t\t\t\t\t\ttop=top,\n\t\t\t\t\t\t\t\t\t\t\t\twindow_size=window_size)\n\t\n\treturn(identifier,gr_mz_list,gr_intensity_list)", "def _convert(string, type, message):\n try:\n return type(string)\n except ValueError as e:\n print(e)\n raise CharmmPSFError('Could not convert %s' % message)", "def parse(s):\n return s", "def stringToMod(string):\n string = string.strip() \n i = 0\n symbol = \"\"\n # read the symbol\n while i < len(string) and string[i] != \"(\":\n symbol = symbol + string[i]\n i = i + 1\n # if parameters are present, get them\n if i< len(string) and string[i] == \"(\": \n i = i + 1 # skip the opening bracket\n params = string[i:(len(string) - 1)].split(\",\")\n for i in range(0,len(params)):\n params[i] = float(params[i].strip())\n return(Module(symbol,params))\n else:\n return(Module(symbol,[]))", "def decode(self, s):", "def decode(self, s):", "def convert_cmm3_eV(toto):\n hb=1.05458e-34\n ev=1.60218e-19\n eps0 = 8.85e-12\n mel = 9.109e-31\n return (np.sqrt(toto*1e6*ev**2/(eps0*mel)))*2*np.pi*hb/ev", "def SM2m(sm):\n return sm * 1609.344", "def convert_s_mev(toto):\n hb=1.05458e-34\n ev=1.60218e-19\n return(2*np.pi*hb*1e3)/(toto*ev)", "def to_meme(self):\n motif_id = self.id.replace(\" \", \"_\")\n m = \"MOTIF %s\\n\" % motif_id\n m += \"BL MOTIF %s width=0 seqs=0\\n\"% motif_id\n m += \"letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\\n\" % (len(self), np.sum(self.pfm[0]))\n m +=\"\\n\".join([\"\\t\".join([\"%s\" % x for x in row]) for row in self.pwm])\n return m", "def deserialize(self, str):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = 
str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v15 = val1.position\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v16 = val1.orientation\n _x = _v16\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v17 = val1.stamp\n _x = _v17\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v18 = val1.position\n _x = _v18\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v19 = val1.orientation\n _x = _v19\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v20 = val1.position\n _x = _v20\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v21 = val1.orientation\n _x = _v21\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = 
str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v22 = val1.position\n _x = _v22\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v23 = val1.orientation\n _x = _v23\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v24 = val1.stamp\n _x = _v24\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v25 = val1.position\n _x = _v25\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v26 = val1.orientation\n _x = _v26\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v27 = val1.position\n _x = _v27\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v28 = val1.orientation\n _x = _v28\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.model_aligned is None:\n self.model_aligned = articulation_msgs.msg.ModelMsg()\n if self.data_aligned is None:\n self.data_aligned = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.name = str[start:end].decode('utf-8')\n else:\n self.model_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose = []\n for i in range(0, length):\n val1 = 
geometry_msgs.msg.Pose()\n _v71 = val1.position\n _x = _v71\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v72 = val1.orientation\n _x = _v72\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v73 = val1.stamp\n _x = _v73\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v74 = val1.position\n _x = _v74\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v75 = val1.orientation\n _x = _v75\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v76 = val1.position\n _x = _v76\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v77 = val1.orientation\n _x = _v77\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model_aligned.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model_aligned.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.name = str[start:end].decode('utf-8')\n else:\n self.data_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.data_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v78 = val1.position\n _x = _v78\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v79 = val1.orientation\n _x = _v79\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v80 = val1.stamp\n _x = _v80\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v81 = val1.position\n _x = _v81\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v82 = val1.orientation\n _x = _v82\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v83 = val1.position\n _x = _v83\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v84 = val1.orientation\n _x = _v84\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data_aligned.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = 
end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data_aligned.track.channels.append(val1)\n start = end\n end += 72\n self.R = _struct_9d.unpack(str[start:end])\n start = end\n end += 24\n self.T = _struct_3d.unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.dist_rot, _x.dist_trans,) = _struct_df.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def dnde_ee(_: PseudoScalarMediatorBase, egams, cme):\n return dnde_xx_to_p_to_ffg(egams, cme, me)" ]
[ "0.6330677", "0.601451", "0.55377746", "0.5511349", "0.5498", "0.5460986", "0.53059566", "0.5282362", "0.52589643", "0.52587265", "0.5222153", "0.52134115", "0.51957136", "0.5180492", "0.512726", "0.5088043", "0.5087436", "0.5049602", "0.5042577", "0.5039193", "0.5031885", "0.5023888", "0.5023888", "0.49917492", "0.49857205", "0.4975265", "0.49612394", "0.49595606", "0.49384037", "0.49252275" ]
0.79937345
0
From an input file or stdin, read a list of moment tensors in psmeca form. Expected format: lon/lat/z/mrr/mtt/mpp/mrt/mrp/mtp/exp/lon0/lat0/str/anything else. If isEig is false, return list of SymMT objects, otherwise get a list of EigMT objects.
def read_psmecalist( istream , isEig=False ):

    mtlist=[] # this will be the output list

    # read everything
    alltxt = NP.genfromtxt( istream, delimiter='\n' , dtype=str)
    try:
        istream.close()
    except:
        tmp=1

    # loop through all tensors
    n = len(alltxt)

    # check for desired output type
    if isEig:
        for i in range(0,n):
            mtlist.append( psmeca2EigMT( alltxt[i] ) )
    else:
        for i in range(0,n):
            mtlist.append( psmeca2SymMT( alltxt[i] ) )

    return mtlist, alltxt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def psmeca2EigMT( string ): \n \n # convert first 12 columns into array\n mtline = NP.fromstring( string, count=12, sep =' ', dtype=float )\n \n # get the location part\n c = mtline[0:3] # assume lon/lat/depth are centroid\n h = NP.array( [mtline[10], mtline[11], mtline[2]] ) # assume second lon/lat are hypocenter\n\n # get moment tensor part, voigt notation\n exp = mtline[9]\n # [mrr mtt mpp mrt mrp mtp] -> [mrr mtt mpp mtp mrp mrt]\n m = ( NP.r_[mtline[3:6], mtline[8], mtline[7], mtline[6]] )* 10**exp\n\n # make the moment tensor object\n return EigMT( m=m, c=c, h=h )", "def psmeca2SymMT( string ): \n\n # convert first 12 columns into array\n mtline = NP.fromstring( string, count=12, sep =' ', dtype=float )\n \n # get the location part\n c = mtline[0:3] # assume lon/lat/depth are centroid\n h = NP.array( [mtline[10], mtline[11], mtline[2]] ) # assume second lon/lat are hypocenter\n\n # get moment tensor part, voigt notation \n # [mrr mtt mpp mrt mrp mtp] -> [mrr mtt mpp mtp mrp mrt]\n mhat = NP.r_[mtline[3:6], mtline[8], mtline[7], mtline[6]] \n exp = mtline[9]\n \n # remove the norm, take care with off diagonals\n norm = NP.sqrt( NP.sum( mhat**2 ) + NP.sum( mhat[3:]**2 ) )\n mhat /= norm\n\n # get the true norm\n norm *= 10**exp\n\n # make the moment tensor object\n return SymMT( mhat=mhat, Norm=norm, c=c, h=h )", "def read_from_mtz ( mtzin = \"\", colin = \"F,SIGF\" ) :\n\n log_string = \"\\n >> clipper_tools: io.structure_factors.read_from_mtz\"\n log_string += \"\\n mtzin: %s\" % mtzin\n\n xml_root = etree.Element('input_file')\n xml_root.attrib['name'] = mtzin\n xml_root.attrib['type'] = 'mini MTZ'\n \n hkl_data = clipper.HKL_data_F_sigF_float()\n hkl_info = clipper.HKL_info ()\n \n if mtzin is not \"\" :\n mtzfilein = clipper.CCP4MTZfile()\n mtzfilein.open_read ( mtzin )\n mtzfilein.import_hkl_info (hkl_info, True)\n mtzfilein.import_hkl_data (hkl_data, \"*/*/[\" + colin + \"]\")\n else :\n return log_string, xml_root, hkl_data, hkl_info\n \n print (dir(hkl_data))\n \n log_string += \"\\n << read_from_mtz has finished\\n\"\n xml_root.attrib['ok'] = 'yes'\n \n return log_string, xml_root, hkl_info, hkl_data", "def read_qe(qefile, task):\n fileobj = open(qefile)\n lines = fileobj.readlines()\n fileobj.close()\n if task == \"PW_INP\": # Reading a pw.x input file\n for i, line in enumerate(lines):\n if \"nat\" in line:\n # Reading the number of atoms in the cell\n if \",\" in line.split()[2]:\n nat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n nat = int(line.split()[2])\n elif \"ntyp\" in line:\n if \",\" in line.split()[2]:\n ntypat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n ntypat = int(line.split()[2])\n elif \"CELL_PARAMETERS\" in line:\n # Reading the cell vectors\n cell = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"ATOMIC_POSITIONS\" in line:\n if \"crystal\" in line:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]]), cell)\n else:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n # Returning the 
input structure\n rstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n return rstrc\n elif task == \"PW_OUT_RELAX\": # Reading a pw.x output file for a calculation = \"relax\"\n status = \"NONE\"\n rstrcs = []\n rtotEs = []\n rtotFs = []\n rforces = []\n rstress = []\n for i, line in enumerate(lines):\n # Initial information related to the input cell\n if \"number of atoms/cell\" in line:\n # Reading the number of atoms in the cell\n nat = int(line.split()[4])\n elif \"number of atomic types\" in line:\n ntypat = int(line.split()[5])\n elif \"crystal axes: (cart. coord. in units of alat)\" in line:\n # Reading the cell vectors\n cell = [x.split()[3:6] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"Crystallographic axes\" in line:\n # Reading the input coordinates and creating a collection of ase.Atoms objects\n geom_start = i + 3\n geom_stop = geom_start + nat\n species = [line.split()[1] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[6:9]]\n for line in lines[geom_start:geom_stop]]), cell)\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates (first)\")\n # Now, just after each SCF cycle\n # Reading total energy\n elif \"Forces acting on atoms\" in line:\n forces_start = i + 2\n forces_stop = forces_start + nat\n try:\n forces = array([[float(col) for col in line.split()[6:9]]\n for line in lines[forces_start:forces_stop]])\n #print (\"Appending forces\")\n rforces.append(forces)\n except ValueError:\n # expected to occur when forces are too big\n # and so incompatible with the format used in QE\n # for instance:\n # atom 3 type 2 force = 674.57999165 312.30521069-1079.69944125\n print (\"Rerror reading forces in file:\")\n print (qefile)\n #print (\"Appending forces (empty)\")\n rforces.append([])\n elif \"! 
total energy\" in line:\n rtotEs.append(float(line.split()[4]))\n #print (\"Appending energy\")\n elif \"total stress (Ry/bohr**3)\" in line:\n # Reading the stress tensor\n stress = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n stress = array([[float(col) for col in row] for row in stress])\n rstress.append(stress)\n #print (\"Appending stress\")\n elif \"Total force\" in line:\n rtotFs.append(float(line.split()[3]))\n #print (\"Appending total forces\")\n elif \"ATOMIC_POSITIONS (alat)\" in line:\n # Reading the relaxed and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates\")\n elif \"convergence NOT achieved after 100 iterations: stopping\" in line:\n # Removing the last item the vector with structures\n status = \"SCF_NOT_CONVERGED\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if no even the first SCF started\n if len(rtotEs) == 0 and status == \"NONE\":\n status = \"CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the SCF has not been finished because of timeout\n if len(rstrcs) > len(rtotEs) and status == \"NONE\":\n status = \"TIMEOUT_OR_CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the BFGS has been finished\n if status == \"TIMEOUT_OR_CRASH\" and \"JOB DONE\" in lines[len(lines)-2]:\n status = \"FINISHED\"\n # Returning a collection of cells and properties\n return status, rstrcs, rtotEs, rtotFs, rforces, rstress", "def parse_msp(msp_entry,tic_normalization=True,min_perc=False,windowed_mode=False,top=10,window_size=100):\n\t\n\tidentifier = \"\"\n\tmz_list = []\n\tintensity_list = []\n\tif tic_normalization: tot_tic = 0.0\n\t\n\t#Iterate over the lines in the MSP entry and record the identifiers, m/z and intensities\n\tfor line in msp_entry:\n\t\tline = line.rstrip()\n\t\tif line == \"\": continue\n\t\tif line.startswith(\"Name: \"):\n\t\t\tidentifier = line.lstrip(\"Name: \")\n\t\t\tcontinue\n\t\tif \":\" in line: continue\n\t\t\n\t\tsplitline = line.split(\"\\t\")\n\n\t\tmz_list.append(float(splitline[0]))\n\t\tintensity_list.append(float(splitline[1]))\n\t\tif tic_normalization: tot_tic += intensity_list[-1]\n\t\n\t#In the case of tic normalization iterate over the values and divide by total intensity\n\tif tic_normalization:\n\t\tfor index,intens in enumerate(intensity_list):\n\t\t\tintensity_list[index] = intens/tot_tic\n\t\n\t#Filter based on the top intensities\n\tgr_mz_list,gr_intensity_list = get_top_spec(mz_list,\n\t\t\t\t\t\t\t\t\t\t\t\tintensity_list,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_perc=min_perc,\n\t\t\t\t\t\t\t\t\t\t\t\twindowed_mode=windowed_mode,\n\t\t\t\t\t\t\t\t\t\t\t\ttop=top,\n\t\t\t\t\t\t\t\t\t\t\t\twindow_size=window_size)\n\t\n\treturn(identifier,gr_mz_list,gr_intensity_list)", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n 
eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. 
/ eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def ase_mol_parse(m):\n \n try:\n coords = read(m.calc.log).positions\n except AttributeError:\n try:\n coords = np.array(m.calc.max_data['Positions'])\n except ValueError:\n pass\n\n symbols = m.get_chemical_symbols()\n no_atoms = len(m)\n\n ##these try except blocks are because the machine readable part of log files \n ##for the calculations on the single atoms in explicably lack the fields (though\n ##in the log file they are liste\n try:\n charges = np.array(m.calc.max_data['atomcharges']['mulliken'])\n except KeyError:\n charges = [None for i in range(no_atoms)]\n try:\n zpe = m.calc.max_data['Zeropoint']\n except KeyError:\n zpe = 0\n try:\n vib_freqs = m.calc.max_data['vibfreqs']\n except KeyError:\n vib_freqs = []\n try:\n rot_A, rot_B, rot_C = m.calc.max_data['rotconstants']\n except KeyError:\n rot_A, rot_B, rot_C = None,None,None\n \n try:\n h_298k = m.calc.max_data['enthalpy']\n except KeyError:\n h_298k = None\n\n try:\n f_298k = m.calc.max_data['freeenergy']\n except KeyError:\n f_298k = None\n\n try:\n u_0k = m.calc.max_data['Hf'] + zpe\n except KeyError:\n u_0k = None\n \n try:\n u_298k = m.calc.max_data['Hf'] + m.calc.max_data['Thermal']\n except KeyError:\n u_298k = None\n\n try:\n dipole = np.linalg.norm(m.calc.max_data['Dipole'])\n except KeyError:\n dipole = None\n\n try:\n polarisability = m.calc.max_data['Polar'][0]\n except KeyError:\n polarisability = None\n\n try: \n homo_ind = m.calc.max_data['homos'][0]\n except KeyError:\n homo_ind = None\n\n try:\n homo = m.calc.max_data['moenergies'][0][homo_ind]\n except KeyError:\n homo = None\n\n try:\n lumo = m.calc.max_data['moenergies'][0][homo_ind+1]\n except KeyError:\n lumo = None\n\n try:\n band_gap = lumo-homo\n except TypeError:\n band_gap = None\n\n # the biotools environment on this machine and the ml environment on cx1\n # contain a hacked cclib that extract heatcapacity/rotconstants and ese from log files\n try:\n cp_298k = m.calc.max_data['heatcapacity']\n except KeyError:\n cp_298k = None\n\n try:\n ese = m.calc.max_data['ese']\n except KeyError:\n ese = None\n\n data_dict = {'mol_id': m.calc.label,\n 'coords': coords,\n 'charges': charges,\n 'rot_A': rot_A,\n 'rot_B': rot_B,\n 'rot_C': rot_C,\n 'u_0K': u_0k,\n 'u_298.15K': u_298k,\n 'h_298.15K': h_298k,\n 'f_298.15K': f_298k,\n 'cp_298.15K': cp_298k,\n 'zpe': zpe,\n \n 'symbols': symbols,\n 'no_atoms': no_atoms,\n 'homo': homo,\n 'lumo': lumo,\n 'band_gap': band_gap,\n \n 'dipole': dipole,\n 'polarizability': polarisability,\n 'ese': ese,\n 'vib_freqs': vib_freqs,\n \n 'smiles': '', \n 'inchi': '',\n }\n \n return data_dict", "def read_lammpstrj(lmp):\n raw = []\n with open(lmp, 'r') as infile:\n for lines in infile:\n if lines.startswith('ITEM: TIMESTEP'):\n if raw:\n yield raw\n raw = []\n raw.append(lines)\n if raw:\n yield raw", "def read_smx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = 
eps_file.mdr_counter * n_node_per_line\n idx_nodes = np.arange(eps_file.mdr_counter).repeat(n_node_per_line)\n\n data = {}\n metadata = {}\n\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n fields = [\"sat_track_azi\", \"abs_line_number\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan, long_nan),\n (\"latitude\", long_nan, long_nan),\n (\"swath_indicator\", byte_nan, byte_nan),\n (\"soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_error\", uint_nan, uint_nan),\n (\"sigma40\", long_nan, long_nan),\n (\"sigma40_error\", long_nan, long_nan),\n (\"slope40\", long_nan, long_nan),\n (\"slope40_error\", long_nan, long_nan),\n (\"dry_backscatter\", long_nan, long_nan),\n (\"wet_backscatter\", long_nan, long_nan),\n (\"mean_surf_soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_sensetivity\", ulong_nan, float32_nan),\n (\"correction_flags\", uint8_nan, uint8_nan),\n (\"processing_flags\", uint8_nan, uint8_nan),\n (\"aggregated_quality_flag\", uint8_nan, uint8_nan),\n (\"snow_cover_probability\", uint8_nan, uint8_nan),\n (\"frozen_soil_probability\", uint8_nan, uint8_nan),\n (\"innudation_or_wetland\", uint8_nan, uint8_nan),\n (\"topographical_complexity\", uint8_nan, uint8_nan)]\n\n for f, nan_val, new_nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = new_nan_val\n\n # sat_track_azi (uint)\n data[\"as_des_pass\"] = \\\n np.array(raw_data[\"SAT_TRACK_AZI\"].flatten()[idx_nodes] < 270)\n\n # modify longitudes from [0,360] to [-180,180]\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n fields = [\"param_db_version\", \"warp_nrt_version\"]\n for f in fields:\n data[f] = raw_data[\"PARAM_DB_VERSION\"].flatten()[idx_nodes]\n\n metadata[\"spacecraft_id\"] = int(eps_file.mphr[\"SPACECRAFT_ID\"][2])\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1), n_lines)\n\n data[\"line_num\"] = idx_nodes\n\n return data, metadata", "def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC", "def 
read(self,isOutputFile = False, headerCols = None, verbose = 0):\n \n #\n # TODO TODO also need a 'readFinal' one to read the FINAL information!!\n # set a flag in MonteFormat.py to select which cs info to read...\n\n if verbose == 1:\n print \"Reading %s chemical shift list %s\" % (self.format,self.name)\n\n fin = open(self.name, 'rU')\n\n line = fin.readline()\n \n spinSystemId = 0\n resLabel = oldResLabel = None\n\n while line:\n\n if self.patt['%sComment' % self.format].search(line):\n\n if not isOutputFile and not self.chemShifts and not headerCols:\n\n #\n # Get atom info from first line...\n #\n \n headerCols = line.split()\n headerCols.pop(0)\n\n line = fin.readline()\n continue\n\n if self.patt['emptyline'].search(line):\n line = fin.readline()\n continue\n \n #\n # Make sure header info is available - otherwise no point\n #\n \n if not headerCols:\n raise \"Error: no header column information available. Try reading .par file!\"\n return\n \n #\n # Get the info... should really come for .par file!!\n #\n \n cols = line.split()\n \n infoCode = None\n \n if not isOutputFile:\n \n stripId = returnFloat(cols.pop(0))\n\n #\n # NOt necessarily info string available...\n #\n\n if self.patt['onlyFloat'].search(cols[0]):\n seqCode = None\n resLabel = None\n\n else:\n assignment = cols.pop(0)\n\n searchAssignment = self.patt['%sAssignment' % self.format].search(assignment)\n\n resLabel = searchAssignment.group(1)\n seqCode = searchAssignment.group(2)\n \n else:\n \n seqCode = cols.pop(0)\n if seqCode[-1] in '+':\n seqCode = seqCode[:-1]\n infoCode = seqCode[-1]\n \n oldResLabel = resLabel\n resLabel = cols.pop(0)\n stripId = returnFloat(cols.pop(0))\n voidCol = cols.pop(0)\n \n #\n # Set up info for atoms...\n #\n \n if not seqCode or seqCode == '?':\n seqCode = None\n spinSystemId = spinSystemId + 2\n else:\n seqCode = returnInt(seqCode)\n\n if len(cols) == 1:\n cols = cols.split(',')\n\n values = returnFloats(cols)\n\n for i in range(0,len(values)):\n atomId = headerCols[i]\n value = values[i]\n \n if value == 0.0:\n continue\n \n atomSearch = self.patt['%sAtomInfo' % self.format].search(atomId)\n \n atomName = atomSearch.group(1)\n atomPlace = atomSearch.group(2)\n \n if atomName == 'HA1':\n nextAtomValue = values[i+1]\n if nextAtomValue == 0.00:\n atomName = 'HA'\n \n curSeqCode = seqCode\n curResLabel = None\n \n if seqCode == None:\n curSpinSystemId = spinSystemId\n prevSpinSystemId = spinSystemId - 1\n else:\n curSpinSystemId = None\n prevSpinSystemId = None\n \n if atomPlace == '(i-1)' or atomPlace == '-1':\n\n if seqCode != None:\n curSeqCode = seqCode - 1\n else:\n curSpinSystemId = spinSystemId - 1\n prevSpinSystemId = None\n \n if not isOutputFile:\n curResLabel = resLabel\n else:\n curResLabel = oldResLabel\n \n elif isOutputFile:\n curResLabel = resLabel\n\n self.chemShifts.append(MonteChemShift(value,atomName,curSeqCode,curSpinSystemId,stripId,curResLabel,self.defaultMolCode, infoCode = infoCode, prevSpinSystemId = prevSpinSystemId))\n\n line = fin.readline()\n\n fin.close()", "def read_formatted_file(file, listmat=None, listmf=None, listmt=None):\n ftype = get_file_format(file)\n if ftype is \"errorr\":\n return Errorr.from_file(file).filter_by(listmat=listmat, listmf=listmf, listmt=listmt)\n elif ftype is \"gendf\":\n return Gendf.from_file(file).filter_by(listmat=listmat, listmf=listmf, listmt=listmt)\n elif ftype is \"endf6\" or ftype is \"pendf\":\n return Endf6.from_file(file).filter_by(listmat=listmat, listmf=listmf, listmt=listmt)\n else:\n raise SandyError(\"file 
'{}' not in a known format\".format(file))", "def read_mf35(tape, mat, mt):\n mf = 35\n df = tape._get_section_df(mat, mf, mt)\n out = {\"MAT\": mat,\n \"MF\": mf,\n \"MT\": mt}\n i = 0\n C, i = sandy.read_cont(df, i)\n out.update({\"ZA\": C.C1,\n \"AWR\": C.C2,\n \"NK\": C.N1, # Number of subsections\n \"SUB\": {}})\n for k in range(out[\"NK\"]):\n L, i = sandy.read_list(df, i)\n D = {\"ELO\": L.C1, # Lowest incident neutron energy for this subsection\n \"EHI\": L.C2, # Highest incident neutron energy for this subsection\n \"LS\": L.L1, # Flago to indicate if the covariance matrix is symmetric\n \"LB\": L.L2, # Flag to indicate if the covariance matrix is given in absolute or relative terms\n \"NE\": L.N2, # Number of entries in the array containing outgoing particle energies\n \"EK\": L.B[:L.N2], # Array containing outgoing particle energies\n \"FKK\": L.B[L.N2:]} # Covariance matrix ordered by rows and starting from the diagonal term\n out[\"SUB\"].update({k+1: D})\n return out", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def read_forces(self, fname):\n outfile = open(fname)\n lines = outfile.readlines()\n outfile.close()\n nats = len(self.atoms)\n forces = np.zeros((nats, 3), float)\n infinite_force=\"*****\"\n if 'mozyme' in self.str_params['job_type'].lower():\n for i, line in enumerate(lines):\n if line.find('FINAL POINT AND DERIVATIVES') != -1:\n for j in range(nats):\n gline = lines[i + j + 5]\n pre_force=gline[8:35]\n if(infinite_force in pre_force):\n forces[j] = [999999999.9999,999999999.9999,999999999.9999]\n else:\n forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]\n else:\n for i, line in enumerate(lines):\n if 
line.find('GRADIENT\\n') != -1:\n for j in range(nats * 3):\n gline = lines[i + j + 1]\n pre_force=gline[49:62]\n if(infinite_force in pre_force):\n forces[int(j/3), int(j%3)] =999999999.9999\n else:\n forces[int(j/3), int(j%3)] = float(pre_force)\n break\n#do not change unit for mopac\n forces *= - (kcal / mol)\n return forces", "def _read_mist_iso_filecontent(data):\n import numpy as np\n\n try:\n try:\n f = data.decode('utf8').split('\\n')\n except:\n f = data.split('\\n')\n\n content = [line.split() for line in f]\n hdr = {'MIST': content[0][-1], 'MESA': content[1][-1]}\n abun = {content[3][i]:float(content[4][i]) for i in range(1,5)}\n hdr.update(**abun)\n hdr['ROT'] = float(content[4][-1])\n num_ages = int(content[6][-1])\n hdr['num_ages'] = num_ages\n\n #read one block for each isochrone\n iso_set = []\n counter = 0\n data = content[8:]\n\n # isochrone format\n for i_age in range(num_ages):\n\n #grab info for each isochrone\n _d = data[counter]\n num_eeps = int(_d[-2])\n num_cols = int(_d[-1])\n hdr_list = data[counter + 2][1:]\n if not py3k:\n # correcting for recfunctions not up to date for unicode dtypes\n hdr_list = [str(k) for k in hdr_list]\n formats = tuple([np.int32] + [np.float64 for i in range(num_cols - 1)])\n iso = np.zeros((num_eeps), {'names':tuple(hdr_list),'formats':tuple(formats)})\n\n #read through EEPs for each isochrone\n for eep in range(num_eeps):\n iso_chunk = data[3+counter+eep]\n iso[eep] = tuple(iso_chunk)\n\n iso_set.append(iso)\n\n counter += 3 + num_eeps + 2\n\n _data = np.lib.recfunctions.stack_arrays(iso_set, usemask=False)\n\n t = Table(_data, header=hdr)\n\n # make some aliases\n aliases = (('logL', 'log_L'),\n ('logT', 'log_Teff'),\n ('mass', 'star_mass'),\n ('logg', 'log_g'))\n\n if 'log10_isochrone_age_yr' in t:\n aliases += (('logA', 'log10_isochrone_age_yr'),)\n else:\n aliases += (('age', 'isochrone_age_yr'),)\n\n for a, b in aliases:\n t.set_alias(a, b)\n except ValueError:\n buf = StringIO(data.decode('utf8'))\n t = Table(buf, dtype='dat')\n \n t.header['NAME'] = 'MIST/MESA isochrones'\n\n return t", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, 
dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n 
ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. # Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var", "def read_mutat_file(\n FASTA, ENTRY,\n MUTAT\n ):\n\n # initialize lists for each mutation\n SNP_arr = []\n INDEL_arr = []\n # initialize line counter\n lineCNT = 1\n\n with open(MUTAT) as mutSummary:\n for line in mutSummary:\n line = re.split(r'\\t+', line.rstrip('\\n'))\n if line[0] == \"snp\":\n SNP_arr.append(line)\n elif line[0] == \"indel\":\n INDEL_arr.append(line)\n elif line[0] not in (\"snp\", \"indel\"):\n print(\"Line\", lineCNT, \"does not contain 'snp' or 'indel'\")\n print(\"Please check the mutation summary (-m) file\")\n print(\"Exiting now...\")\n sys.exit()\n lineCNT+=1\n\n # pass to replace_nucl_with_SNP function\n replace_nucl_with_SNP(\n FASTA, ENTRY,\n SNP_arr, INDEL_arr\n )", "def extract_points(fp: T.BinaryIO) -> T.Optional[T.List[geo.Point]]:\n\n points = None\n movie_timescale = None\n media_timescale = None\n elst_entries = None\n\n for h, s in parser.parse_path(fp, [b\"moov\", [b\"mvhd\", b\"trak\"]]):\n if h.type == b\"trak\":\n 
trak_start_offset = s.tell()\n\n descriptions = sample_parser.parse_descriptions_from_trak(\n s, maxsize=h.maxsize\n )\n camm_descriptions = [d for d in descriptions if d[\"format\"] == b\"camm\"]\n if camm_descriptions:\n s.seek(trak_start_offset, io.SEEK_SET)\n camm_samples = _extract_camm_samples(s, h.maxsize)\n\n points_with_nones = (\n _parse_point_from_sample(fp, sample)\n for sample in camm_samples\n if sample.description[\"format\"] == b\"camm\"\n )\n\n points = [p for p in points_with_nones if p is not None]\n if points:\n s.seek(trak_start_offset)\n elst_data = parser.parse_box_data_first(\n s, [b\"edts\", b\"elst\"], maxsize=h.maxsize\n )\n if elst_data is not None:\n elst_entries = cparser.EditBox.parse(elst_data)[\"entries\"]\n\n s.seek(trak_start_offset)\n mdhd_data = parser.parse_box_data_firstx(\n s, [b\"mdia\", b\"mdhd\"], maxsize=h.maxsize\n )\n mdhd = cparser.MediaHeaderBox.parse(mdhd_data)\n media_timescale = mdhd[\"timescale\"]\n else:\n assert h.type == b\"mvhd\"\n if not movie_timescale:\n mvhd = cparser.MovieHeaderBox.parse(s.read(h.maxsize))\n movie_timescale = mvhd[\"timescale\"]\n\n # exit when both found\n if movie_timescale is not None and points:\n break\n\n if points and movie_timescale and media_timescale and elst_entries:\n segments = [\n elst_entry_to_seconds(entry, movie_timescale, media_timescale)\n for entry in elst_entries\n ]\n points = list(filter_points_by_elst(points, segments))\n\n return points", "def mol_file_data(file_name: str, molfile: MolFile = None):\n\tans = \"\"\n\tatoms = list()\n\tValenceAngle.objects.all().delete()\n\tValenceAngleLink.objects.all().delete()\n\n\tmatrix_z_coordinates = \"unknown\"\n\tmatrix_z_units = \"unknown\"\n\tmz_espec = 2\n\tmz_skipped = False\n\tmz_cline = 0\n\tmz_crow = 0\n\tmz_last_line = 0\n\tmz_next_column = 0\n\n\tmatrix_z_lines = list()\n\tactive_doc = None\n\tatom_number = 1\n\ttry:\n\t\tactive_doc = Document.objects.get(is_active=True)\n\t\tatom_number = len(Atom.objects.filter(document=active_doc)) + 1\n\texcept Document.DoesNotExist:\n\t\tpass\n\n\t# чтение файла file_name\n\ttry:\n\t\twith open(file_name) as f:\n\t\t\tlines = f.readlines()\n\t\t\tf.seek(0)\n\t\t\ttext = f.read()\n\t\t\tmolfile = MolFile.objects.create(text=text)\n\n\texcept FileNotFoundError as ex:\n\t\tans += \"error while reading .mol data. 
\"\n\t\tans += str(ex)\n\t\t# ans += str(os.listdir())\n\t\traise MolFileReadingException(\"File not found\")\n\n\tmode = \"scan\" # активный режим работы\n\tn = len(lines)\n\ti = 0\n\twhile i < n: # цикл по строкам файла\n\t\ti += 1\n\t\tif mode == \"end\":\n\t\t\tbreak\n\n\t\tline = lines[i-1].split(\"//\")[0]\n\t\tif not line:\n\t\t\tcontinue\n\t\tans += dprint(\">> \" + line + \"<br/>\")\n\n\t\tif mode == \"scan\":\n\t\t\tif \"[Atoms]\" in line:\n\t\t\t\tdprint(\"GO readAtoms\")\n\t\t\t\tmode = \"readAtoms\"\n\t\t\t\tcontinue\n\t\t\tif \"[Matrix Z]\" in line:\n\t\t\t\tdprint(\"Go readMatrixZ\")\n\t\t\t\tmode = \"readMatrixZ\"\n\t\t\t\tcontinue\n\t\tif mode == \"readMatrixZ\":\n\t\t\tif line.isspace():\n\t\t\t\tcontinue\n\t\t\tif not line.startswith(\"[\") and i != n: # not end of readMatrixZ section and not end of file\n\t\t\t\tmatrix_z_lines.append(resub(r\"[ \\t\\r\\n]+\", \" \", line.replace(\"\\r\", \"\").replace(\"\\n\", \"\")))\n\t\t\telse: # end of readMatrixZ\n\t\t\t\tmz_size = len(atoms) * 3\n\t\t\t\tmatrix_z = np.zeros((mz_size, mz_size), dtype=np.float32)\n\n\t\t\t\tfor mline in matrix_z_lines:\n\t\t\t\t\tif mline.startswith(\"Coordinates=\"):\n\t\t\t\t\t\tmatrix_z_coordinates = mline.split('=')[1]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif mline.startswith(\"Units=\"):\n\t\t\t\t\t\tmatrix_z_units = mline.split('=')[1]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tsplited = list(filter(None, mline.split(\" \")))\n\t\t\t\t\tif len(splited) != mz_espec:\n\t\t\t\t\t\tif not mz_skipped: # first time skipped\n\t\t\t\t\t\t\tmz_skipped = True\n\t\t\t\t\t\t\tmz_espec -= 1\n\t\t\t\t\t\t\tmz_last_line = mz_cline\n\t\t\t\t\t\t\tmz_next_column += len(splited) - 1\n\t\t\t\t\t\telse: # already skipped\n\t\t\t\t\t\t\tmz_espec -= 1\n\n\t\t\t\t\tif not mz_skipped: # normal line\n\t\t\t\t\t\tfor ind in range(mz_espec-1):\n\t\t\t\t\t\t\tval = float(splited[ind+1])\n\t\t\t\t\t\t\tmatrix_z[mz_cline, mz_crow+ind] = val\n\t\t\t\t\t\t\tmatrix_z[mz_crow+ind, mz_cline] = val\n\t\t\t\t\t\tmz_espec += 1\n\t\t\t\t\t\tmz_cline += 1\n\t\t\t\t\telse: # line with skip\n\t\t\t\t\t\tif len(splited) != mz_espec:\n\t\t\t\t\t\t\tmz_skipped = False\n\t\t\t\t\t\t\tmz_espec = 2\n\t\t\t\t\t\t\tmz_cline = mz_last_line\n\t\t\t\t\t\t\tmz_crow = mz_next_column\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tfor ind in range(len(splited) - 1):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tval = float(splited[ind+1])\n\t\t\t\t\t\t\t\tmatrix_z[mz_cline, mz_crow + ind] = val\n\t\t\t\t\t\t\t\tmatrix_z[mz_crow + ind, mz_cline] = val\n\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\tmz_espec = 2\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tmz_espec += 1\n\t\t\t\t\t\tmz_cline += 1\n\n\t\t\t\t# сохранение результатов чтения\n\t\t\t\tMatrixZ.objects.create(\n\t\t\t\t\towner=molfile,\n\t\t\t\t\tcoordinates=matrix_z_coordinates,\n\t\t\t\t\tunits=matrix_z_units,\n\t\t\t\t\tdata=matrix_z.dumps()\n\t\t\t\t)\n\n\t\t\t\tmode = \"scan\"\n\n\t\telif mode == \"readAtoms\": # считывание информации об атомах\n\t\t\tif line.isspace(): # пустая строка - это конец считывания\n\t\t\t\t# mode = \"scan\"\n\t\t\t\tdprint(\"END: readAtoms: finded end<br/>\")\n\t\t\t\tmode = \"scan\"\n\t\t\t\tcontinue\n\t\t\tif line.startswith('//') or line.lower().startswith(\"length\") or line.lower().startswith(\"count\"):\n\t\t\t\tcontinue\n\n\t\t\telems = line.strip().split(' ')\n\t\t\telems = list(filter(None, elems))\n\n\t\t\tfirst = elems[0]\n\t\t\ttry:\n\t\t\t\tif first == \"[Atoms]\":\n\t\t\t\t\tcontinue\n\t\t\t\tdprint(\"first: \" + str(first) + \"<br/>\")\n\t\t\t\tdprint(elems)\n\t\t\t\tnumber = 
int(first)\n\t\t\t\tdprint(\"ReadAtom [{}]\".format(number))\n\t\t\t\tax = float(elems[1])\n\t\t\t\tdprint(\"!\")\n\t\t\t\tay = float(elems[2])\n\t\t\t\taz = float(elems[3])\n\t\t\t\tname = elems[4]\n\t\t\t\tmass = int(elems[5])\n\t\t\t\tnew_atom = Atom(\n\t\t\t\t\tx=ax, y=ay, z=az, name=name, mass=mass, document=active_doc, molfileindex=number)\n\t\t\t\tnew_atom.molfile = molfile\n\t\t\t\tnew_atom.documentindex = atom_number\n\t\t\t\tatom_number += 1\n\t\t\t\tnew_atom.save()\n\n\t\t\t\tif new_atom.name == \"H\":\n\t\t\t\t\tnew_atom.valence = 1\n\t\t\t\t\tnew_atom.mentableindex = 0\n\n\t\t\t\tif new_atom.name == \"C\":\n\t\t\t\t\tnew_atom.valence = 4\n\t\t\t\t\tnew_atom.mentableindex = 5\n\n\t\t\t\tatoms.append(new_atom)\n\n\t\t\texcept ValueError as ex:\n\t\t\t\tdprint(\"get_last_mol_file error: \" + str(ex))\n\t\t\t\tmode = \"scan\"\n\t\t\t\tcontinue\n\t\telif mode == \"readMatrixZ\":\n\t\t\tpass\n\n\t# считывание из файла завершено заполнен список atoms\n\t# ans = atoms2json(atoms)\n\t# return ans\n\n\t# вернём активный документ\n\treturn atoms", "def parsemeta(metalines):\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))", "def import_data(in_file):\n\n print '\\n\\tImport data'\n sentence = []\n concept = []\n sentences = []\n concepts = []\n for line in open(in_file, 'r'):\n if line != '\\n':\n sentence += [ line.split()[0] ]\n concept += [ line.split()[1] ]\n else:\n sentences += [ sentence ]\n concepts += [ concept ]\n sentence = [ ]\n concept = [ ]\n pos = []\n lemma = []\n poss = []\n lemmas = []\n for line in open(in_file.replace('.data', '.feats.txt'), 'r'):\n if line != '\\n':\n pos += [ line.split()[ 1 ] ]\n lemma += [ line.split()[ 2 ] ]\n else:\n poss += [ pos ]\n lemmas += [ lemma ]\n pos = [ ]\n lemma = [ ]\n print '\\t--done'\n return sentences, poss, lemmas, concepts", "def _load_dat(self):\n modelfile = self.filename\n with open(modelfile) as f:\n content = f.readlines()\n\n self.comment = content.pop(0) # Comment line\n content = [x for x in content if not x.startswith('#')]\n\n for line in content:\n if('atoms' in line): self.natoms = int(line.split()[0])\n if('xlo' in line and 'xhi' in line):\n self.xsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('ylo' in line and 'yhi' in line):\n self.ysize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('zlo' in line and 'zhi' in line):\n self.zsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('atom types' in line): nelems = int(line.split()[0])\n if('Masses' in line): mflag = content.index(line) + 1\n if('Atoms' in line): aflag = content.index(line) + 1\n try:\n mflag\n except NameError:\n raise Exception(\"ERROR! 
You need to define the masses in the .dat file.\")\n atomtypes = {}\n while(nelems > 0):\n if(len(content[mflag].split()) == 2):\n atomtypes[int(content[mflag].split()[0])] = masses.get_znum(float(content[mflag].split()[1]))\n nelems -= 1\n mflag += 1\n self.atoms = []\n natoms = self.natoms\n while(natoms > 0):\n sline = content[aflag].split()\n if(len(sline) >= 5):\n # We found an atom\n id = int(sline[0])\n type = int(sline[1])\n x = float(sline[2])\n y = float(sline[3])\n z = float(sline[4])\n znum = atomtypes[type]\n # Add it to the model\n self.atoms.append(Atom(id,znum,x,y,z))\n natoms -= 1\n aflag += 1", "def read_cips_factoid_examples(input_file, is_training=False):\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n qas_id = entry[\"query_id\"]\n question_text = entry[\"answer\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n paragraph_text = \"\"\n for paragraph in entry[\"passages\"]:\n paragraph_text += paragraph[\"passage_text\"]\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n is_impossible = False\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples", "def parse_trajectory(path: str) -> Optional[List[Dict[str, tuple]]]:\n lines = _get_lines_from_file(path)\n\n ess_file = False\n if path.split('.')[-1] != 'xyz':\n try:\n log = ess_factory(fullpath=path, check_for_errors=False)\n ess_file = True\n except (InputError, RMGInputError):\n ess_file = False\n\n if ess_file:\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_trajectory only supports Gaussian files, got {type(log)}')\n traj = list()\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Input orientation:' in lines[i]:\n i += 5\n xyz_str = ''\n while len(lines) and '--------------------------------------------' not in lines[i]:\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n traj.append(str_to_xyz(xyz_str))\n i += 1\n\n else:\n # this is not an ESS output file, probably an XYZ format file with several Cartesian coordinates\n skip_line = False\n num_of_atoms = 0\n traj, xyz_lines = list(), list()\n for line in lines:\n splits = line.strip().split()\n if len(splits) == 1 and all([c.isdigit() for c in splits[0]]):\n if len(xyz_lines):\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n num_of_atoms = int(splits[0])\n skip_line = True\n xyz_lines = list()\n elif skip_line:\n # skip the comment line\n skip_line = False\n continue\n else:\n 
xyz_lines.append(line)\n\n if len(xyz_lines):\n # add the last point in the trajectory\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n\n if not len(traj):\n logger.error(f'Could not parse trajectory from {path}')\n return None\n return traj", "def read_atoms(data, system, atom_style, units, atomsstart, atomscolumns):\n \n if atomsstart is not None:\n prop_info = atoms_prop_info(atom_style, units)\n ncols = countreadcolumns(prop_info)\n \n # Read Atoms table\n system = load_table(data, box=system.box, system=system, \n prop_info=prop_info, skiprows=atomsstart,\n nrows=system.natoms, comment='#',\n header=None, usecols=range(ncols))\n \n # Check if image flags are included\n if atomscolumns == ncols + 3:\n \n # Read image flags\n with uber_open_rmode(data) as f:\n imageflags = pd.read_csv(f, delim_whitespace=True, names=['bx', 'by', 'bz'],\n skiprows=atomsstart, nrows=system.natoms, comment='#',\n header=None, usecols=range(ncols, atomscolumns),\n dtype='int64')\n\n # Wrap atoms to correct images\n shift = imageflags.values.dot(system.box.vects)\n system.atoms.pos[:] += shift\n \n # Check for correct number of columns\n elif ncols != atomscolumns:\n raise FileFormatError(f'atom_style={atom_style} requires {ncols} or {ncols+3} Atoms table columns but {atomscolumns} found')\n\n return system", "def read_szx_fmv_13(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"land_frac\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n 
data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n data[\"f_land\"] = data.pop(\"land_frac\")\n\n return data, metadata", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def read_marco_examples(input_file, is_training):\n with open(input_file, \"r\") as reader:\n source = json.load(reader)\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n \n def is_alphabet(char):\n if (char >= '\\u0041' and char <= '\\u005a') or (char >= '\\u0061' and char <= '\\u007a'):\n return True\n return False\n def is_number(char):\n if char >= '\\u0030' and char <= '\\u0039':\n return True\n return False\n def is_other(uchar):\n if not (is_number(uchar) or is_alphabet(uchar)):\n return True\n return False\n\n \n def z_tokenize(text):\n doc_tokens = 
[]\n char_to_word_offset = []\n\n # pas2tokens\n prev_is_whitespace = True\n for c in text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace or (doc_tokens[-1][-1] == '.' and not is_number(c)):\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n char_to_word_offset.append(len(doc_tokens))\n\n return doc_tokens, char_to_word_offset\n\n query_ids = source['query_id']\n queries = source['query']\n passages = source['passages']\n answers = source.get('answers', {})\n\n examples = []\n for qid in query_ids:\n passage = passages[qid]\n query = queries[qid]\n answer = answers.get(qid)\n\n if is_training:\n if answer is None or answer[0] == 'No Answer Present.':\n continue\n\n for ans in answer[0:1]:\n if len(ans) == 0:\n continue\n\n \n is_match = False\n for ind, pas in enumerate(passage[:10]):\n paragraph_text = pas['passage_text']\n pos = paragraph_text.find(ans)\n if pos >= 0:\n is_match = True\n if not is_match:\n continue\n \n for ind, pas in enumerate(passage[:10]):\n paragraph_text = pas['passage_text']\n doc_tokens, char_to_word_offset = z_tokenize(paragraph_text)\n if pas.get('is_selected', False):\n is_select = 1.0\n else:\n is_select = 0.0\n pos = paragraph_text.find(ans)\n if pos >= 0:\n start = char_to_word_offset[pos]\n end = char_to_word_offset[pos+len(ans)]\n ans_text = ans\n has_answer = 1.0\n else:\n start = 0\n end = 0\n ans_text = ans\n has_answer = 0.0\n\n example = MarcoExample(\n qas_id=qid,\n question_text=query,\n doc_tokens=doc_tokens,\n orig_answer_text=ans_text,\n start_position=start,\n end_position=end,\n is_select=is_select,\n has_answer = has_answer)\n examples.append(example)\n if ind < 9:\n for idx in range(ind+1, 10):\n examples.append(example)\n else:\n if answer[0] == 'No Answer Present.':\n continue\n \n for ans in answer[0:1]:\n is_match = False\n for ind, pas in enumerate(passage[:10]):\n paragraph_text = pas['passage_text']\n pos = paragraph_text.find(ans)\n if pos >= 0:\n is_match = True\n \n start = None\n end = None\n ans_text = None\n if not is_match:\n ans_text = ''\n\n for ind, pas in enumerate(passage[:10]):\n paragraph_text = pas['passage_text']\n doc_tokens, char_to_word_offset = z_tokenize(paragraph_text)\n if pas.get('is_selected', False):\n is_select = 1.0\n else:\n is_select = 0.0\n pos = paragraph_text.find(ans)\n if pos >= 0:\n has_answer = 1.0\n else:\n has_answer = 0.0\n\n example = MarcoExample(\n qas_id=qid,\n question_text=query,\n doc_tokens=doc_tokens,\n orig_answer_text=ans_text,\n start_position=start,\n end_position=end,\n is_select=is_select,\n has_answer = has_answer)\n examples.append(example)\n if ind < 9:\n for idx in range(ind+1, 10):\n examples.append(example)\n\n return examples" ]
[ "0.6658475", "0.6070183", "0.60173386", "0.56104475", "0.5587364", "0.5542085", "0.5493325", "0.54809505", "0.5463508", "0.54497206", "0.5433617", "0.5410467", "0.5403503", "0.5391849", "0.53836143", "0.53776634", "0.5351196", "0.535097", "0.53310233", "0.5329605", "0.530887", "0.53058416", "0.5273308", "0.5270341", "0.52604413", "0.5247374", "0.524609", "0.5243965", "0.5233387", "0.5198822" ]
0.77015275
0
Convert from a lon/lat/depth/strike/dip/rake/mag string to a double couple type
def sdr2dc( string ):

    # convert first 7 columns into array
    try:
        data = NP.fromstring( string, count=7, sep =' ', dtype=float )
    except IndexError:
        print >> sys.stderr, "Error: Require 7 columns: lon/lat/depth/strike/dip/rake/mag"
        return None

    # get variables
    hypo = data[0:3 ] # lon , lat, z
    sdr = data[3:6]*pi/180 # in radians
    mag = data[6]

    return DoubleCouple( c=hypo, h=hypo, strike = sdr[0], dip = sdr[1], rake=sdr[2], mag=mag )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert(coords):\n lat = coords[:4]\n lon = coords[4:]\n\n lat = lat[:2] + \".\" + lat[2:]\n\n if int(lon[0]) > 5:\n lon = \"-\" + lon[:2] + \".\" + lon[2:]\n else:\n lon = \"-1\" + lon[:2] + \".\" + lon[2:]\n\n return (float(lat), float(lon))", "def _parse_ra_dec(coord_str):\n if isinstance(coord_str, str):\n coord1 = coord_str.split()\n else:\n # This exception should never be raised from SkyCoord\n raise TypeError(\"coord_str must be a single str\")\n\n if len(coord1) == 6:\n coord = (\" \".join(coord1[:3]), \" \".join(coord1[3:]))\n elif len(coord1) > 2:\n coord = PLUS_MINUS_RE.split(coord_str)\n coord = (coord[0], \" \".join(coord[1:]))\n elif len(coord1) == 1:\n match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)\n if match_j:\n coord = match_j.groups()\n if len(coord[0].split(\".\")[0]) == 7:\n coord = (\n f\"{coord[0][0:3]} {coord[0][3:5]} {coord[0][5:]}\",\n f\"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}\",\n )\n else:\n coord = (\n f\"{coord[0][0:2]} {coord[0][2:4]} {coord[0][4:]}\",\n f\"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}\",\n )\n else:\n coord = PLUS_MINUS_RE.split(coord_str)\n coord = (coord[0], \" \".join(coord[1:]))\n else:\n coord = coord1\n\n return coord", "def floatify(latlon):\n sign = -2. * (latlon[-1].lower() in ['s', 'w']) + 1\n return float(latlon[:-1]) * sign", "def floatify(latlon):\n sign = -2. * (latlon[-1].lower() in ['s', 'w']) + 1\n return float(latlon[:-1]) * sign", "def dd_to_dms_str(lat, lon):\n \"\"\" lat: latitude in degrees \"\"\"\n \"\"\" lon: longitude in degrees \"\"\"\n \"\"\" returns: string tuple in format (\"51 deg 30' 12.86\\\" N\", \"0 deg 7' 54.50\\\" W\") \"\"\"\n \"\"\" this is the same format used by exiftool's json format \"\"\"\n # TODO: add this to readme\n\n lat_deg, lat_min, lat_sec = _dd_to_dms(lat)\n lon_deg, lon_min, lon_sec = _dd_to_dms(lon)\n\n lat_hemisphere = \"N\"\n if any([lat_deg < 0, lat_min < 0, lat_sec < 0]):\n lat_hemisphere = \"S\"\n\n lon_hemisphere = \"E\"\n if any([lon_deg < 0, lon_min < 0, lon_sec < 0]):\n lon_hemisphere = \"W\"\n\n lat_str = (\n f\"{abs(lat_deg)} deg {abs(lat_min)}' {abs(lat_sec):.2f}\\\" {lat_hemisphere}\"\n )\n lon_str = (\n f\"{abs(lon_deg)} deg {abs(lon_min)}' {abs(lon_sec):.2f}\\\" {lon_hemisphere}\"\n )\n\n return lat_str, lon_str", "def extractData(coords: str) -> (str, float, float, float, float):\n aircraft_type = coords.split(\":\", 1)\n x_coord, y_coord, heading, speed = aircraft_type[1].split(\",\")\n\n return aircraft_type, float(x_coord), float(y_coord), float(heading), float(speed)", "def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * \\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p 
** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y", "def normalize_simple(line):\n first = find_next_comma_newline(line,0)\n #print \"first: %d\" % first\n second = find_next_comma_newline(line,first+1)\n #print \"second: %d\" % second\n third = find_next_comma_newline(line,second+1)\n #print \"third: %d\" % third\n if third == -1:\n lon = float(line[second+1:])\n else:\n lon = float(line[second+1:third])\n return int(line[0:first]),float(line[first+1:second]),lon", "def dd2dm(lat,lon):\r\n lat_d = int(abs(lat)) #calculate latitude degrees\r\n lat_m = (abs(lat) - lat_d) * 60. #calculate latitude minutes\r\n\r\n lon_d = int(abs(lon))\r\n lon_m = (abs(lon) - lon_d) * 60.\r\n \r\n la=lat_d*100.+lat_m\r\n lo=lon_d*100.+lon_m\r\n return la,lo", "def getDouble(self, address: ghidra.program.model.address.Address) -> float:\n ...", "def parse_speed(as_str: str) -> float:\n return float(as_str.rstrip(\"x\"))", "def read_double(data):\n s_type = \"=%s\" % get_type(\"double\")\n return struct.unpack(s_type, data.read(8))[0]", "def decode_double(self, buf, pos):\n return self.decode_struct(self._double_fmt, buf, pos)", "def lnglat_to_meters(longitude, latitude):\n if isinstance(longitude, (list, tuple)):\n longitude = numpy.array(longitude)\n if isinstance(latitude, (list, tuple)):\n latitude = numpy.array(latitude)\n\n origin_shift = numpy.pi * 6378137\n easting = longitude * origin_shift / 180.0\n northing = numpy.log(numpy.tan((90 + latitude) * numpy.pi / 360.0)) * origin_shift / numpy.pi\n return (easting, northing)", "def read_double(self):\n return self._packers[\"d\"].unpack(self.read(8))[0]", "def _normalize_location(lat: float, lon: float):\n latitude = \"{0:.3f}\".format(round(lat, 3))\n longitude = \"{0:.3f}\".format(round(lon, 3))\n return latitude + \":\" + longitude", "def lonlat2xy(s_lon, s_lat): # x: easting, y: northing\r\n # convert decimals to seconds...\r\n s_lon = dec2sec(s_lon)\r\n s_lat = dec2sec(s_lat)\r\n\r\n ## Auxiliary values \r\n # i.e. 
differences of latitude and longitude relative to Bern in the unit [10000'']\r\n s_lng_aux = (s_lon - 26782.5)/10000.\r\n s_lat_aux = (s_lat - 169028.66)/10000.\r\n \r\n # easting\r\n s_x = (600072.37 \r\n + 211455.93*s_lng_aux \r\n - 10938.51*s_lng_aux*s_lat_aux \r\n - 0.36*s_lng_aux*(s_lat_aux**2) \r\n - 44.54*(s_lng_aux**3))\r\n \r\n # northing\r\n s_y = (200147.07 \r\n + 308807.95*s_lat_aux \r\n + 3745.25*(s_lng_aux**2) \r\n + 76.63*(s_lat_aux**2) \r\n - 194.56*(s_lng_aux**2)*s_lat_aux \r\n + 119.79*(s_lat_aux**3))\r\n\r\n return s_x, s_y", "def formatCoordinates(string):\n if string == 'N/A':\n return 0\n else:\n return float(string)", "def parselatlon_statontable(astring, positive_char, negative_char):\n lstr = astring.strip()\n if not lstr:\n return None\n lastch = lstr[-1]\n if not lastch in [positive_char, negative_char]:\n return None\n try:\n result = float(lstr[:-1])\n except ValueError:\n return None\n if lastch == negative_char:\n result = -result\n return result", "def read4num(self,s):\n s = self.getKeyword(s)\n return np.float_(s.split(','))", "def gpgga_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]", "def gx_coords1(s: str) -> list[float]:\n return numarray(s.split(\" \"))", "def convertToDouble(boolean: bool) -> float:\n ...", "def __parse_traffic(str):\n return float(str.strip().split(\",\")[0].replace('.',''))", "def _decode(geohash):\n lat_val, lng_val, lat_err, lng_err = _decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val", "def gon2rad(gon):\n return radians(gon2dec(gon))", "def SI_string_to_float(inStr, debug = False):\n func_name = \"SI_string_to_float\"\n \n # Debug print incoming string. \n if debug: print(\"DEBUG: (Func = %s): Input-str: %s\" %( func_name, inStr ))\n \n #Remove all spaces from incoming string. \n inStr = inStr.replace(\" \", \"\"); \n if debug: print(\"DEBUG: (Func = %s): Removed spaces: %s\" %( func_name, inStr ))\n \n # Allocate return value, and search in\n result = None\n letters = re.search( r'([\\d\\.]+)([a-z A-Z]+)', inStr)\n \n # Query if match was found. If not, print warning then try to directly convert incoming string.\n if letters:\n try:\n value = float(letters.group(1))\n scale = float(SI_UNITS[letters.group(2)])\n result = value * scale\n if debug: print(\"DEBUG: (Func = %s): Value: %f, scale: %f, result: %f\"%(func_name, value,scale,result))\n except:\n print(\"ERROR: (Func = %s): Couldn't extract value and SI-Unit.\"%func_name)\n print(\" Possible issue with seaching 'SI_UNITS for (%s)\"% scale)\n else:\n print(\"WARNING: (Function = %s) Couldn't extract value and SI-Unit. Will attempt direct float conversion... 
\"%func_name)\n #print(\" Used the following regex: '([\\d\\.]+)([a-z A-Z]+)'\")\n result = float(inStr) # TODO : Insert try catch \n \n return result", "def lat_DMS_shdp2DD(lat):\n if REGEX_LAT_DMS_shdp.match(lat):\n dms_group = REGEX_LAT_DMS_shdp.search(lat)\n h = dms_group.group('hem') # Hemisphere\n d = int(dms_group.group('deg')) # Degrees\n m = int(dms_group.group('min')) # Minutes\n s = float(dms_group.group('sec')) # Seconds\n if d == 90 and (m > 0 or s > 0): # Check if does not exceed 90 00 00.00\n result = NOT_VALID\n else: # correct format - convert from DMS to DD\n result = ((s / 60) + m) / 60 + d\n if h in ['S', 's'] and result != 0: # If hemisphere is south coordinate is negative \n result = -result\n else:\n result = NOT_VALID\n return result", "def parse_galcoord(l, b):\n try:\n if (re.search(r\"[^\\d.+\\-]\", l) is None) and (\n re.search(r\"[^\\d.+\\-]\", b) is None\n ):\n coord = SkyCoord(l, b, unit=\"deg\", frame=\"galactic\")\n else:\n coord = SkyCoord(l, b, frame=\"galactic\")\n except ValueError:\n log.error(\"Unable to parse input coordinates '{},{}'\".format(ra, dec))\n return None\n return coord", "def test_location_to_coord():\n result_a = wwiki.location_to_coord(location)\n result_b = wwiki.location_to_coord(location)\n\n assert isinstance(result_a, str)\n assert result_b == \"48.76569917989272|2.392394129892722\"" ]
[ "0.5970741", "0.59387934", "0.59290093", "0.59290093", "0.5781259", "0.56974715", "0.56738037", "0.5610365", "0.55993795", "0.5574758", "0.55611956", "0.55337924", "0.551011", "0.55073005", "0.54503554", "0.53815985", "0.53663087", "0.5364104", "0.5358592", "0.53561544", "0.53412765", "0.5326255", "0.52906865", "0.52849627", "0.52696776", "0.5262801", "0.52598816", "0.52535117", "0.52521414", "0.52387166" ]
0.64489216
0
Perform a full update of the orderbook, asks and bids are expected to be in ascending and descending order respectively
def updateOrderbookFull(self, asks, bids):

        self.asks = asks
        self.bids = bids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_order():", "def update_order():", "def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" \\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()", "def update_book(self):\n while self.lowest_sell is not None and self.highest_buy is not None and self.lowest_sell <= self.highest_buy:\n sell = self.sell_levels[self.lowest_sell].head_order\n buy = self.buy_levels[self.highest_buy].head_order\n self.execute_trade(sell, buy)", "async def _book(self, msg: dict, timestamp: float):\n # PERF perf_start(self.id, 'book_msg')\n\n delta = {BID: [], ASK: []}\n # if we reset the book, force a full update\n forced = False\n pair = self.exchange_symbol_to_std_symbol(msg['data'][0]['symbol'])\n if not self.partial_received[pair]:\n # per bitmex documentation messages received before partial\n # should be discarded\n if msg['action'] != 'partial':\n return\n self.partial_received[pair] = True\n forced = True\n\n if msg['action'] == 'partial':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n price = Decimal(data['price'])\n size = Decimal(data['size'])\n order_id = data['id']\n\n self._l2_book[pair][side][price] = size\n self.order_id[pair][side][order_id] = price\n elif msg['action'] == 'insert':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n price = Decimal(data['price'])\n size = Decimal(data['size'])\n order_id = data['id']\n\n self._l2_book[pair][side][price] = size\n self.order_id[pair][side][order_id] = price\n delta[side].append((price, size))\n elif msg['action'] == 'update':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n update_size = Decimal(data['size'])\n order_id = data['id']\n\n price = self.order_id[pair][side][order_id]\n\n 
self._l2_book[pair][side][price] = update_size\n self.order_id[pair][side][order_id] = price\n delta[side].append((price, update_size))\n elif msg['action'] == 'delete':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n order_id = data['id']\n\n delete_price = self.order_id[pair][side][order_id]\n del self.order_id[pair][side][order_id]\n del self._l2_book[pair][side][delete_price]\n delta[side].append((delete_price, 0))\n\n else:\n LOG.warning(\"%s: Unexpected l2 Book message %s\", self.id, msg)\n return\n # PERF perf_end(self.id, 'book_msg')\n # PERF perf_log(self.id, 'book_msg')\n\n await self.book_callback(self._l2_book[pair], L2_BOOK, pair, forced, delta, timestamp, timestamp)", "def perform_update(self, response):\n if len(response) == 4: # Received data consists of [channelid, price, count, amount]\n # omit channelid from list because we only subscribed to BTCUSD channel\n update_item = response[1:]\n\n update_type = \"bid\" if update_item[2] > 0 else \"ask\" # set type = \"bid\" if amount > 0 else \"ask\"\n row = self.session.query(Book_Item).filter_by(exchange='Bitfinex',\n type=update_type,\n price=update_item[0]).first()\n\n if row:\n row.count = update_item[1] # update count\n if row.count == 0: # if row is updated to count = 0, delete row\n self.session.delete(row)\n self.session.commit()\n # print(row, \"Deleted from Bitfinex\")\n else: # if row doesn't exist, add item to db\n new_item = self.add_new_bitfinex_item(update_type, update_item[0], update_item[1])\n self.session.add(new_item)\n self.session.commit() # commit in order to set the id attribute\n row = self.session.query(Book_Item).filter_by(exchange=new_item.exchange,\n price=new_item.price,\n type=new_item.type,\n count=new_item.count).first()\n # print(row, \"Added to Bitfinex\")\n\n send_update_to_clients(row)", "def bin_book_update(binfile, book):\n trade_update_fmt = \"II\"\n trade_update_data = [0, 0]\n order_book_level_fmt = \"IIIIII\"\n levels = [\n (book.bid[-(i+1)].price * DECIMAL_CONVERT,\n book.bid[-(i+1)].qty,\n book.bid[-(i+1)].order_count,\n book.offer[i].price * DECIMAL_CONVERT,\n book.offer[i].qty,\n book.offer[i].order_count) for i in range(5)]\n order_book_level_data = []\n for data in levels:\n order_book_level_data += list(data)\n order_book_level_data = [int(v) for v in order_book_level_data]\n valids_fmt = \"I\"\n valids_data = [2]\n the_data = [now_nanos(), book.security] + \\\n trade_update_data + order_book_level_data + valids_data\n data = struct.pack(\"<QI\" + trade_update_fmt + order_book_level_fmt * 5 + valids_fmt,\n *the_data)\n binfile.write(data)", "def update_orderbook(self, existing_orderbook_obj, instrument, market_place, market_segment, market_capability, \\\n tick_size_list, round_lot, day_count, orderbook_name, tiering_level, orderbook_curr=None):\n logger.DLOG(\"Updating orderbook...\") \n clone_obj = existing_orderbook_obj.Clone()\n clone_obj.Instrument = instrument\n if orderbook_curr:\n clone_obj.Currency = orderbook_curr\n else:\n clone_obj.Currency = instrument.Currency()\n clone_obj.Quotation = instrument.Quotation()\n clone_obj.MarketPlace = market_place\n clone_obj.RoundLot = self.get_round_lot(instrument, round_lot)\n #clone_obj.PhysicalMarketSegment(market_segment)\n clone_obj.Name = orderbook_name\n clone_obj.QuoteFactor = 1\n clone_obj.TickSizeList = self.get_tick_size_list(tick_size_list, market_capability)\n if str(tiering_level):\n clone_obj.ExternalType = tiering_level\n clone_obj.ExternalId = orderbook_name\n\n try: \n 
existing_orderbook_obj.Apply(clone_obj)\n existing_orderbook_obj.Commit() \n \n #group_map = self.get_list_leaf(clone_obj,market_segment) \n #if group_map and clone_obj.GroupMaps().IndexOf(group_map) <0 :\n # clone_obj.GroupMaps().Add(group_map) \n # clone_obj.GroupMaps().Commit() \n \n logger.LOG(\"**Successfully** updated orderbook information: <%s> for instrument <%s>\"%(orderbook_name, instrument.Name()))\n except Exception as e:\n logger.ELOG('**Error** while updating OrderBook %s : %s'%(orderbook_name, e))", "async def _check_order_update(self, *args, **kwargs):\n order_nos = list(self._orders.keys())\n if not order_nos:\n return\n for order_no in order_nos:\n success, error = await self._rest_api.get_order_status(order_no)\n if error:\n return\n await self._update_order(success[\"data\"][0])", "def order_book_builder(self, data, timestamp, datetime, symbol):\n if isinstance(data[1], list):\n data = data[1]\n # Price, Count, Amount\n bids = {\n str(level[0]): [str(level[1]), str(level[2])]\n for level in data if level[2] > 0\n }\n asks = {\n str(level[0]): [str(level[1]), str(abs(level[2]))]\n for level in data if level[2] < 0\n }\n self.orderbooks[symbol].update({'bids': bids})\n self.orderbooks[symbol].update({'asks': asks})\n self.orderbooks[symbol].update({'timestamp': timestamp})\n self.orderbooks[symbol].update({'datetime': datetime})\n\n else:\n # Example update message structure [1765.2, 0, 1] where we have [price, count, amount].\n # Update algorithm pseudocode from Bitfinex documentation:\n # 1. - When count > 0 then you have to add or update the price level.\n # 1.1- If amount > 0 then add/update bids.\n # 1.2- If amount < 0 then add/update asks.\n # 2. - When count = 0 then you have to delete the price level.\n # 2.1- If amount = 1 then remove from bids\n # 2.2- If amount = -1 then remove from asks\n data = data[1:]\n data = [str(data[0]), str(data[1]), str(data[2])]\n if int(data[1]) > 0: # 1.\n\n if float(data[2]) > 0: # 1.1\n self.orderbooks[symbol]['bids'].update({data[0]: [data[1], data[2]]})\n\n elif float(data[2]) < 0: # 1.2\n self.orderbooks[symbol]['asks'].update({data[0]: [data[1], str(abs(float(data[2])))]})\n\n elif data[1] == '0': # 2.\n\n if data[2] == '1': # 2.1\n if self.orderbooks[symbol]['bids'].get(data[0]):\n del self.orderbooks[symbol]['bids'][data[0]]\n\n elif data[2] == '-1': # 2.2\n if self.orderbooks[symbol]['asks'].get(data[0]):\n del self.orderbooks[symbol]['asks'][data[0]]", "def update(d,title, author, year, isbn,shelf,raw):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"UPDATE book where isbn=%s\"\n \"SET title = %s, \"\n \"author = %s, \"\n \"year = %s, \"\n \"shelf=%s,\"\n \"raw=%s\", \n (isbn,title, author, year,shelf,raw))\n conn_obj.commit()\n conn_obj.close()", "def update(self, message):\n try:\n if message['type'] == 'l2update':\n if self.snapshot_received:\n self.l2update(message)\n else:\n self.backlog += message['changes']\n elif message['type'] == 'snapshot':\n self.snapshot(message)\n except Exception as e:\n raise Exception(\"Error processing {} OrderBook update: Message -> {}\".format(message['product_id'], e))", "def update_orders(comp, order, user_correct, payment_id):\n users_orders = []\n for item in order.items.all():\n users_orders.append(item.id)\n item.is_paid = True\n item.save()\n order.related_competition = comp\n order.payment_id = payment_id\n order.order_date = timezone.now()\n order.answer_correct = user_correct\n 
order.ordered = True\n order.save()\n return order", "def __on_update_bookticker(self, action, bookticker):\n self.best_bid_price = float(bookticker['b'])\n self.best_ask_price = float(bookticker['a'])", "def test_update_book_details(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n new_book_details = {\n \"title\": \"First Man\",\n \"author\": \"James Hansen\",\n \"year\": 2018,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 5\n }\n\n assert first_book_list.update_book_details(new_book_details) == True\n assert first_book_list.find_book(\"First Man\") == True\n\n for book in first_book_list.show_all():\n assert book.get(\"title\") == \"First Man\"\n assert book.set(\"title\", \"First Man: The Life of Neil A. Armstrong\") == True\n\n assert first_book_list.find_book(\"First Man: The Life of Neil A. Armstrong\") == True", "def rent_book(self, bookID):\n query = f\"\"\"UPDATE {TABLE} set quantity = quantity - 1 where bookID = '{bookID}';\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)", "def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])", "def m_ps_FieldsUpdated(self, sender, e):\r\n if e.Error == None:\r\n # Make sure that there is a valid bid\r\n if e.Fields.GetBestBidPriceField().HasValidValue:\r\n if self.m_orderKey == \"\":\r\n # If there is no order working, submit one through the first valid order feed.\r\n # You should use the order feed that is valid for your purposes.\r\n op = ttapi.OrderProfile(e.Fields.Instrument.GetValidOrderFeeds()[0], e.Fields.Instrument)\r\n op.BuySell = ttapi.BuySell.Buy\r\n op.AccountName = \"12345678\"\r\n op.AccountType = ttapi.AccountType.A1\r\n op.OrderQuantity = ttapi.Quantity.FromInt(e.Fields.Instrument, 1)\r\n op.OrderType = ttapi.OrderType.Limit\r\n op.LimitPrice = e.Fields.GetBestBidPriceField().Value\r\n if not self.m_ts.SendOrder(op):\r\n print(\"Send new order failed. {0}\".format(op.RoutingStatus.Message))\r\n self.Dispose()\r\n else:\r\n self.m_orderKey = op.SiteOrderKey\r\n print(\"Send new order succeeded.\")\r\n elif self.m_ts.Orders.ContainsKey(self.m_orderKey) and self.m_ts.Orders[self.m_orderKey].LimitPrice != e.Fields.GetBestBidPriceField().Value:\r\n # If there is a working order, reprice it if its price is not the same as the bid\r\n op = self.m_ts.Orders[self.m_orderKey].GetOrderProfile()\r\n op.LimitPrice = e.Fields.GetBestBidPriceField().Value\r\n op.Action = ttapi.OrderAction.Change\r\n if not self.m_ts.SendOrder(op):\r\n print(\"Send change order failed. 
{0}\".format(op.RoutingStatus.Message))\r\n else:\r\n print(\"Send change order succeeded.\")\r\n else:\r\n if e.Error.IsRecoverableError == False:\r\n print(\"Unrecoverable price subscription error: {0}\".format(e.Error.Message))\r\n self.Dispose()", "async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)", "def book(self, irc, msg, args, thing):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n results = self.db.getCurrencyBook(thing)\n if len(results) == 0:\n irc.error(\"No orders for this currency present in database.\")\n return\n if len(results) > self.registryValue('maxOrdersInBookList'):\n irc.error(\"Too many orders to list on IRC. Visit the web \"\n \"order book, http://bitcoin-otc.com/vieworderbook.php?eitherthing=%s \"\n \"to see list of orders for this item.\" % (thing,))\n return\n self._getMtgoxQuote()\n L = [\"#%s %s %s %s %s %s @ %s %s (%s)\" % (id,\n time.ctime(refreshed_at),\n nick,\n buysell,\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes) \\\n for (id,\n created_at,\n refreshed_at,\n buysell,\n nick,\n host,\n amount,\n thing,\n price,\n otherthing,\n notes) in results]\n irc.replies(L, joiner=\" || \")", "def update_price_books(self, barcode, new_price):\n try:\n self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Updated Successfully!')", "def _update(self, count=True, forced=False):", "def update_OpenOrders(self, market):\n mid = self.marketid(market)\n o_orders = self.Request.fetch('marketorders',params={'marketid':mid})\n ##check the form of o_orders\n \n print o_orders\n #self.OpenOrders[self.Pairs[mid]] = \n return 0", "def update_bid(self, auction, amount):\n try:\n amount = Decimal(amount)\n except Exception, e:\n amount = Decimal('0')\n\n bid = Bid.objects.get(bid_busket=self, auction=auction)\n if not bid.is_locked():\n if amount == 0:\n bid.delete()\n else:\n bid.amount = amount\n bid.save()\n self.save()\n return bid", "def update_book_scores(self):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating=NULL, total_rating_score=0, num_ratings=0\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM comment\"\"\")\n for comment in self.cursor.fetchall():\n self.cursor.execute(\"\"\"UPDATE book SET total_rating_score=total_rating_score+%s,\n num_ratings=num_ratings+1 WHERE ISBN=%s\"\"\", (comment[3], comment[1]))\n self.db.commit()\n self.update_average_book_rating(comment[1])", "def sync(self):\n\n new_book = {}\n update_list = [self.book[WAIT_OPEN], self.book[OPEN]]\n\n for status, booklet in self.book.items():\n new_book[status] = {}\n\n for status, booklet in self.book.items():\n for pos_id, position in booklet.items():\n\n position.update()\n new_status = position.status\n\n if status == new_status:\n new_book[status][pos_id] = position\n else:\n 
new_book[new_status][pos_id] = position\n\n self.book = new_book", "def update_bid(self, bid_price, bidder):\n bidder_info = \"Starting Bid\"\n if self.current_bidder is not None:\n bidder_info = self.current_bidder.name\n print(f\"{bidder.name} bidded {bid_price} in response to \"\n f\"{bidder_info}'s bid of {self.current_bid}!\")\n self._highest_current_bid = bid_price\n self._highest_current_bidder = bidder\n self.start_new_bids()", "def test_update_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n with test_client.put(\n \"/book/{}/\".format(book[\"id\"]),\n data={\n \"title\": \"title one updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book,\n \"title\": \"title one updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book,\n \"title\": \"title one updated\"\n }\n }\n )\n\n \"\"\"\n clear the table, create several books, update them and read them\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n with test_client.put(\n \"/book/{}/\".format(book_one[\"id\"]),\n data={\n \"title\": \"title one updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book_one,\n \"title\": \"title one updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book_one[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book_one,\n \"title\": \"title one updated\"\n }\n }\n )\n\n with test_client.put(\n \"/book/{}/\".format(book_two[\"id\"]),\n data={\n \"title\": \"title two updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book_two,\n \"title\": \"title two updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book_two[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book_two,\n \"title\": \"title two updated\"\n }\n }\n )", "def test_api_can_update_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# update book\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['title'] == 'updated book')", "def test_partly_update_book(self):\n data = {'isbn':'96712116-2'}\n response = self.client.patch(self.book.get_absolute_url(), data, format='json', content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response = self.client.get(self.book.get_absolute_url())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, '96712116-2')" ]
[ "0.7094027", "0.7094027", "0.6515729", "0.64941806", "0.6293617", "0.6264788", "0.6104545", "0.6064261", "0.59736687", "0.591912", "0.5854378", "0.5806818", "0.57774216", "0.5761083", "0.57600677", "0.5716418", "0.5709784", "0.56838304", "0.5666011", "0.5658304", "0.564571", "0.5631941", "0.56300193", "0.55978006", "0.55960923", "0.5592065", "0.5586635", "0.5577287", "0.5555572", "0.5521864" ]
0.78466827
0
Voice Shuffle the playing queue. Accepts no arguments.
def on_shuffle(self, event):
    self.pre_check(event)
    shuffle(self.get_player(event.guild.id).queue)
    api_loop(event.channel.send_message, "Queue shuffled.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def shuffle(self, ctx: commands.Context) -> Optional[bool]:\n\n queue = self.queue[ctx.guild.id]\n\n queue.shuffle = not queue.shuffle\n if queue.shuffle:\n queue.original_queue = queue.queue\n\n play_queue = queue.queue[queue.pos + 1 :]\n shuffled_queue = random.sample(play_queue, len(play_queue))\n queue.queue = (\n queue.queue[: queue.pos] + [queue.now_playing] + shuffled_queue\n )\n\n return queue.shuffle", "def shuffle_songs(self):\n random.shuffle(self.playlist)", "def shuffle(self) -> None:\n shuffle(self.cards)", "def shuffle(self):\n shuffle(self.cards)", "def shuffle(self):\n shuffle(self.cards)", "async def queue(self, ctx, *args):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None or ctx.voice_client is not self.voice:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n\r\n queue_string = \"```\"\r\n try:\r\n # if(args[0]==\"clear\"):\r\n #\tself.songs=[]\r\n if args[0] == \"remove\":\r\n pos = len(self.songs) - 1\r\n while pos > 0:\r\n if args[1].lower() in self.songs[pos][0].title.lower():\r\n if ctx.author.id not in self.songs[pos][4]:\r\n self.songs[pos][4].append(ctx.author.id)\r\n shortened_title = self.title_shorten(self.songs[pos][0].title)\r\n print(self.songs[pos][1])\r\n print(ctx.message.author)\r\n if (len(ctx.message.author.voice.channel.members) - 1 > len(self.songs[pos][4]) * 2\r\n and not is_mod\r\n and not ctx.message.author.id == self.songs[pos][1]):\r\n await ctx.send(\"{0} remove votes registered for `{1}`, need {2} to remove song.\".format(\r\n len(self.songs[pos][4]),\r\n shortened_title,\r\n int((len(ctx.message.author.voice.channel.members) - 1) / 2)))\r\n else:\r\n await ctx.send(\"Removing `{0}`\".format(shortened_title))\r\n self.del_song(pos)\r\n pos = pos - 1\r\n except:\r\n pass\r\n pos = 0\r\n for song in self.songs:\r\n if pos == 0:\r\n pos_indicator = \"> \"\r\n else:\r\n pos_indicator = \"{0}.\".format(str(pos))\r\n shortened_title = self.title_shorten(song[0].title)\r\n queue_string = \"{0}{1}{2}\\n\".format(queue_string, pos_indicator, shortened_title)\r\n pos = pos + 1\r\n if queue_string == \"```\":\r\n return await ctx.send(\"Queue is empty\")\r\n await ctx.send(\"{0}```\".format(queue_string))", "def shuffle_question(self):\n r = random.SystemRandom()\n r.shuffle(self.question_list)", "def test_play_queue(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.play()\n player.queue(source)", "def test_queue_play(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()", "async def queue(self, ctx):\n state = self.get_voice_state(ctx.message.server)\n if state.voice is None:\n await self.bot.say('**Not in a voice channel!**')\n return False\n if not state.songs:\n await self.bot.say('**Song queue is empty!**')\n return False\n if (not state.songs._queue) and (not state.current):\n await self.bot.say('**Song queue is empty!**')\n return False\n target = self.bot.user\n au = target.avatar_url\n avatar_link = (au if au else target.default_avatar_url)\n if (not state.songs._queue) and (state.current):\n key_str = 'are no songs in queue. 
One is playing right now.'\n elif state.songs._queue:\n key_str = 'is 1 song playing, and %s in queue.' % str(len(state.songs._queue))\n emb = discord.Embed(color=int('0x%06X' % random.randint(0, 256**3-1), 16), title='Voice Queue', description='There ' + key_str)\n emb.set_author(name=target.display_name, url='https://blog.khronodragon.com/', icon_url=avatar_link)\n emb.set_footer(text='Best bot! :3', icon_url=avatar_link)\n if state.current:\n emb.add_field(name='**[NOW PLAYING]** ' + state.current.get_name(), value=state.current.get_desc(), inline=False)\n else:\n await self.bot.say('**Not playing anything right now!**')\n return False\n for e in state.songs._queue:\n emb.add_field(name=e.get_name(), value=e.get_desc())\n await self.bot.say('🎶🎵', embed=emb)", "def shuffle( self ):\n random.shuffle(self.__deck)", "def shuffle(self):\r\n self._current = 0\r\n random.shuffle(self._cards)", "def shuffle(self):\n reorder(self.cards) #importing shuffle as reorder", "def make_music_rand():\n pass", "def _shuffle(self):\n print \"Shuffled the bag\"\n # The 'random' library provides a really handy function we can\n # use called 'shuffle'. You provide 'shuffle' with a\n # 'sequence' (basically, a list) and the shuffle function\n # randomizes the placement of all items in the sequence\n # automatically. There is no return value from\n # \"random.shuffle\" because \"self.bag\" is modified in place.\n random.shuffle(self.bag)", "def _shuffle():\n\n random.shuffle(deck)", "def shuffle(self):\r\n random.shuffle(self.deck)", "def Shuffle(self):\r\n random.shuffle(self.cards_list)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n import random\n random.shuffle(self.cards)", "def generate_shuffle(self):\n self._shuffle_list = range(len(self._playlist))\n random.shuffle(self._shuffle_list)\n if self._active_index in self._shuffle_list:\n self._shuffle_list.remove(self._active_index)", "def play_all(pre, choice, post=\"\"):\n options = pre + choice + post\n play(options, \"1-\" + str(len(g.model.songs)))", "def shuffle(self) -> None:\r\n random.shuffle(self._deck)", "def shuffle(self):\n random.shuffle(self.get_cards())", "def shuffle(self):\n random.SystemRandom().shuffle(self.deck)", "def shuffle(self):\n\n args = list(self)\n random.shuffle(args)\n\n self.clear()\n super(DogeDeque, self).__init__(args)" ]
[ "0.6754124", "0.61670613", "0.6148838", "0.6137774", "0.6137774", "0.6024911", "0.59862536", "0.59546244", "0.59360856", "0.5931226", "0.58906937", "0.5836928", "0.5814776", "0.5808147", "0.5798833", "0.5793498", "0.57764506", "0.57740307", "0.573743", "0.573743", "0.573743", "0.573743", "0.573743", "0.57193595", "0.5710587", "0.56892335", "0.56836635", "0.56522864", "0.56210744", "0.5612994" ]
0.6967331
0
Voice Get the information of a certain song in the queue or the amount of songs in the queue. If an integer argument is input, then this will return the relevant queue entry. If a string is input, then the string will be used to search for queue entry titles. Otherwise, if no arguments are passed, this will return the current length of the queue.
def on_queued_command(self, event, index=None):
    self.pre_check(event)
    if not self.get_player(event.guild.id).queue:
        api_loop(
            event.channel.send_message,
            "There aren't any songs queued right now.",
        )
    elif index is None:
        api_loop(
            event.channel.send_message,
            "There are {} songs queued ({} minutes). To get a specific song's info, just do this command + index.".format(
                len(self.get_player(event.guild.id).queue),
                self.minutes_format(self.get_player(
                    event.guild.id,
                ).queue_length),
            ),
        )
    elif (index.replace("-", "").strip(" ").isdigit() and
            0 <= (int(index.replace("-", "").strip(" ")) - 1) <=
            len(self.get_player(event.guild.id).queue)):
        ytdata = self.get_ytdl_values(
            self.get_player(event.guild.id).queue[
                int(index.replace("-", "").strip(" ")) - 1
            ].metadata,
        )
        api_loop(
            event.channel.send_message,
            "The song at index ``{}`` is ``{}`` by ``{}`` with length ``{}`` minutes and is sourced from ``{}``.".format(
                int(index.replace("-", "").strip(" ")),
                ytdata["title"],
                ytdata["uploader"],
                ytdata["time_formated"],
                ytdata["source"],
            ),
        )
    elif index.replace("-", "").isdigit():
        api_loop(event.channel.send_message, "Invalid index input.")
    else:
        matched_list = dict()
        for item in self.get_player(event.guild.id).queue:
            ratio = partial_ratio(item.metadata["title"], index)
            if ratio >= 70:
                matched_list["#{} ({}% match)".format(
                    self.get_player(event.guild.id).queue.index(item)+1,
                    ratio,
                )] = item.metadata["title"]
        if matched_list:
            embed = bot.generic_embed_values(
                title="Queue search results",
                footer_text="Requested by {}".format(event.author),
                non_inlines={
                    k: matched_list[k] for k in list(matched_list)[-25:]
                },
                footer_img=event.author.get_avatar_url(size=32),
                timestamp=event.msg.timestamp.isoformat(),
            )
            api_loop(event.channel.send_message, embed=embed)
        else:
            api_loop(
                event.channel.send_message,
                "No similar items found in queue.",
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def queue(self, ctx):\n state = self.get_voice_state(ctx.message.server)\n if state.voice is None:\n await self.bot.say('**Not in a voice channel!**')\n return False\n if not state.songs:\n await self.bot.say('**Song queue is empty!**')\n return False\n if (not state.songs._queue) and (not state.current):\n await self.bot.say('**Song queue is empty!**')\n return False\n target = self.bot.user\n au = target.avatar_url\n avatar_link = (au if au else target.default_avatar_url)\n if (not state.songs._queue) and (state.current):\n key_str = 'are no songs in queue. One is playing right now.'\n elif state.songs._queue:\n key_str = 'is 1 song playing, and %s in queue.' % str(len(state.songs._queue))\n emb = discord.Embed(color=int('0x%06X' % random.randint(0, 256**3-1), 16), title='Voice Queue', description='There ' + key_str)\n emb.set_author(name=target.display_name, url='https://blog.khronodragon.com/', icon_url=avatar_link)\n emb.set_footer(text='Best bot! :3', icon_url=avatar_link)\n if state.current:\n emb.add_field(name='**[NOW PLAYING]** ' + state.current.get_name(), value=state.current.get_desc(), inline=False)\n else:\n await self.bot.say('**Not playing anything right now!**')\n return False\n for e in state.songs._queue:\n emb.add_field(name=e.get_name(), value=e.get_desc())\n await self.bot.say('🎶🎵', embed=emb)", "async def search(self, ctx: commands.Context, *, query: t.Optional[str]) -> None:\n if query is None:\n # Maybe the user didn't know to pass in a query?\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.description = (\n \"No query passed in. Try passing in something: `$search arabic music`\"\n )\n embed.set_footer(\n text=\"See $help voice for more commands.\", icon_url=Icons.info\n )\n return await ctx.send(embed=embed)\n\n if (results := await self.get_tracks(query, True, False)) is not None:\n # Ensure that we're connected before playing.\n await ctx.invoke(self.connect, channel=None)\n player = self.get_player(ctx.guild)\n if not player.is_connected:\n return\n\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.set_footer(\n text=f\"Showing 5/{len(results)} results.\",\n icon_url=ctx.author.avatar_url,\n )\n embed.description = \"\"\n results = results[:5]\n\n for index, track in enumerate(results, 1):\n m, s = self.get_formatted_length(track.length, True)\n embed.description += (\n f\"**{index}**. 
[{track.title}]({track.uri}) ({m}:{s})\\n\"\n )\n\n # Get a integer selection using Choice.prompt().\n if (\n choice := await Choices.prompt(\n ctx=ctx, embed=embed, n=5, author_only=True\n )\n ) is None:\n if player.queue.empty:\n await ctx.invoke(self.disconnect)\n return\n\n embed = discord.Embed(\n title=\"Now queued.\" if player.is_playing else \"Now playing.\",\n description=f\"[{results[choice].title}]({results[choice].uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(results[choice].length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if results[choice].thumb is not None:\n embed.set_thumbnail(url=results[choice].thumb)\n await ctx.send(embed=embed)\n\n player.queue.add_tracks(results[choice])\n if not player.is_playing:\n await player.playback()\n else:\n fail = Embeds.status(success=False, desc=\"Failed to find any results.\")\n await ctx.send(embed=fail)", "def findQueued(self):\r\n songs = Queue.objects.filter(played=False, playtime__lte = datetime.datetime.now()).order_by('-priority', 'id')\r\n if not songs: # Since OR queries have been problematic on production server earlier, we do this hack..\r\n songs = Queue.objects.filter(played=False, playtime = None).order_by('-priority', 'id')\r\n if settings.PLAY_JINGLES:\r\n jingle = self.JingleTime()\r\n if jingle:\r\n return jingle\r\n if songs:\r\n song = songs[0]\r\n common.play_queued(song)\r\n return song.song\r\n else:\r\n return self.getRandom()", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "async def _queue(self, msg):\n if msg.voice_client is not None:\n if msg.guild.id in self.player:\n if self.player[msg.guild.id]['queue']:\n emb = discord.Embed(\n colour=self.random_color, title='queue')\n emb.set_footer(\n text=f'Command used by {msg.author.name}', icon_url=msg.author.avatar_url)\n for i in self.player[msg.guild.id]['queue']:\n emb.add_field(\n name=f\"**{i['author'].author.name}**\", value=i['title'], inline=False)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(\"No songs in queue\")", "def get_next_song():\n try:\n next_song = queue.getSong()\n queue_change()\n currently_playing_change(next_song)\n return json.dumps(next_song.to_dict())\n except IndexError as e:\n return json.dumps({'error': 'No songs in the queue'})", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def get_queue_song_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [params.get('P', 1)]\n return args", "async def search(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = \"ytsearch:{}\".format(query)\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n 
player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n msg = \"\"\n for i, x in enumerate(results[\"tracks\"][:10], start=1):\n msg += \"{}. **[{}]({})**\\n\".format(i, x[\"info\"][\"title\"], x[\"info\"][\"uri\"])\n message = await ctx.send(embed=discord.Embed(description=msg).set_footer(text=\"Choose a number to the queue the song | cancel\"))\n def check(m):\n return m.channel == ctx.channel and m.author == ctx.author and (m.content.isdigit() or m.content.lower() == \"cancel\")\n try:\n response = await self.bot.wait_for(\"message\", check=check, timeout=60)\n if response.content.lower() == \"cancel\":\n await response.delete()\n return await message.delete()\n else:\n track = results[\"tracks\"][int(response.content) + 1]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s=discord.Embed()\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await response.delete()\n await message.delete()\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()\n except asyncio.TimeoutError:\n return await ctx.send(\"Timed out :stopwatch:\")", "async def queue(self, ctx, *args):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None or ctx.voice_client is not self.voice:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n\r\n queue_string = \"```\"\r\n try:\r\n # if(args[0]==\"clear\"):\r\n #\tself.songs=[]\r\n if args[0] == \"remove\":\r\n pos = len(self.songs) - 1\r\n while pos > 0:\r\n if args[1].lower() in self.songs[pos][0].title.lower():\r\n if ctx.author.id not in self.songs[pos][4]:\r\n self.songs[pos][4].append(ctx.author.id)\r\n shortened_title = self.title_shorten(self.songs[pos][0].title)\r\n print(self.songs[pos][1])\r\n print(ctx.message.author)\r\n if (len(ctx.message.author.voice.channel.members) - 1 > len(self.songs[pos][4]) * 2\r\n and not is_mod\r\n and not ctx.message.author.id == self.songs[pos][1]):\r\n await ctx.send(\"{0} remove votes registered for `{1}`, need {2} to remove song.\".format(\r\n len(self.songs[pos][4]),\r\n 
shortened_title,\r\n int((len(ctx.message.author.voice.channel.members) - 1) / 2)))\r\n else:\r\n await ctx.send(\"Removing `{0}`\".format(shortened_title))\r\n self.del_song(pos)\r\n pos = pos - 1\r\n except:\r\n pass\r\n pos = 0\r\n for song in self.songs:\r\n if pos == 0:\r\n pos_indicator = \"> \"\r\n else:\r\n pos_indicator = \"{0}.\".format(str(pos))\r\n shortened_title = self.title_shorten(song[0].title)\r\n queue_string = \"{0}{1}{2}\\n\".format(queue_string, pos_indicator, shortened_title)\r\n pos = pos + 1\r\n if queue_string == \"```\":\r\n return await ctx.send(\"Queue is empty\")\r\n await ctx.send(\"{0}```\".format(queue_string))", "async def _queue(self, ctx: commands.Context, *, page: int = 1):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('The queue is empty.')\n\n items_per_page = 1\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = ''\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += f\"`{i + 1}.` [**{song.source.title}**]({song.source.url})\\n\"\n\n embed = (discord.Embed(\n description=f\"**{len(ctx.voice_state.songs)} tracks:**\\n\\n{queue}\")\n .set_footer(\n text=f\"Viewing page {page}/{pages}\"))\n\n await ctx.send(embed=embed)", "async def queue(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n if player.queue.empty:\n # The queue is empty. Send an embed to the user to let them know.\n embed = discord.Embed(\n description=\"Queue is currently empty.\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n embed.set_footer(\n text=\"Try playing some tracks!\", icon_url=ctx.author.avatar_url\n )\n return await ctx.send(embed=embed)\n\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.set_footer(\n text=f\"Current repeat mode: {player.queue.repeating.name}.\",\n icon_url=ctx.author.avatar_url,\n )\n\n # Add a history field if we have tracks in the queue history.\n if player.queue.history and (text := \"\") is not None:\n for index, track in enumerate(player.queue.history):\n text += f\"**{index + 1}**. [{track.title}]({track.uri})\\n\"\n\n embed.add_field(name=\"Queue history:\", value=text, inline=False)\n\n # Add an upcoming field if we have tracks ahead.\n if player.queue.upcoming and (text := \"\") is not None:\n for index, track in enumerate(player.queue.upcoming):\n text += f\"**{index + 1}**. 
[{track.title}]({track.uri})\\n\"\n\n embed.add_field(name=\"Upcoming tracks:\", value=text, inline=False)\n\n # Add the current track's field and its current status.\n if (current := player.queue.current_track) is not None:\n if not player.is_paused:\n em, es = self.get_formatted_length(player.position, True)\n tm, ts = self.get_formatted_length(current.length, True)\n if current.is_stream:\n title = f\"Currently streaming: ```[{em}:{es}]```\"\n else:\n title = f\"Currently playing: ```[{em}:{es} / {tm}:{ts}]```\"\n else:\n title = \"Currently paused.\"\n\n embed.add_field(\n name=title, value=f\"[{current.title}]({current.uri})\", inline=False\n )\n await ctx.send(embed=embed)", "async def play(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = query.strip('<>')\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n if not url_re.match(query):\n query = \"ytsearch:{}\".format(query)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n s=discord.Embed()\n if results[\"loadType\"] == \"PLAYLIST_LOADED\":\n tracks = results[\"tracks\"]\n for track in tracks:\n player.add(requester=ctx.author.id, track=track)\n s.description = \"Enqueued {} with **{}** tracks <:done:403285928233402378>\".format(results['playlistInfo']['name'], len(tracks))\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n else:\n track = results[\"tracks\"][0]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()", "async def _queue(self, ctx: commands.Context, *, page: int = 1):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n items_per_page = 10\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = ''\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += '`{0}.` [**{1.source.title}**]({1.source.url})\\n'.format(i + 1, song)\n\n 
embed = (discord.Embed(description='**{} tracks:**\\n\\n{}'.format(len(ctx.voice_state.songs), queue))\n .set_footer(text='Viewing page {}/{}'.format(page, pages)))\n await ctx.send(embed=embed)", "def search_queue_number(self, Q_strip):\n if Q_strip is self.PF_Q_strip:\n out = self.dut.send_expect(\"cat config/common_base\", \"]# \", 10)\n pattern = \"(%s=)(\\d*)\" % Q_strip\n else :\n out = self.dut.send_expect(\"cat drivers/net/i40e/i40e_ethdev.c\", \"]# \", 10)\n pattern = \"#define %s\\s*(\\d*)\" % Q_strip\n s = re.compile(pattern)\n res = s.search(out)\n if res is None:\n print utils.RED('Search no queue number.')\n return None\n else:\n if Q_strip is self.VF_Q_strip:\n queue = res.group(1)\n else :\n queue = res.group(2)\n return int(queue)", "def get_queue_number(self):\n outstring = self.dut.send_expect(\"stop\", \"testpmd> \")\n time.sleep(2)\n result_scanner = r\"Forward Stats for RX Port= %s/Queue=\\s?([0-9]+)\" % self.dut_ports[0]\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(outstring)\n queue_id = m.group(1)\n print \"queue is %s\" % queue_id\n self.dut.send_expect(\"start\", \"testpmd> \")\n return queue_id", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def get_q(data):\n des_n = re.findall(r'^([Ss]ize )\\d+( size)', data['item_description'])\n name_n = re.findall(r'\\d+', data['name'])\n # import ipdb; ipdb.set_trace()\n if len(des_n) == 1 and len(name_n) == 1:\n if int(des_n[0]) == int(name_n[0]):\n ret_n = int(des_n[0])\n else:\n ret_n = 1\n return ret_n\n\t# pass\n # return pd.concat(qnty_matches).reset_index().drop_duplicates(\n # subset='index', keep='last').set_index('index')", "def show_queue(Q):\n print(\"(Size of the queue:\", Q.qsize(), \")\", end=\" \")\n for n in list(Q.queue):\n print(n, end=\" \")\n print()", "def get_current_queue_position(speaker, tracks=None):\n qp = 0\n is_playing = False\n track_title = None\n\n try:\n track_info = speaker.get_current_track_info()\n qp = int(track_info[\"playlist_position\"])\n track_title = track_info[\"title\"]\n except:\n qp = 0\n\n try:\n cts = speaker.get_current_transport_info()[\"current_transport_state\"]\n if cts == \"PLAYING\":\n if tracks is not None:\n try:\n if tracks[qp - 1].title == track_title:\n is_playing = True\n else:\n is_playing = False\n qp = 1\n except:\n is_playing = False\n qp = 1\n else:\n is_playing = True\n else:\n is_playing = False\n except:\n is_playing = False\n\n return qp, is_playing", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except Queue.Empty:\n return None\n return item", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def read(self,getindex):\n if getindex<0:\n #print(\"Indicies are non-negative\")\n return None\n try:\n bufinx = len(self.buffer)+(getindex - 
self.index.value)\n if bufinx<0:\n #print(\"This item has been deleted, try increasing the queue size\")\n return None\n return self.buffer[bufinx]\n except IndexError:\n #print(\"This item doesn't exist yet\")\n return None", "def media_track(self):\n return self.coordinator.data.nowplaying[self.zone.SourceID].QueueSongIndex", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except queue.Empty:\n return None\n return item", "def getSong(self):\n queue = self.instantiate_queue()\n song_data = queue.pop(0)\n\n history = self.instantiate_history()\n history_song_data = deepcopy(song_data)\n history_song_data['time_played'] = time() + 5\n history.append(history_song_data)\n\n if len(queue) < 5:\n self.addImplicit(queue, history)\n \n self.ageSongs(queue)\n self.calculateScore(queue)\n queue = self.sortSongs(queue)\n\n self.cache.set('queue', queue)\n self.cache.set('history', history)\n\n keys = ['name', 'track_id', 'artist', 'album_uri', 'album_name', 'duration', 'explicit', 'valence', 'energy']\n args = [song_data[key] for key in keys]\n return Song(*args)", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "async def queue(self, msg, song):\n title1 = await Downloader.get_info(self, url=song)\n title = title1[0]\n data = title1[1]\n # NOTE:needs fix here\n if data['queue']:\n await self.playlist(data, msg)\n # NOTE: needs to be embeded to make it better output\n return await msg.send(f\"Added playlist {data['title']} to queue\")\n self.player[msg.guild.id]['queue'].append(\n {'title': title, 'author': msg})\n return await msg.send(f\"**{title} added to queue**\".title())", "def song_length(ans):\r\n length = 0\r\n flag = 1\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n words = words.split()\r\n for word in words:\r\n length += 1\r\n flag = 1\r\n return str(length)\r\n\r\n elif ans != song and flag == 0:\r\n return \"song not found!\"", "def qstat(self, *options):\n if self.in_queue():\n jobid = self.get_db('jobid')\n cmd = ['qstat'] + list(options) + [jobid]\n\n status, output, err = getstatusoutput(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n print(output)\n else:\n print(output + err)\n else:\n print('{} not in queue.'.format(self.directory))" ]
[ "0.58874625", "0.57933635", "0.57293785", "0.5726627", "0.5717032", "0.568477", "0.5672505", "0.56646675", "0.56352663", "0.5583631", "0.5564733", "0.55526024", "0.545143", "0.5411789", "0.5408162", "0.53968614", "0.5395845", "0.5395595", "0.53679144", "0.53476036", "0.5339481", "0.53372246", "0.5333851", "0.5329167", "0.5321969", "0.5302311", "0.5287741", "0.52833444", "0.5133867", "0.5108962" ]
0.6463423
0
Voice Move an item that's already queued to the front of the queue by index. Only accepts a single integer argument (the index of the target queue item).
def on_queue_next_command(self, event, index):
    self.pre_check(event)
    self.same_channel_check(event)
    if 1 < index <= len(self.get_player(event.guild.id).queue):
        index -= 1
        self.get_player(event.guild.id).queue.insert(
            0,
            self.get_player(event.guild.id).queue.pop(index),
        )
        ytdata = self.get_ytdl_values(
            self.get_player(event.guild.id).queue[0].metadata,
        )
        api_loop(
            event.channel.send_message,
            "Moved ``{}`` to the front of the queue.".format(
                ytdata["title"],
                ytdata["uploader"],
                ytdata["time_formated"],
                ytdata["source"],
            ),
        )
    else:
        api_loop(event.channel.send_message, "Invalid index input.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def moveTo(self, index):\n\n await self.VoiceClient.http.setQueueSource(self.tag, {\"index\": index})\n\n return self", "def MoveItem(src_queue, trg_queue, order_func):\n score, item = heapq.heappop(src_queue)\n score = - float(order_func(- score, item))\n heapq.heappush(trg_queue, (score, item))\n return item", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def enqueue_front(self, item):\n self._items.insert(0, item)", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Cannot remove song because the queue is empty.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "async def skip(self, ctx: commands.Context, index: int = None) -> Optional[Player]:\n\n queue = self.queue[ctx.guild.id]\n\n # Created duplicate to make sure InvalidSkipIndex isn't raised when the user does pass an index and the queue\n # is empty.\n skip_index = 0 if index is None else index - 1\n if not skip_index < len(queue.queue) and not queue.pos < skip_index:\n if index:\n await self.call_event(\n \"on_music_error\", ctx, InvalidSkipIndex(\"Skip index invalid.\")\n )\n return\n\n if (\n not queue.autoplay\n and queue.loop != Loops.QUEUE_LOOP\n and (len(queue.queue) - 1) <= queue.pos + skip_index\n ):\n await self.call_event(\n \"on_music_error\", ctx, SkipError(\"No song to skip to.\")\n )\n return\n\n original_position = queue.pos\n queue.pos += skip_index\n\n if queue.autoplay:\n last_video_id = queue.played_history[-1].data[\"videoDetails\"][\"videoId\"]\n player = (await Player.get_similar_videos(last_video_id, self.youtube))[0]\n queue.add(player)\n else:\n player = queue.queue[original_position]\n\n await maybe_coroutine(ctx.voice_client.stop)\n return player", "def queue_shift(current_queue):\n current_queue.pop('сar 1')\n\n if current_queue['cars in the queue'] != 1:\n for car_number in range(1, current_queue['cars in the queue']):\n current_queue['сar ' + str(car_number)] = current_queue['сar ' + str(car_number + 1)].copy()\n current_queue.pop('сar ' + str(current_queue['cars in the queue']))\n\n current_queue['cars in the queue'] -= 1\n return current_queue", "async def queue_remove(self, ctx: commands.Context, index: int) -> Optional[Player]:\n\n try:\n queue = self.queue[ctx.guild.id]\n\n return queue.remove(queue.pos + index)\n except IndexError:\n await self.call_event(\n \"on_music_error\",\n ctx,\n RemoveIndexInvalid(\"Failure when removing player from queue\"),\n )", "def on_queued_command(self, event, index=None):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n api_loop(\n event.channel.send_message,\n \"There aren't any songs queued right 
now.\",\n )\n elif index is None:\n api_loop(\n event.channel.send_message,\n \"There are {} songs queued ({} minutes). To get a specific song's info, just do this command + index.\".format(\n len(self.get_player(event.guild.id).queue),\n self.minutes_format(self.get_player(\n event.guild.id,\n ).queue_length),\n ),\n )\n elif (index.replace(\"-\", \"\").strip(\" \").isdigit() and\n 0 <= (int(index.replace(\"-\", \"\").strip(\" \")) - 1) <=\n len(self.get_player(event.guild.id).queue)):\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[\n int(index.replace(\"-\", \"\").strip(\" \")) - 1\n ].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"The song at index ``{}`` is ``{}`` by ``{}`` with length ``{}`` minutes and is sourced from ``{}``.\".format(\n int(index.replace(\"-\", \"\").strip(\" \")),\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n elif index.replace(\"-\", \"\").isdigit():\n api_loop(event.channel.send_message, \"Invalid index input.\")\n else:\n matched_list = dict()\n for item in self.get_player(event.guild.id).queue:\n ratio = partial_ratio(item.metadata[\"title\"], index)\n if ratio >= 70:\n matched_list[\"#{} ({}% match)\".format(\n self.get_player(event.guild.id).queue.index(item)+1,\n ratio,\n )] = item.metadata[\"title\"]\n if matched_list:\n embed = bot.generic_embed_values(\n title=\"Queue search results\",\n footer_text=\"Requested by {}\".format(event.author),\n non_inlines={\n k: matched_list[k] for k in list(matched_list)[-25:]\n },\n footer_img=event.author.get_avatar_url(size=32),\n timestamp=event.msg.timestamp.isoformat(),\n )\n api_loop(event.channel.send_message, embed=embed)\n else:\n api_loop(\n event.channel.send_message,\n \"No similar items found in queue.\",\n )", "def add_prev(self, item, index):\n if index in self.d_buffer.keys():\n return\n if len(self) == self._size:\n self.pop_last()\n self.add_item(item, index)", "def move(self):\n active_item = self.stack.pop()\n self.backlog.put(active_item)", "async def previous(\n self, ctx: commands.Context, index: int = None, no_autoplay: bool = False\n ) -> Optional[List[Player]]:\n\n queue = self.queue[ctx.guild.id]\n\n previous_index = 2 if index is None else index + 1\n if 0 >= previous_index:\n if index:\n await self.call_event(\n \"on_music_error\",\n ctx,\n InvalidPreviousIndex(\"Previous index invalid.\"),\n )\n return\n\n original_queue_position = queue.pos\n queue.pos -= previous_index\n previous_players = queue.queue[queue.pos + 1 : original_queue_position]\n\n if no_autoplay:\n for player in previous_players[:]:\n if not player.requester:\n previous_players.remove(player)\n queue.queue.remove(player)\n\n await maybe_coroutine(ctx.voice_client.stop)\n return previous_players", "async def remove(self, ctx, index: int):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if not player.queue:\n return await ctx.send('Nothing is queued :no_entry:')\n if index > len(player.queue) or index < 1:\n return await ctx.send(\"Invalid song index :no_entry:\")\n index -= 1\n removed = player.queue.pop(index)\n\n await ctx.send(\"Removed **\" + removed.title + \"** from the queue <:done:403285928233402378>\")", "async def jump(self, ctx, song_index: int):\n player = 
self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. [{ctx.message.author.mention}]'))\n\n if song_index > len(player.queue) + 1:\n return await ctx.send(embed=self.error_embed(\"There is no such song in the queue.\"))\n\n for i in range(song_index - 1):\n player.queue.pop(0)\n await player.skip()\n await ctx.message.add_reaction(\"✅\")", "def _do_move_item(self, event, index, is_down):\n if index <= 0:\n return wx.Bell()\n\n level = self.item_level(index)\n items_to_move = [ self._get_all_texts(index) ]\n i = index+1\n while i < self.items.GetItemCount():\n # collect the items to move up\n if self.item_level(i) > level:\n items_to_move.append(self._get_all_texts(i))\n i += 1\n else: break\n i = index-1\n while i >= 0:\n lvl = self.item_level(i)\n if level == lvl: break\n elif level > lvl:\n return wx.Bell()\n i -= 1\n for j in range(len(items_to_move)-1, -1, -1):\n self.items.DeleteItem(index+j)\n items_to_move.reverse()\n for level, label, event_handler, name, type_, help_str, id in items_to_move:\n i = self._insert_item_string(i, level)\n self._set_item_string(i, \"label\", label)\n self._set_item_string(i, \"name\", name)\n self._set_item_string(i, \"help_str\", help_str)\n self._set_item_string(i, \"type\", type_)\n self._set_item_string(i, \"event_handler\", event_handler)\n self._set_item_string(i, \"id\", id)\n ret_idx = i\n if is_down: ret_idx += len(items_to_move)\n self._select_item(ret_idx, True)", "def loot_enq_move(self, item, toloc):\n itemloc = item.Location.ToLocation()\n self.pqi.enq(2, ['move', [[itemloc.X, itemloc.Y, itemloc.Z],\n item.Id, itemloc.Z,\n [toloc.X, toloc.Y, toloc.Z],\n item.Count\n ]])", "def enqueueItem(item):\n if item.type not in AgentInventory.__idQueue__:\n AgentInventory.__idQueue__[item.type] = []\n\n # If item id is already in queue, move it to front. 
Otherwise, just prepend it\n if item.id in AgentInventory.__idQueue__[item.type]:\n idx = AgentInventory.__idQueue__[item.type].index(item.id)\n del AgentInventory.__idQueue__[item.type][idx]\n AgentInventory.__idQueue__[item.type].insert(0, item.id)\n else:\n AgentInventory.__idQueue__[item.type].insert(0, item.id)", "def queue(self, irc, msg, args, notice):\n pos = self._find_in_queue(msg.nick)\n QUEUE_SLOTS = self.registryValue('queueSlots')\n if pos < 0:\n if QUEUE_SLOTS >= 0 and self._count >= QUEUE_SLOTS:\n irc.reply(\"Sorry, but the queue is out of slots\")\n return\n self._queue.append((msg.nick, notice))\n self._count += 1\n irc.reply(\"I queued you at position %s in the queue\" % len(self._queue))\n self._dump_queue()\n elif self._queue[pos][1] != notice:\n self._queue[pos] = (msg.nick, notice)\n irc.reply(\"You're queued at position %s already, I've updated \"\\\n \"notice to '%s'\" % (pos + 1, notice))\n self._dump_queue()\n else:\n irc.reply(\"You're already in the queue at position %s.\" % (pos+1))", "def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1", "def getitem(self, index):\n #FIXME: A better way to get item without removing it.\n priority,size,trace=self.queues[index].get()\n self.enqueue(index,trace,priority)\n return trace", "def enqueue(self, item):\n self.__queue.insert(0, item)", "def rotate_queue(queue):\n\n next_value = queue[0]\n queue.rotate(-1)\n\n return next_value", "def add_next(self, item, index):\n if index in self.d_buffer.keys():\n return\n if len(self) == self.size:\n self.pop_first()\n self.add_item(item, index)", "def move_aquarium(aq: Aquarium):\n move_fish_list(aq.fishes, aq.width)", "def bypass_queue(self, name):\n # self.queue = [name] + self.queue\n # self.queue.insert(0, name)\n\n # self.lst = [name] + self.lst # This person is brought to the front of the queue\n self.lst.insert(0, name) #Not constant time as the pointer is moved for all the members of the queue, 0(n)\n print(f\"{name} has bypassed the queue\")", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def bqm_move_queue(self):\n self.bqm.turn_once()", "def play_card_by_index(self, index, **kwargs):\n target = kwargs.get('target', None)\n self.hand[index].play(target=target)" ]
[ "0.7173604", "0.6445944", "0.6257959", "0.6167987", "0.6000821", "0.6000821", "0.5986443", "0.5971937", "0.5970165", "0.59335166", "0.5919013", "0.5912924", "0.5876682", "0.58699167", "0.5845589", "0.58440965", "0.5759581", "0.5729532", "0.572736", "0.56740284", "0.56264627", "0.5610853", "0.56090325", "0.5570422", "0.5551701", "0.55509853", "0.5548645", "0.55369216", "0.5534488", "0.5530116" ]
0.7288909
0
View the timeline of a transaction
def get_transaction_history(self, txn_id_or_ref):
    response = self.get(f"{self.gateway_path}/timeline/{txn_id_or_ref}")
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history():\n\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n\n #Convert Price to US Dollars and format transaction time\n for t in trans:\n t.price = usd(t.price)\n t.transacted = t.transacted.strftime('%Y-%m-%d %H:%M:%S')\n\n #Return history.html\n return render_template('history.html', trans=trans)", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def history():\n\n # get all transactions for current user\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n\n # render history.html with all user transactions\n return render_template(\"history.html\", transactions=transactions, usd=usd)", "def history():\n transactions_list = db.execute(\"SELECT stock, units, price, time, type FROM transactions WHERE id = :current_id\",\n current_id=session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions_list)", "def history():\n\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = ? ORDER BY date DESC, time DESC\", session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions)", "def view_transactions(request, id):\n account = get_object_or_404(Account, pk=id, user=request.user)\n return render(request, 'ledger/pages/view_transactions.html', {\n 'title': \"View Transactions\",\n 'breadcrumbs': [account],\n 'account': account,\n })", "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def general_timeline():\n return render_template('timeline.html', general=True, show_username=True)", "def timeline(self, start_date, interval, amount, field_key, user_id=None,\r\n pipeline_id=None, filter_id=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/timeline'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))", "def history():\n\n rows = db.execute(\"SELECT * FROM 'transaction' WHERE u_id = :user_id\", user_id = session[\"user_id\"])\n return render_template(\"history.html\", rows = rows)", "def view_transactions(self) -> None:\n user_choice = Menu.prompt_view_transactions()\n if user_choice == 5:\n print(\"Returning to main menu...\")\n return\n\n budget_category = BudgetManager.category_mapping[user_choice]\n print(f\"\\nTransactions in the {budget_category.value} \"\n f\"category: \")\n for tx in self.user.tx_manager:\n if tx.budget_category == user_choice:\n print(f\"\\n{tx}\")", "def history():\n\n transactions = 
db.execute(\"SELECT stock, amount, price, date, time, total_amount FROM transactions WHERE id=:id\", id=session['user_id'])\n\n\n return render_template(\"index.html\", transactions=transactions)", "def timeline(self, language=None):\n return self._get('/incidents/timeline.{language}.json', language)", "def show_timeline(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n action = None\n if pk:\n action = workflow.actions.filter(pk=pk).first()\n\n if not action:\n # The action is not part of the selected workflow\n return redirect('home')\n logs = workflow.logs.filter(payload__action_id=action.id)\n else:\n logs = workflow.logs\n\n event_names = [\n Log.SCHEDULE_EMAIL_EXECUTE,\n Log.DOWNLOAD_ZIP_ACTION,\n Log.SCHEDULE_JSON_EXECUTE,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SCHEDULE_EMAIL_EDIT,\n Log.SCHEDULE_JSON_EDIT,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SURVEY_INPUT,\n ]\n\n # Filter the logs to display and transform into values (process the json\n # and the long value for the log name\n logs = [\n {'id': log.id,\n 'name': log.get_name_display(),\n 'modified': log.modified,\n 'payload': json.dumps(log.payload, indent=2),\n 'action_name': log.payload['action'],\n 'action_id': log.payload['action_id']}\n for log in logs.filter(name__in=event_names)\n ]\n\n return render(\n request,\n 'action/timeline.html',\n {'event_list': logs, 'action': action})", "def history():\n\n userId = session[\"user_id\"]\n\n shares = db.execute(f\"SELECT symbol, shares, price, trans_time FROM transactions WHERE user_id={userId} ORDER BY trans_id DESC\")\n\n return render_template(\"history.html\", shares=shares)", "def history():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Obtain history information for logged in user\n TRANSACTIONS = db.execute(\"SELECT * FROM history WHERE user_id = ? 
ORDER BY transacted DESC\", user_id)\n\n return render_template(\"history.html\", transactions=TRANSACTIONS)", "def history():\n rows = db.execute(text(\n \"SELECT symbol, shares, price, time FROM transactions \"\n \"WHERE user_id=:id\"),\n id=session[\"user_id\"])\n transactions = []\n for row in rows:\n transaction = dict(row)\n transaction[\"price\"] = usd(transaction[\"price\"])\n transactions.append(transaction)\n return render_template(\"history.html\", transactions=transactions)", "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Price, Date FROM history WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n return render_template(\"history.html\", transactionList = transactions, currentUser=session.get(\"user_id\"))", "def get_timeline_feed(self):\n return self.send_request('feed/timeline/')", "def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, context_instance=RequestContext(request))", "def transaction_history(user_id):\n # Run the transaction in the background\n executor.submit(transaction_run)\n user_id = login_session['user_id']\n # Get all transaction made by all the users\n user_tran = Transaction.query.filter_by(done=True).filter_by(user_id=user_id).all()\n target_tran = Transaction.query.filter_by(done=True).filter_by(target_user=user_id).all()\n user_curr = Currency.query.filter_by(user_id=user_id).first()\n\n return render_template('trans_history.html',\n transactions=user_tran + target_tran,\n currency=user_curr)", "def __str__(self):\r\n return '{user}\\'s timeline'.format(user=self.user.username)", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def history():\n\n user = session[\"user_id\"]\n rows = db.execute(\"SELECT * FROM transactions WHERE user_id = :user\", user=user)\n\n # transactions list\n transactions = []\n for row in rows:\n stock_data = lookup(row['symbol'])\n transactions.append(list((\n stock_data['symbol'],\n stock_data['name'],\n row['amount'],\n row['value'],\n row['date'],\n )))\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n userid = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM purchase WHERE userid = :userid\", userid = userid)\n for transaction in transactions:\n transaction[\"price\"] = usd(transaction[\"tot\"]/transaction[\"shares\"])\n transaction[\"name\"] = lookup(transaction[\"symbol\"])['name']\n return render_template(\"history.html\", transactions=transactions)", "def do_gethistory(self,args):\n #Very rough. 
pretty print it\n history=bitstamp.get_usertransactions()\n ppdict(history)", "def get_transition_details(self, account_id, transaction_id):\n endpoint = 'accounts/{0}/transactions{1}'.format(account_id,\n transaction_id)\n\n return self._api.request(endpoint)", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)" ]
[ "0.66819555", "0.66066486", "0.6323791", "0.6302746", "0.6277081", "0.6261644", "0.62445635", "0.62431467", "0.62283117", "0.62170583", "0.62170583", "0.62117505", "0.621032", "0.6209831", "0.6196624", "0.6149073", "0.6040678", "0.60146815", "0.5949876", "0.5923738", "0.5908442", "0.5858187", "0.5851381", "0.58397734", "0.5817024", "0.5769966", "0.5693661", "0.56812465", "0.56771404", "0.5656718" ]
0.68347484
0
return the index corresponding to the first occurrence of value in list
def index(liste, value):
    for ii in range(len(liste)):
        if liste[ii] == value:
            return ii
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self, value):\n self.__validate_value(value)\n for index, v in enumerate(self.__list):\n if v == value:\n return index", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def __idx_of_minimum(cls, lst: list) -> int:\n\t\treturn lst.index(min(lst))", "def find(self, list, key, value):\n for i, dic in enumerate(list):\n if dic[key] == value:\n return i\n return -1", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def linear_search_sentinal(lst, value):\n\n lst.insert(0, value)\n\n i = len(lst) - 1\n\n while lst[i] != value:\n i = i - 1\n\n lst.pop(0)\n\n if i == 0:\n return -1\n else:\n return i - 1", "def get_closest_value_index_in_sorted_list(value, list_):\n if value <= list_[0]:\n return 0\n if value >= list_[-1]:\n return len(list_) - 1\n pos = bisect.bisect_left(list_, value)\n before = list_[pos - 1]\n after = list_[pos]\n if after - value < value - before:\n return pos\n else:\n return pos - 1", "def index(*, list : Union[List[Any], ConduitVariable], item : Any) -> int:\n return -1 if item not in list else list.index(item)", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def index(a_list, i):\n try:\n return a_list[int(i)]\n except IndexError:\n return None", "def linear_search(mylist, key):\r\n for i in range(len(mylist)):\r\n if mylist[i] == key:\r\n return i\r\n return -1", "def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"", "def last_index(list_, value):\n\n found = None\n for index, val in enumerate(list_):\n if val == value:\n found = index\n if found is None:\n raise ValueError(\"{} is not in list {}\".format(value, list_))\n return found", "def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)", "def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1", "def findIndex(lst, key, value):\r\n\r\n for i, dic in enumerate(lst):\r\n if dic['properties'][key] == value:\r\n return i\r\n return -1", "def find_first(item, vec):\n for i in range(len(vec)):\n if item == vec[i]:\n return i\n return -1", "def index_equals_value_search1(arr):\n for key, value in enumerate(arr):\n if value == key:\n return value\n return -1", "def find_min(list):\n return find_value_at(list, -1)", "def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def linear_search(self, num_lst, key):\r\n # Running time: O(n)\r\n for i in range(len(num_lst)):\r\n if num_lst[i] == key:\r\n return i\r\n \r\n return -1", "def linear_search(alist, key):\n for i in range(len(alist)):\n if alist[i] == key:\n return i\n return -1", "def _get_index_closest_val(list, val):\n\n 
return min(range(len(list)), key=lambda i: abs(list[i]-val))", "def smallest_elem_index(input_list):\n if len(input_list) == 0:\n raise Exception(\"List must contain at least 1 element\")\n \n min_index = 0 \n for i in range(1, len(input_list)):\n if input_list[i] < input_list[min_index]:\n min_index = i\n return min_index", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)" ]
[ "0.7666883", "0.75553674", "0.75322366", "0.73974407", "0.7306116", "0.72406524", "0.71949124", "0.71868026", "0.7173489", "0.71538794", "0.71089154", "0.7069118", "0.7064808", "0.70324296", "0.7016907", "0.70150346", "0.69772476", "0.69540596", "0.69502425", "0.69284594", "0.69161266", "0.6891916", "0.6882502", "0.6879058", "0.6872981", "0.6807972", "0.6804545", "0.67761403", "0.67761403", "0.6772871" ]
0.81286085
0
Creates the job_params dict for the API call to launch a Plexus job. Some parameters required to launch a job are not available to the user in the Plexus UI. For example, an app id is required, but only the app name is provided in the UI. This function acts as a backend lookup of the required param value using the user-provided value.
def construct_job_params(self, hook: Any) -> dict[Any, Any | None]:
    missing_params = self.required_params - set(self.job_params)
    if missing_params:
        raise AirflowException(f"Missing the following required job_params: {', '.join(missing_params)}")
    params = {}
    for prm in self.job_params:
        if prm in self.lookups:
            v = self._api_lookup(param=prm, hook=hook)
            params[prm] = v
        else:
            params[prm] = self.job_params[prm]
    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n\n job_spec = {\n 'jobId': job_id,\n 'trainingInput': training_input,\n 'labels': job_labels,\n }\n return job_spec", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n\n job_spec = {\n 'display_name': job_id,\n 'job_spec': training_input,\n 'labels': job_labels,\n }\n return job_spec", "def get_powermax_job_parameters():\n return dict(\n job_id=dict(type='str', required=True)\n )", "def makeSearchJobParamsDict(cls, options, forRunning=False):\n if options[\"searchMethod\"] == \"v2\":\n hsVersion = \"v2\"\n else:\n raise Exception(\"Unsupported search method: %r\" % options[\"searchMethod\"])\n\n maxModels = options[\"maxPermutations\"]\n if options[\"action\"] == \"dryRun\" and maxModels is None:\n maxModels = 1\n\n useTerminators = options[\"useTerminators\"]\n if useTerminators is None:\n params = {\n \"hsVersion\": hsVersion,\n \"maxModels\": maxModels,\n }\n else:\n params = {\n \"hsVersion\": hsVersion,\n \"useTerminators\": useTerminators,\n \"maxModels\": maxModels,\n }\n\n if forRunning:\n params[\"persistentJobGUID\"] = str(uuid.uuid1())\n\n if options[\"permutationsScriptPath\"]:\n params[\"permutationsPyFilename\"] = options[\"permutationsScriptPath\"]\n elif options[\"expDescConfig\"]:\n params[\"description\"] = options[\"expDescConfig\"]\n else:\n with open(options[\"expDescJsonPath\"], mode=\"r\") as fp:\n params[\"description\"] = json.load(fp)\n\n return params", "def get_user_params(job_data):\n try:\n # Get the user parameters which contain the stack, artifact and file settings\n user_parameters = job_data['actionConfiguration']['configuration']['UserParameters']\n decoded_parameters = json.loads(user_parameters)\n\n except Exception as e:\n # We're expecting the user parameters to be encoded as JSON\n # so we can pass multiple values. 
If the JSON can't be decoded\n # then fail the job with a helpful message.\n raise Exception('UserParameters could not be decoded as JSON')\n\n required_items = ['source_artifact', 'build_artifact', 'template_artifact', 'template_subdir_path']\n for i in required_items:\n if i not in decoded_parameters:\n raise Exception('Your UserParameters JSON must include ' + i)\n print(i + \" = \" + decoded_parameters[i])\n\n return decoded_parameters", "def _create_request_dict(\n job_id,\n region,\n image_uri,\n chief_config,\n worker_count,\n worker_config,\n entry_point_args,\n job_labels,\n service_account\n):\n training_input = {}\n training_input[\"region\"] = region\n training_input[\"scaleTier\"] = \"custom\"\n training_input[\"masterType\"] = gcp.get_machine_type(\n chief_config.cpu_cores,\n chief_config.memory,\n chief_config.accelerator_type)\n\n # Set master config\n chief_machine_config = {}\n chief_machine_config[\"imageUri\"] = image_uri\n chief_machine_config[\"acceleratorConfig\"] = {}\n chief_machine_config[\"acceleratorConfig\"][\"count\"] = str(\n chief_config.accelerator_count\n )\n chief_machine_config[\"acceleratorConfig\"][\n \"type\"] = gcp.get_accelerator_type(\n chief_config.accelerator_type.value)\n\n training_input[\"masterConfig\"] = chief_machine_config\n training_input[\"workerCount\"] = str(worker_count)\n\n if worker_count > 0:\n training_input[\"workerType\"] = gcp.get_machine_type(\n worker_config.cpu_cores,\n worker_config.memory,\n worker_config.accelerator_type,\n )\n\n worker_machine_config = {}\n worker_machine_config[\"imageUri\"] = image_uri\n worker_machine_config[\"acceleratorConfig\"] = {}\n worker_machine_config[\"acceleratorConfig\"][\"count\"] = str(\n worker_config.accelerator_count\n )\n worker_machine_config[\"acceleratorConfig\"][\n \"type\"] = gcp.get_accelerator_type(\n worker_config.accelerator_type.value)\n\n # AI Platform runtime version spec is required for training\n # on cloud TPUs.\n # Use TF runtime version 2.1 (latest supported) as the default.\n # https://cloud.google.com/ai-platform/training/docs/runtime-version-list#tpu-support # pylint: disable=line-too-long\n if machine_config.is_tpu_config(worker_config):\n worker_machine_config[\"tpuTfVersion\"] = \"2.1\"\n training_input[\"workerConfig\"] = worker_machine_config\n\n if entry_point_args is not None:\n training_input[\"args\"] = entry_point_args\n\n # This is temporarily required so that the `TF_CONFIG` generated by\n # CAIP uses the keyword 'chief' instead of 'master'.\n training_input[\"use_chief_in_tf_config\"] = True\n request_dict = {}\n request_dict[\"jobId\"] = job_id\n request_dict[\"trainingInput\"] = training_input\n if job_labels:\n request_dict[\"labels\"] = job_labels\n if service_account:\n training_input[\"serviceAccount\"] = service_account\n return request_dict", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n pass", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def _create_job_config(\n self,\n experiment_id: str,\n params: Optional[dict],\n pipeline_package_path: Optional[str],\n pipeline_id: Optional[str],\n version_id: Optional[str],\n enable_caching: Optional[bool],\n ):\n\n class JobConfig:\n\n 
def __init__(self, spec, resource_references):\n self.spec = spec\n self.resource_references = resource_references\n\n params = params or {}\n pipeline_json_string = None\n if pipeline_package_path:\n pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)\n\n # Caching option set at submission time overrides the compile time settings.\n if enable_caching is not None:\n self._override_caching_options(pipeline_obj, enable_caching)\n\n pipeline_json_string = json.dumps(pipeline_obj)\n api_params = [\n kfp_server_api.V1Parameter(\n name=sanitize_k8s_name(name=k, allow_capital_underscore=True),\n value=str(v) if type(v) not in (list, dict) else json.dumps(v))\n for k, v in params.items()\n ]\n resource_references = []\n key = kfp_server_api.models.V1ResourceKey(\n id=experiment_id,\n type=kfp_server_api.models.V1ResourceType.EXPERIMENT)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key, relationship=kfp_server_api.models.V1Relationship.OWNER)\n resource_references.append(reference)\n\n if version_id:\n key = kfp_server_api.models.V1ResourceKey(\n id=version_id,\n type=kfp_server_api.models.V1ResourceType.PIPELINE_VERSION)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key,\n relationship=kfp_server_api.models.V1Relationship.CREATOR)\n resource_references.append(reference)\n\n spec = kfp_server_api.models.V1PipelineSpec(\n pipeline_id=pipeline_id,\n workflow_manifest=pipeline_json_string,\n parameters=api_params)\n return JobConfig(spec=spec, resource_references=resource_references)", "def launch_params(self):\n state = str(uuid.uuid4())\n params = {\n 'response_type': 'code',\n 'client_id': self.config['client_id'],\n 'redirect_uri': self.config['redirect_uri'],\n 'scope': self.config['scope'],\n 'state': state,\n 'aud': self.config['aud'],\n }\n if 'extra_launch_params' in self.config:\n params.update(self.config['extra_launch_params'])\n\n return params", "def create_job_object(job_type: int = 0,\n team_id: int = 0,\n destination_name: str = None,\n destination_lat: float = 0,\n destination_lng: float = 0,\n destination_text: str = None,\n destination_url: str = None,\n text_dispatcher: str = None,\n text_receiver: str = None,\n contact_name: str = None,\n contact_phone: str = None,\n contact_email: str = None,\n day: int = None,\n priority: int = None,\n number: int = None,\n on_site_seconds: int = None,\n window_start: int = None,\n window_end: int = None,\n order_id: int = None,\n dispatcher_uid: str = None,\n place_uid: str = None,\n worker: str = None,\n items_to_dropoff: int = None,\n items_to_pickup: int = None,\n custom_attributes: dict = None) -> dict:\n\n job = {\n \"type\": job_type,\n \"teamId\": team_id,\n \"destinationName\": destination_name,\n \"destinationLat\": destination_lat,\n \"destinationLng\": destination_lng,\n \"destinationText\": destination_text,\n \"destinationUrl\": destination_url,\n \"textDispatcher\": text_dispatcher,\n \"textReceiver\": text_receiver,\n \"contactName\": contact_name,\n \"contactPhone\": contact_phone,\n \"contactEmail\": contact_email,\n \"day\": day,\n \"priority\": priority,\n \"number\": number,\n \"onSiteSeconds\": on_site_seconds,\n \"windowStart\": window_start,\n \"windowEnd\": window_end,\n \"orderId\": order_id,\n \"dispatcherUid\": dispatcher_uid,\n \"placeUid\": place_uid,\n \"worker\": worker,\n \"itemsToDropoff\": items_to_dropoff,\n \"itemsToPickup\": items_to_pickup\n }\n job_without_none = {k: v for k, v in job.items() if v is not None}\n job.clear()\n 
job.update(job_without_none)\n\n if custom_attributes:\n job.update({f\"custom_{k}\": v for k, v in custom_attributes.items() if k})\n\n return job", "def prepare_jobs_data(self, ecosystem, package, version):\n return \\\n {\n \"flow_arguments\": [\n {\n \"ecosystem\": ecosystem,\n \"name\": package,\n \"version\": version,\n \"force\": True,\n \"force_graph_sync\": True,\n \"recursive_limit\": 0\n }\n ],\n \"flow_name\": \"bayesianApiFlow\"\n }", "def __read_job_params_file__(self):\n # | - __read_job_params_file__\n job_params = {}\n\n # file_path = self.full_path + \"/\" + \"job_parameters.json\"\n\n file_exists = False\n\n file_path = os.path.join(\n self.full_path,\n \"job_parameters.json\")\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n ind_i = self.full_path.rfind(self.full_path.split(\"/\")[-1])\n path_i_rt = self.full_path[:ind_i - 1]\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_parameters.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_params.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n if not file_exists:\n print(\"No job_params file found for following job:\")\n print(self.full_path)\n\n return(job_params)\n # __|", "def build_submit_params(job_params, use_submitter_env=True, more_environment=None):\n submit_params = DEFAULT_JOB_PARAMS.copy()\n if use_submitter_env:\n submit_params['getenv'] = 'True'\n if more_environment:\n submit_params['environment'] = _build_environment(more_environment)\n submit_params.update(job_params)\n lines = ['%s=%s' % (k, v) for k, v in submit_params.iteritems()]\n lines.append('Queue')\n return '\\n'.join(lines)", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. 
'\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def updateBuildParams(self, job, item, params):\n\n # NOTE(jhesketh): The params need to stay in a key=value data pair\n # as workers cannot necessarily handle lists.\n\n if callable(job.parameter_function):\n pargs = inspect.getargspec(job.parameter_function)\n if len(pargs.args) == 2:\n job.parameter_function(item, params)\n else:\n job.parameter_function(item, job, params)\n self.log.debug(\"Custom parameter function used for job %s, \"\n \"change: %s, params: %s\" % (job, item.change,\n params))\n\n # NOTE(mmedvede): Swift parameter creation should remain after the call\n # to job.parameter_function to make it possible to update LOG_PATH for\n # swift upload url using parameter_function mechanism.\n if job.swift and self.swift.connection:\n\n for name, s in job.swift.items():\n swift_instructions = {}\n s_config = {}\n s_config.update((k, v.format(item=item, job=job,\n change=item.change))\n if isinstance(v, six.string_types)\n else (k, v)\n for k, v in s.items())\n\n (swift_instructions['URL'],\n swift_instructions['HMAC_BODY'],\n swift_instructions['SIGNATURE']) = \\\n self.swift.generate_form_post_middleware_params(\n params['LOG_PATH'], **s_config)\n\n if 'logserver_prefix' in s_config:\n swift_instructions['LOGSERVER_PREFIX'] = \\\n s_config['logserver_prefix']\n elif self.config.has_option('swift',\n 'default_logserver_prefix'):\n swift_instructions['LOGSERVER_PREFIX'] = \\\n self.config.get('swift', 'default_logserver_prefix')\n\n # Create a set of zuul instructions for each instruction-set\n # given in the form of NAME_PARAMETER=VALUE\n for key, value in swift_instructions.items():\n params['_'.join(['SWIFT', name, key])] = value", "def _get_default_pipeline_params(\n project: str,\n location: str,\n root_dir: str,\n target_column: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: str,\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: Optional[int] = None,\n stage_2_num_parallel_trials: Optional[int] = None,\n stage_2_num_selected_trials: Optional[int] = None,\n data_source_csv_filenames: Optional[str] = None,\n data_source_bigquery_table_path: Optional[str] = None,\n predefined_split_key: Optional[str] = None,\n timestamp_split_key: Optional[str] = None,\n stratified_split_key: Optional[str] = None,\n training_fraction: Optional[float] = None,\n validation_fraction: Optional[float] = None,\n test_fraction: Optional[float] = None,\n weight_column: Optional[float] = None,\n study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,\n optimization_objective_recall_value: Optional[float] = None,\n optimization_objective_precision_value: Optional[float] = None,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: Optional[str] = None,\n stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,\n stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,\n transform_dataflow_machine_type: Optional[str] = None,\n transform_dataflow_max_num_workers: Optional[int] = None,\n transform_dataflow_disk_size_gb: Optional[int] = None,\n dataflow_subnetwork: Optional[str] = None,\n dataflow_use_public_ips: bool = True,\n encryption_spec_key_name: Optional[str] = None,\n additional_experiments: Optional[Dict[str, Any]] = 
None,\n dataflow_service_account: Optional[str] = None,\n max_selected_features: Optional[int] = None,\n apply_feature_selection_tuning: bool = False,\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: Optional[str] = None,\n evaluation_batch_predict_starting_replica_count: Optional[int] = None,\n evaluation_batch_predict_max_replica_count: Optional[int] = None,\n evaluation_batch_explain_machine_type: Optional[str] = None,\n evaluation_batch_explain_starting_replica_count: Optional[int] = None,\n evaluation_batch_explain_max_replica_count: Optional[int] = None,\n evaluation_dataflow_machine_type: Optional[str] = None,\n evaluation_dataflow_starting_num_workers: Optional[int] = None,\n evaluation_dataflow_max_num_workers: Optional[int] = None,\n evaluation_dataflow_disk_size_gb: Optional[int] = None,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: Optional[str] = None,\n distill_batch_predict_starting_replica_count: Optional[int] = None,\n distill_batch_predict_max_replica_count: Optional[int] = None,\n stage_1_tuning_result_artifact_uri: Optional[str] = None,\n quantiles: Optional[List[float]] = None,\n enable_probabilistic_inference: bool = False,\n num_selected_features: Optional[int] = None,\n model_display_name: str = '',\n model_description: str = '',\n) -> Dict[str, Any]:\n if not study_spec_parameters_override:\n study_spec_parameters_override = []\n if not stage_1_tuner_worker_pool_specs_override:\n stage_1_tuner_worker_pool_specs_override = []\n if not cv_trainer_worker_pool_specs_override:\n cv_trainer_worker_pool_specs_override = []\n if not quantiles:\n quantiles = []\n\n parameter_values = {}\n parameters = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column': target_column,\n 'prediction_type': prediction_type,\n 'data_source_csv_filenames': data_source_csv_filenames,\n 'data_source_bigquery_table_path': data_source_bigquery_table_path,\n 'predefined_split_key': predefined_split_key,\n 'timestamp_split_key': timestamp_split_key,\n 'stratified_split_key': stratified_split_key,\n 'training_fraction': training_fraction,\n 'validation_fraction': validation_fraction,\n 'test_fraction': test_fraction,\n 'optimization_objective': optimization_objective,\n 'train_budget_milli_node_hours': train_budget_milli_node_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'weight_column': weight_column,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_parameters_override': study_spec_parameters_override,\n 'stage_1_tuner_worker_pool_specs_override': (\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': (\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'dataflow_service_account': dataflow_service_account,\n 'encryption_spec_key_name': encryption_spec_key_name,\n 'max_selected_features': max_selected_features,\n 'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,\n 'quantiles': quantiles,\n 'enable_probabilistic_inference': enable_probabilistic_inference,\n 
'model_display_name': model_display_name,\n 'model_description': model_description,\n }\n parameter_values.update(\n {param: value for param, value in parameters.items() if value is not None}\n )\n\n if run_evaluation:\n eval_parameters = {\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_batch_explain_machine_type': (\n evaluation_batch_explain_machine_type\n ),\n 'evaluation_batch_explain_starting_replica_count': (\n evaluation_batch_explain_starting_replica_count\n ),\n 'evaluation_batch_explain_max_replica_count': (\n evaluation_batch_explain_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_starting_num_workers': (\n evaluation_dataflow_starting_num_workers\n ),\n 'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in eval_parameters.items()\n if value is not None\n }\n )\n\n # V1 pipeline without FTE\n if num_selected_features is None:\n if not additional_experiments:\n additional_experiments = {}\n\n parameters = {\n 'transformations': transformations,\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 'additional_experiments': additional_experiments,\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n if apply_feature_selection_tuning:\n parameter_values.update({\n 'apply_feature_selection_tuning': apply_feature_selection_tuning,\n })\n\n if run_distillation:\n distillation_parameters = {\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in distillation_parameters.items()\n if value is not None\n }\n )\n\n # V2 pipeline (with FTE)\n else:\n if run_distillation:\n raise ValueError(\n 'Distillation is currently not supported'\n ' when num_selected_features is specified.'\n )\n\n parameters = {\n 'num_selected_features': num_selected_features,\n 'dataset_level_custom_transformation_definitions': [],\n 'dataset_level_transformations': [],\n 'tf_auto_transform_features': {},\n 'tf_custom_transformation_definitions': [],\n 'legacy_transformations_path': transformations,\n 'feature_transform_engine_dataflow_machine_type': (\n transform_dataflow_machine_type\n ),\n 'feature_transform_engine_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 
'feature_transform_engine_dataflow_disk_size_gb': (\n transform_dataflow_disk_size_gb\n ),\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n return parameter_values", "def args_to_params(args: list) -> dict:\n found = {}\n\n # Setup the dictionary identifying the parameters\n found['sensor'] = args.sensor\n found['filename'] = args.filename\n found['working_space'] = args.working_space\n if args.userid:\n found['userid'] = args.userid\n\n # Note: Return an empty dict if we're missing mandatory parameters\n return found", "def launch_job(self,\n job_id: Text,\n parent: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> None:\n pass", "def get_required_params():\n return {}", "def getObject(self, userguid, jobguid=\"\",executionparams=dict()):", "def getJob(workload):\n job = Job()\n job[\"task\"] = workload.getTask(\"reco\").getPathName()\n job[\"workflow\"] = workload.name()\n job[\"location\"] = \"T1_US_FNAL\"\n job[\"owner\"] = \"evansde77\"\n job[\"group\"] = \"DMWM\"\n return job", "def launch_job(self,\n job_id: Text,\n project: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> None:\n\n parent = 'projects/{project}/locations/{location}'.format(\n project=project, location=self._region)\n\n job_spec = self._create_job_spec(job_id, training_input, job_labels)\n\n # Submit job to AIP Training\n logging.info('TrainingInput=%s', training_input)\n logging.info('Submitting custom job=\\'%s\\', project=\\'%s\\''\n ' to AI Platform (Unified).', job_id, parent)\n response = self._client.create_custom_job(parent=parent,\n custom_job=job_spec)\n self._job_name = response.name", "def get_parameters(cause_id, age_start, age_end, model_version_type_id,\n hybridizer=False,\n start_date=datetime(2019, 1, 11), end_date=datetime.now(),\n jobs=None):\n if model_version_type_id == 3 and not hybridizer:\n raise ValueError(\"Cannot pull non-hybridizer model version type IDs when you've asked for hybridizer jobs.\")\n if hybridizer:\n logger.info(\"Getting parameters for cause_id {c}, age_start {s}, age_end {e}, and model_version_type_id {m}\".format(\n c=cause_id, s=age_start, e=age_end, m=model_version_type_id\n ))\n model_versions = get_model_versions(cause_id, age_start, age_end, model_version_type_id)\n if jobs is None:\n jobs = get_jobs(start_date=start_date, end_date=end_date, hybridizer=hybridizer)\n jobs = jobs.loc[jobs.model_version_id.isin(model_versions)].copy()\n jobs['ran_covariate_selection'] = jobs['model_version_id'].apply(lambda x:\n check_covariate_selection(x, conn_def='codem'))\n logger.info(f\"{jobs['ran_covariate_selection'].mean()*100} % of jobs ran covariate selection.\")\n jobs.loc[~jobs.ran_covariate_selection, 'runtime_min'] = \\\n jobs.loc[~jobs.ran_covariate_selection, 'runtime_min'] + 60*24*2\n jobs = jobs.loc[(jobs.exit_status == 0) & (jobs.failed == 0)]\n if jobs.empty:\n jobs = pd.DataFrame.from_dict(DEFAULT_PARAMS, orient='columns')\n warnings.warn(\"QPID did not capture any run information for these model versions {}.\".format(\n ', '.join([str(x) for x in model_versions])\n ), RuntimeWarning)\n bad_jobs = jobs.loc[jobs.ram_gb == -1]\n if not bad_jobs.empty:\n warnings.warn(\"Cannot have -1 for ram GB. 
Defaulting to have the ram GB requested instead.\", RuntimeWarning)\n for index, row in bad_jobs.iterrows():\n job_number = row['job_number']\n job_name = row['job_name']\n start_time = row['start_time']\n file_name = f\"FILEPATH\"\n if not os.path.exists(file_name):\n f = open(file_name, 'w')\n f.close()\n jobs.loc[jobs.ram_gb < 0, 'ram_gb'] = jobs.loc[jobs.ram_gb < 0]['ram_gb_requested']\n parameters = jobs[['cores_requested', 'ram_gb', 'ram_gb_requested', 'runtime_min']].mean().to_dict()\n else:\n parameters = {\n 'cores_requested': 3,\n 'ram_gb': 1,\n 'ram_gb_requested': 1,\n 'runtime_min': int(60*24*5)\n }\n logger.info(f\"parameters: {parameters}\")\n return parameters", "def _identifying_params(self) -> dict[str, Any]:\n return {**{\"model_path\": self.model_path}, **self._default_params}", "def jobs(\n ctx: typer.Context,\n op_id: str = typer.Argument(\n ...,\n autocompletion=completion_op_id,\n callback=check_for_op_id,\n help=\"A valid op-id. e.g. get_markets_prices\",\n ),\n param_string: Optional[str] = typer.Option(\n None,\n \"--param-string\",\n \"-p\",\n help=\"Optional. Full or partial parameters as a json encoded dictionary string. \"\n \"Keys must be valid parameters for selected op_id.\",\n ),\n default_params: bool = typer.Option(\n False,\n \"-d\",\n \"--default-params\",\n help=\"Include all parameters that are required, or have default values. \"\n \"Missing values will be 'NOTSET'.\",\n ),\n callback_path: Optional[Path] = typer.Option(\n None,\n \"-c\",\n \"--callbacks\",\n help=\"Optional. Path to custom callbacks to be used. \",\n ),\n file_name: str = typer.Option(\n \"created-jobs/${esi_job_op_id}-${esi_job_uid}\",\n \"-n\",\n \"--file-name\",\n help=(\n \"File name for the new job, must be unique if multiple jobs. \"\n \"Can include directories, \"\n \"and the file type suffix will be added based on --format-id.\"\n ),\n ),\n data_path: Optional[Path] = typer.Option(\n None,\n \"--data-file\",\n \"-i\",\n help=(\n \"Optional. Path to json, csv, or yaml file with full or partial parameters. \"\n \"Must result in a list of dicts.\"\n ),\n ),\n format_id: FormatChoices = typer.Option(\n FormatChoices.json,\n \"-f\",\n \"--format-id\",\n show_choices=True,\n help=\"Output file format.\",\n ),\n path_out: Path = typer.Argument(\n \"./tmp\",\n help=\"Parent path for saving the new jobs, will be prepended to --file-name.\",\n ),\n):\n operation_manifest: OperationManifest = ctx.obj[\"operation_manifest\"]\n # path_out = optional_object(path_out, Path, \".\")\n if path_out.is_file:\n typer.BadParameter(\"path_out must not be a file.\")\n file_data: Optional[List[Dict]] = get_params_from_file(data_path)\n parameters: Dict = decode_param_string(param_string)\n if callback_path is None:\n callback_collection = default_callback_collection()\n else:\n callback_collection = load_callbacks(callback_path)\n jobs_: List[EsiJob] = []\n try:\n op_info = operation_manifest.op_info(op_id)\n if not file_data:\n job = op_info.create_job(\n parameters,\n callback_collection,\n include_default_params=default_params,\n # only_required_default_params=False,\n # allow_notset=False,\n )\n jobs_.append(job)\n else:\n for params in file_data:\n params.update(parameters)\n job = op_info.create_job(\n params,\n callback_collection,\n include_default_params=default_params,\n # only_required_default_params=False,\n # allow_notset=False,\n )\n jobs_.append(job)\n except Exception as ex:\n raise typer.BadParameter(\n f\"Exception creating job. 
{ex.__class__.__name__}: {ex}\"\n )\n for job in jobs_:\n file_path = resolve_job_file_path(job, file_name, path_out)\n try:\n save_path = job.serialize_file(file_path, format_id)\n except Exception as ex:\n raise typer.BadParameter(\n f\"Error saving job to {save_path}. {ex.__class__.__name__}, {ex}\"\n )\n logger.info(\"Saved job %s at %s\", job.uid, file_path)\n typer.echo(f\"{len(jobs_)} jobs saved to {path_out}\")\n report_finished_task(ctx)", "def setup_and_get_job_details_for_sf(self):\n\n self.create_compute_environment()\n jq_response = self.create_job_queue()\n jd_response = self.register_job_definition()\n return dict(jobDefinition=jd_response[\"jobDefinitionName\"], jobQueue=jq_response)", "def make_job(self, script, factors=None):\n job = {'script': script}\n if factors is not None:\n job['factors'] = factors\n return job", "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n init_params = super(TensorFlow, cls)._prepare_init_params_from_job_description(job_details,\n model_channel_name)\n\n # Move some of the tensorflow specific init params from hyperparameters into the main init params.\n for argument in ('checkpoint_path', 'training_steps', 'evaluation_steps', 'model_dir'):\n value = init_params['hyperparameters'].pop(argument, None)\n if value is not None:\n init_params[argument] = value\n\n image_name = init_params.pop('image')\n framework, py_version, tag, script_mode = fw.framework_name_from_image(image_name)\n if not framework:\n # If we were unable to parse the framework name from the image it is not one of our\n # officially supported images, in this case just add the image to the init params.\n init_params['image_name'] = image_name\n return init_params\n\n if script_mode:\n init_params['script_mode'] = True\n\n init_params['py_version'] = py_version\n\n # We switched image tagging scheme from regular image version (e.g. '1.0') to more expressive\n # containing framework version, device type and python version (e.g. 
'1.5-gpu-py2').\n # For backward compatibility map deprecated image tag '1.0' to a '1.4' framework version\n # otherwise extract framework version from the tag itself.\n init_params['framework_version'] = '1.4' if tag == '1.0' else fw.framework_version_from_tag(\n tag)\n\n training_job_name = init_params['base_job_name']\n if framework != cls.__framework_name__:\n raise ValueError(\"Training job: {} didn't use image for requested framework\".format(\n training_job_name))\n\n return init_params", "def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n 
startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()" ]
[ "0.623002", "0.6157073", "0.6141183", "0.6118042", "0.59162587", "0.58894557", "0.58691126", "0.5811645", "0.57410806", "0.5705661", "0.5667334", "0.5509608", "0.5486221", "0.54622126", "0.5394866", "0.5381501", "0.53673935", "0.5350924", "0.5328032", "0.52987236", "0.52917624", "0.5247251", "0.5214663", "0.52130544", "0.5211171", "0.52043736", "0.5201807", "0.5201332", "0.5196052", "0.51957554" ]
0.76147777
0
method to set the name of the render_database
def set_render_database_name(self, file_name):
    try:
        self.render_database = file_name
        self.filepath_render_database = os.path.join(self.filepath, self.render_database)
        print("set render_database filename to", file_name)
    except:
        print("setting render database failed")
        self.render_database = "Render_database.db"
        self.filepath_object_database = os.path.join(self.filepath, self.render_database)
        print("set render database name to default:", self.render_database)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_name(self, db_name):\n\n self._db_name = db_name", "def get_db_name(self):\n\t\treturn conf.db_name", "def db_name(self):\n return self._db_name", "async def set_db_name_field(self, db_name_field):\n self.db_name_field = db_name_field", "def get_name(self) -> str:\n return self.dbname", "def set_output_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.output_database=\"Output_database.db\"\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output database name to default:\", self.object_database)\n return", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def getDatabaseName(self):\n raise NotImplementedError", "def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)", "def set_name(self, name, modify=False):\n assert not hasattr(self, \"_dbcnx\") # Must be done before any write ops.\n if name == self.db.get(\"name\"):\n return\n if not constants.NAME_RX.match(name):\n raise ValueError(\"invalid database name\")\n if modify:\n modified = name\n for n in range(1, 1000): # Bail out if too many.\n if get_db(modified) is None:\n name = modified\n break\n modified = f\"{name}-{n}\"\n if get_db(name):\n raise ValueError(\"database name already in use\")\n old_dbname = self.db.get(\"name\")\n if old_dbname:\n # Rename the Sqlite3 file if the database already exists.\n os.rename(utils.get_dbpath(old_dbname), utils.get_dbpath(name))\n # The entries in the dbs_log will be fixed in '__exit__'\n self.db[\"name\"] = name\n return self.db[\"name\"]", "def dbName(self, code) -> str:\n return f'{code}{self.name}'", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def set_object_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n print(\"set object_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.object_database=\"Object_database.db\"\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n print(\"set object database name to default:\", self.object_database)\n return", "def setDB(dbname):\n global DBNAME\n DBNAME = dbname", "def getDatabaseName( self ):\n return self.mDbname", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def db_for_write(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_write({}): {}'.format(state_db, name))\n return name", "def change_db(cls, dbname):\n setattr(cls, 'db', staticmethod(lambda: Db(dbname)))", "def db_for_write(self, model, **hints):\n if 
hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def __init__(self, database: Database, name: str):\n self._database = database\n self.name = name", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def set_name(self,name):\r\n self._name = __name", "def name(self, name):\n\n self.container['name'] = name", "def name(self, name):\n\n self.container['name'] = name", "def backend_name(self) -> str:\n return self._db_data.backend", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")" ]
[ "0.7080127", "0.6651994", "0.6637986", "0.6630052", "0.6611546", "0.6575934", "0.65543795", "0.65191627", "0.6510718", "0.65007985", "0.6497756", "0.6437254", "0.6436953", "0.6349015", "0.63008165", "0.6236277", "0.61813396", "0.61277485", "0.6054977", "0.6046384", "0.6044267", "0.60411125", "0.6037435", "0.60278654", "0.5977049", "0.5967234", "0.5915419", "0.5915419", "0.59097713", "0.58886963" ]
0.8418282
0
method to set the name of the object_database
def set_object_database(self, file_name):
    try:
        self.object_database = file_name
        self.filepath_object_database = os.path.join(self.filepath, self.object_database)
        print("set object_database filename to", file_name)
    except:
        print("setting object database failed")
        self.object_database = "Object_database.db"
        self.filepath_object_database = os.path.join(self.filepath, self.object_database)
        print("set object database name to default:", self.object_database)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_name(self, db_name):\n\n self._db_name = db_name", "def db_name(self):\n return self._db_name", "def get_name(self) -> str:\n return self.dbname", "def getDatabaseName(self):\n raise NotImplementedError", "def get_db_name(self):\n\t\treturn conf.db_name", "def set_object_name(self, object_name = \"DefaultObject\"):\n self.obj_name = object_name", "async def set_db_name_field(self, db_name_field):\n self.db_name_field = db_name_field", "def set_output_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.output_database=\"Output_database.db\"\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output database name to default:\", self.object_database)\n return", "def __init__(self, database: Database, name: str):\n self._database = database\n self.name = name", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName( self ):\n return self.mDbname", "def setDB(dbname):\n global DBNAME\n DBNAME = dbname", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)", "def db_for_write(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def set_name(self, name, modify=False):\n assert not hasattr(self, \"_dbcnx\") # Must be done before any write ops.\n if name == self.db.get(\"name\"):\n return\n if not constants.NAME_RX.match(name):\n raise ValueError(\"invalid database name\")\n if modify:\n modified = name\n for n in range(1, 1000): # Bail out if too many.\n if get_db(modified) is None:\n name = modified\n break\n modified = f\"{name}-{n}\"\n if get_db(name):\n raise ValueError(\"database name already in use\")\n old_dbname = self.db.get(\"name\")\n if old_dbname:\n # Rename the Sqlite3 file if the database already exists.\n os.rename(utils.get_dbpath(old_dbname), utils.get_dbpath(name))\n # The entries in the dbs_log will be fixed in '__exit__'\n self.db[\"name\"] = name\n return self.db[\"name\"]", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def change_db(cls, dbname):\n setattr(cls, 'db', staticmethod(lambda: Db(dbname)))", "def db_for_write(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_write({}): {}'.format(state_db, name))\n return name", "def db_for_write(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def db_for_write(self, model, **hints):\n\n return self.db_name", "def db_for_write(self, model, **hints):\r\n if model._meta.app_label == self.APP_LABEL:\r\n return self.DB_NAME\r\n return None", "def set_render_database_name(self, file_name):\n try:\n self.render_database=file_name\n 
self.filepath_render_database = os.path.join(self.filepath, self.render_database)\n print(\"set render_database filename to\", file_name)\n except:\n print(\"setting render database failed\")\n self.render_database=\"Render_database.db\"\n self.filepath_object_database = os.path.join(self.filepath, self.render_database)\n print(\"set render database name to default:\", self.render_database)\n return", "def db_for_read(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def db_for_read(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_read({}): {}'.format(state_db, name))\n return name", "def db_for_read(self, model, **hints):\n\n return self.db_name", "def database(self, database):\n self._database = database" ]
[ "0.72566205", "0.7061423", "0.7017317", "0.7010737", "0.68976414", "0.68876076", "0.6882641", "0.6880423", "0.68634754", "0.68143404", "0.6771887", "0.6689018", "0.6677643", "0.6660668", "0.66574013", "0.66081303", "0.6601504", "0.65738755", "0.6570008", "0.6501405", "0.6492557", "0.64703125", "0.6415281", "0.6384418", "0.6357481", "0.6320398", "0.6268569", "0.6226121", "0.61766976", "0.61760724" ]
0.76832587
0
method to set the name of the output_database
def set_output_database (self, file_name): try: self.object_database=file_name self.filepath_output_database = os.path.join(self.filepath, self.output_database) print("set output_database filename to", file_name) except: print("setting object database failed") self.output_database="Output_database.db" self.filepath_output_database = os.path.join(self.filepath, self.output_database) print("set output database name to default:", self.object_database) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName(self):\n raise NotImplementedError", "def get_db_name(self):\n\t\treturn conf.db_name", "def get_name(self) -> str:\n return self.dbname", "def db_name(self, db_name):\n\n self._db_name = db_name", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def db_name(self):\n return self._db_name", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def db_for_write(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_write({}): {}'.format(state_db, name))\n return name", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName( self ):\n return self.mDbname", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def set_render_database_name(self, file_name):\n try:\n self.render_database=file_name\n self.filepath_render_database = os.path.join(self.filepath, self.render_database)\n print(\"set render_database filename to\", file_name)\n except:\n print(\"setting render database failed\")\n self.render_database=\"Render_database.db\"\n self.filepath_object_database = os.path.join(self.filepath, self.render_database)\n print(\"set render database name to default:\", self.render_database)\n return", "def db_for_write(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def setDB(dbname):\n global DBNAME\n DBNAME = dbname", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def db_for_write(self, model, **hints):\n\n return self.db_name", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def db_for_write(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def setdb_params(self, mongouri=None, dbname=\"testrecommender\"):\n if mongouri is not None:\n self._mongouri = mongouri\n if dbname is not None:\n self._dbname = dbname\n self._outputlogfile = self._outputlogfile + dbname + \"_outputlog.txt\"", "def set_object_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n print(\"set object_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.object_database=\"Object_database.db\"\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n print(\"set object database name to default:\", self.object_database)\n return", "def dbName(self, code) -> str:\n return f'{code}{self.name}'", "def db_for_write(self, model, **hints):\r\n if model._meta.app_label == self.APP_LABEL:\r\n return self.DB_NAME\r\n return None", "def __init__(self, database_name):\n self.conn = 
sqlite3.connect(\"output/%s.db\" % database_name)", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def set_name(self, name, modify=False):\n assert not hasattr(self, \"_dbcnx\") # Must be done before any write ops.\n if name == self.db.get(\"name\"):\n return\n if not constants.NAME_RX.match(name):\n raise ValueError(\"invalid database name\")\n if modify:\n modified = name\n for n in range(1, 1000): # Bail out if too many.\n if get_db(modified) is None:\n name = modified\n break\n modified = f\"{name}-{n}\"\n if get_db(name):\n raise ValueError(\"database name already in use\")\n old_dbname = self.db.get(\"name\")\n if old_dbname:\n # Rename the Sqlite3 file if the database already exists.\n os.rename(utils.get_dbpath(old_dbname), utils.get_dbpath(name))\n # The entries in the dbs_log will be fixed in '__exit__'\n self.db[\"name\"] = name\n return self.db[\"name\"]", "def db_for_write(self, model, **hints):\n if is_recon_model(model):\n return settings.RECON_NG_DATABASE_NAME\n\n return None" ]
[ "0.71581715", "0.7139252", "0.70207834", "0.6973042", "0.6896915", "0.6870785", "0.6853191", "0.6820632", "0.668634", "0.6684695", "0.6680187", "0.66756105", "0.66751665", "0.66531605", "0.6643873", "0.66307545", "0.6598226", "0.65926546", "0.6548423", "0.65220237", "0.6405724", "0.6366217", "0.6358535", "0.6332535", "0.63023645", "0.6227131", "0.622009", "0.6210734", "0.6186832", "0.61823314" ]
0.8193771
0
method to import excel data to the correct database
def import_excel(self, filepath_excel,database_type): if database_type == "render": try: connection = sqlite3.connect(self.filepath_render_database) pointer = connection.cursor() sql_anweisung = """ INSERT INTO render_information ( object_type, name, radius, polar_angle_min, polar_anglel_max, polar_angle_segments, polar_angle_random_rad, azimuth_angle_min, azimuth_angle_max, azimuth_angle_segments, azimuth_angle_random_rad, tracking_obj, segmentation ) VALUES ( :object_type, :name, :radius, :polar_angle_min, :polar_anglel_max, :polar_angle_segments, :polar_angle_random_rad, :azimuth_angle_min, :azimuth_angle_max, :azimuth_angle_segments, :azimuth_angle_random_rad, :tracking_obj, :segmentation ) """ with open(filepath_excel) as csvdatei: csv_reader_object = csv.reader(csvdatei, delimiter=';') next(csv_reader_object) pointer.executemany(sql_anweisung, csv_reader_object) connection.commit() connection.close() print("render data addet from excel file") except : print("adding render data from excel file failed") elif database_type == "object": try: connection = sqlite3.connect(self.filepath_object_database) pointer = connection.cursor() sql_anweisung = """ INSERT INTO object_information ( obj_filepath, obj_name, obj_type, obj_scale_factor, obj_type, obj_location_x, obj_location_y, obj_location_z, obj_rotation_x, obj_rotation_y, obj_rotation_z, obj_amount_percent, obj_material_path, obj_point_in_time, maximum_random_rotation_degree_z, maximum_random_translation, random_amount ) VALUES ( :obj_filepath, :obj_name, :obj_type, :obj_scale_factor, :obj_type, :obj_location_x, :obj_location_y, :obj_location_z, :obj_rotation_x, :obj_rotation_y, :obj_rotation_z, :obj_amount_percent, :obj_material_path, :obj_point_in_time, :maximum_random_rotation_degree_z, :maximum_random_translation, :random_amount ) """ with open(filepath_excel) as csvdatei: csv_reader_object = csv.reader(csvdatei, delimiter=';') print(csv_reader_object) next(csv_reader_object) pointer.executemany(sql_anweisung, csv_reader_object) connection.commit() connection.close() print("object data added from excel file") except : print("adding object data from excel file failed") else: print("no Database found, maybe check spelling in method call??") return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importXlsxIntoDb(input):\n #import global variable\n global UPLOAD_ID\n global PATIENT_NUM\n global DATABASE\n\n connection = db.create_connection(DATABASE)\n\n xlsx = pd.read_excel(input)\n\n #looping on each row\n print(\" - Importing data in DB\", end = '')\n for index, row in xlsx.iterrows():\n if (pd.isna(row['DATE_MORT']) == False):\n DEATH_DATE = row['DATE_MORT']\n DEATH_CODE = 1\n else :\n DEATH_DATE = None #insert null in db\n DEATH_CODE = 0\n if (pd.isna(row['NOM_JEUNE_FILLE']) == False):\n MAIDEN_NAME = row['NOM_JEUNE_FILLE']\n else:\n MAIDEN_NAME = None\n db.insert_patient(connection, (PATIENT_NUM, row['NOM'], row['PRENOM'], row['DATE_NAISSANCE'], row['SEXE'], MAIDEN_NAME, row['ADRESSE'], row['TEL'], row['CP'], row['VILLE'], DEATH_DATE, row['PAYS'], DEATH_CODE, UPLOAD_ID))\n db.insert_patient_ipphist(connection, (PATIENT_NUM, row['HOSPITAL_PATIENT_ID'], \"export_patient.xlsx\", 0, UPLOAD_ID))\n PATIENT_NUM = PATIENT_NUM + 1\n UPLOAD_ID = UPLOAD_ID + 1\n if (index % 100 == 0):\n print(\".\", end = '')\n #commit the changes to db\t\t\t\n connection.commit()\n #close the connection\n connection.close()\n print(\"\\n\")", "def import_data_model(directory):\n analyses = pd.read_excel(directory + 'analyses.xlsx')\n analytes = pd.read_excel(directory + 'analytes.xlsx')\n for index, analysis in analyses.iterrows():\n analyte_data = []\n analyte_names = analysis.analyte_keys.split(', ')\n for analyte_key in analyte_names:\n analyte_item = analytes.loc[analytes.key == analyte_key]\n analyte_data.append(analyte_item.to_dict(orient='records'))\n analyses.at[index, 'analytes'] = analyte_data \n analyses_data = analyses.to_dict(orient='records')\n for index, values in analyses_data.iterrows():\n doc_id = str(values.key)\n doc_data = values.to_dict()\n ref = ''\n update_document(ref, doc_data)\n # doc_data = data.to_dict(orient='index')\n # data_ref = create_reference(db, ref)\n # data_ref.document(doc_id).set(doc_data, merge=True)\n # data_ref.set(doc_data, merge=True)\n\n return NotImplementedError", "def importItem(file_path):\n\n #Ouverture du fichier\n rb = open_workbook(file_path)\n r_sheet = rb.sheet_by_index(0)\n\n for row_index in range (1, r_sheet.nrows):\n #Hydratation or get Supplier Model\n item_supplier= r_sheet.cell(row_index, 4).value\n item_supplier, created = Supplier.objects.get_or_create(name=item_supplier)\n\n #Hydratation or get Category Model\n current_category = r_sheet.cell(row_index, 0).value\n item_category, created = Category.objects.get_or_create(name=current_category)\n\n #Hydratation Item\n item_name = r_sheet.cell(row_index, 1).value\n item_ref = current_supplier= r_sheet.cell(row_index, 3).value\n item_quantity = r_sheet.cell(row_index, 2).value\n item, created = Item.objects.get_or_create(ref=item_ref, name=item_name, category=item_category, supplier=item_supplier, quantity=item_quantity)", "def import_excel(self):\n self.ensure_one()\n if self.file_import:\n filecontent = base64.b64decode(self.file_import)\n try:\n # Todo: import excel\n input = cStringIO.StringIO()\n input.write(filecontent)\n wb = open_workbook(file_contents=input.getvalue())\n problem_emails = {\"inserted_names\": [],\n \"inserted_emails\": [],\n \"invalid_emails\": [],\n \"duplicate_names\": [],\n \"duplicate_emails\": []}\n for sheet in wb.sheets():\n try:\n self.insert_db(sheet, wb, problem_emails)\n except Exception as e:\n raise (str(e))\n\n except:\n # todo: import csv\n wb = filecontent.split('\\r\\n')\n for line in range(1, len(wb) - 1):\n line_data = wb[line].split(',')\n 
self.crete_line(line_data[0], line_data[1])\n\n if problem_emails['invalid_emails']:\n raise except_orm(_('Invalid Email Format Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['invalid_emails']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_names']:\n raise except_orm(_('Duplicate Name Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_names']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_emails']:\n raise except_orm(_('Duplicate Email Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_emails']))) + '\\n\\n Please check and try again.'))\n\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'shipmaster.invitation',\n 'res_id': self.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def Excel_Load_Data( self, ExcelFilename ):\n pass", "def import_data(self):\n\n self.worksheet = (\n xlrd.open_workbook(filename=self.source).sheet_by_index(0)\n )\n # Import conversion data from worksheet and store as scipy arrays\n self.T_exp = np.array(\n self.worksheet.col_values(0, start_rowx=4, end_rowx=None)\n ) + 273.15\n self.HCout_raw = np.array(\n self.worksheet.col_values(4, start_rowx=4, end_rowx=None)\n )\n self.HCin_raw = np.array(\n self.worksheet.col_values(8, start_rowx=4, end_rowx=None)\n )\n self.eta_exp = (\n (self.HCin_raw - self.HCout_raw) / self.HCin_raw\n )\n self.T_model = np.linspace(\n self.T_exp[0] - 50, self.T_exp[-1] + 50, 25\n )\n self.T_array = self.T_model", "def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True", "def load_xls(cursor, datadir, schema_name, config_path, config_name):\n files = os.listdir(datadir)\n files_xls = [f for f in files if f.split('.')[-1] in ('xlsx', 'xls')]\n logger.info(files_xls)\n\n for filename in files_xls:\n df = pd.read_excel(datadir + '/' + filename, skiprows=1)\n if df.empty:\n logger.info('No data')\n continue\n df.columns = map(str.lower, df.columns)\n logger.info(\"added \" + filename)\n logger.info(df.columns)\n\n # load the data into pg\n engine = postgres_engine_pandas(config_path, config_name)\n table_name = filename.split('.')[0]\n create_pg_schema(cursor, schema_name)\n df.to_sql(table_name, engine, schema=schema_name, if_exists='replace') # ,dtype={geom: Geometry('POINT', srid='4326')})\n logger.info(filename + ' added as ' + table_name)\n create_geoms(cursor, schema_name, table_name,'x-coordinaat','y-coordinaat')\n cursor.execute(sql.SQL(\"\"\"ALTER TABLE {}.{} ADD COLUMN id SERIAL PRIMARY KEY;\"\"\")\n .format(sql.Identifier(schema_name),\n sql.Identifier(table_name)),)", "def import_data(self):\n\t\tif not self.log_files or len(self.log_files) ==0:\n\t\t\tprint \"There is no log files need to import into database\"\n\t\telse:\n\t\t\tfor log_file in self.log_files:\n\t\t\t\tdata = self.read_file(log_file)\n\t\t\t\tself.conn.insert(data)", "def import_db(import_file):\n import_data(import_file)", "def load_xl2db(xlfilepath, sheetname, header_cols, insfunc, start_row=0):\n def is_header(row):\n hText = ('!!'.join(header_cols)).lower()\n rText = ('!!'.join([str(r) for r 
in row[:len(header_cols)]])).lower()\n return hText == rText\n \n #load sheet & file header\n sheet = XlSheet(xlfilepath, sheetname)\n row = sheet.getrow()\n while not is_header(row):\n row = sheet.getrow()\n \n # now perform data load\n for row in sheet.getrows(start_row=(start_row or 0)):\n insfunc(norm_row(row))", "def load_from_excel(self, excel_fp: str):\n # TODO:\n pass", "def importbusinesshours(self):\n self.stdout.write(\"Preparing to import business hours to your table\")\n\n # Check to see if table already contains data!\n count = BusinessHour.objects.count();\n if count > 0:\n # Data exists on this table, confirm:\n self.stdout.write(self.style.ERROR(f\"Could not import any data.\"))\n self.stdout.write(self.style.NOTICE(f\" - Please empty the BusinessHours table first, and then import.\"))\n return\n\n csvfile = getData(\"businesshours.csv\")\n businesshours_csv = csv.reader(csvfile)\n\n businesshours = []\n row_count = 0\n for day, openingtime_str, closingtime_str in businesshours_csv:\n # this skips first line of the file because it contains the csv headers.\n if not (row_count == 0):\n bh = BusinessHour(\n day=day, \n opening_time=str_to_time(openingtime_str), \n closing_time=str_to_time(closingtime_str)\n )\n businesshours.append(bh)\n self.stdout.write(self.style.NOTICE(f\"+ {BusinessHour.DAYS[int(day)]}\"))\n row_count += 1\n \n # Bulk create\n BusinessHour.objects.bulk_create(businesshours)\n self.stdout.write(self.style.SUCCESS(f\"Succesfully imported {len(businesshours)} business hours.\"))", "def read_xls_csv(self):\n filename = str(self.filename)\n location_stock_id = self.location\n vals = []\n inventory_create = self.env['stock.inventory']\n\n if (filename.endswith('xls') or filename.endswith('xlsx')):\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n pid = row[firstrow.index('id')]\n quantity = row[firstrow.index('quantity')]\n product_obj = self.env['product.product'].search(\n [('id', '=', pid)])\n vals.append({\n 'product_code': product_obj.default_code,\n 'product_qty': quantity,\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n\n else:\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n vals.append({\n 'product_code': row[rows[0].index('id')],\n 'product_qty': row[rows[0].index('quantity')],\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n 
record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n return {\n 'name': 'Stock import',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'res_id': self.id,\n 'view_mode': 'tree,form',\n 'res_model': 'stock.inventory',\n 'target': 'current',\n }", "def _importInDjango(self):\n\n with open(settings.DATA_PATH, 'r', encoding='latin-1') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for raw in reader:\n\n # Créer ou mettre à jour la division\n division, created = Division.objects.get_or_create(\n nom=raw['Division']\n )\n if created:\n self.stdout.write(\n 'Divion {} ajoutée'.format(division.nom)\n )\n\n # Créer ou mettre à jour les équipes\n equipeDom, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 1'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeDom.nom)\n )\n\n equipeExt, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 2'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeExt.nom)\n )\n\n # Créer ou mettre à jour la rencontre\n scoreDom = 0 if raw['Score 1'] == '' else int(raw['Score 1'])\n scoreExt = 0 if raw['Score 2'] == '' else int(raw['Score 2'])\n forfaitDom = True if raw['Forfait 1'] == 'true' else False\n forfaitExt = True if raw['Forfait 2'] == 'true' else False\n date = datetime.datetime.strptime(raw['Date de rencontre'], '%d/%m/%Y')\n heure = datetime.datetime.strptime(raw['Heure'], '%H:%M')\n rencontre, created = Rencontre.objects.update_or_create(\n numero=int(raw['N° de match']),\n equipeDom=equipeDom,\n equipeExt=equipeExt,\n defaults={\n 'date': date,\n 'heure': heure,\n 'scoreDom': scoreDom,\n 'scoreExt': scoreExt,\n 'forfaitDom': forfaitDom,\n 'forfaitExt': forfaitExt,\n }\n )\n if created:\n self.stdout.write(\n 'Rencontre {} / {} ajoutée'.format(\n rencontre.equipeDom,\n rencontre.equipeExt\n )\n )", "def import_heat_data(self):\n worksheet = (\n xlrd.open_workbook(filename=self.filename_heat).sheet_by_index(0)\n ) \n self.exh.corrected_reading = np.array(worksheet.col_values(0,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.datum = worksheet.cell_value(2,4) # manometer datum (in) \n self.exh.pressure_drop = ( (self.exh.corrected_reading -\n self.exh.datum) * 2. 
* self.H2O_kPa ) \n # pressure drop across heat exchanger (kPa)\n self.cummins.torque = np.array(worksheet.col_values(1,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n self.exh.T_inlet_array = np.array(worksheet.col_values(2,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.T_outlet_array = np.array(worksheet.col_values(3,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_inlet_array = np.array(worksheet.col_values(5,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_outlet_array = np.array(worksheet.col_values(4,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))", "def load_dataset():\n\n df_ = pd.read_excel(\"D:\\VERİBİLİMİOKULU\\VERİSETLERİ\\post_bilgileri.xlsx\")\n df = df_.copy()\n return df", "def test_from_file_xls(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xls')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def import_previous_grades_into_db(year, semester, db_name='./grades.sqlite3', filename='./grades.xls'):\n if not os.path.isfile(db_name):\n raise Exception(\"DB not found\")\n\n df1 = pd.read_excel(filename)\n\n try:\n cls = df1.filter(like='CL')\n except Exception as e:\n print(e)\n cls = None # no CLA's found\n\n try:\n ols = df1.filter(like='OL')\n except Exception as e:\n print(e)\n ols = None # no OLAs found\n\n try:\n ids = df1.filter(like='sername').values.ravel().tolist()\n ids_len = len(ids)\n except Exception as e:\n print('Was not able to parse user ids, check xls file you are trying to import: ', e)\n raise e # may be improved in the future - strange case\n try:\n names = df1.filter(like='Name').values.ravel().tolist()\n except Exception as e: # either does not exist or has different name\n print(e)\n names = None\n\n class_dict = get_ids_in_class_by_year_semester(year, semester, db_name)\n\n if (not class_dict and not names) or (class_dict and len(class_dict) < ids_len and not names):\n raise Exception('Did not find ids in table CLASS and did not find names in xls file')\n elif names and (not class_dict or (class_dict and len(class_dict) < ids_len)):\n print('Did not find existing students, but found names in xsl\\nAdding new students...\\n')\n existing_ids = get_pipeline_ids(db_name)\n need_to_update_students = False\n # otherwise just add ids to the class list\n if existing_ids:\n for sid in ids:\n if sid not in existing_ids:\n need_to_update_students = True\n else:\n need_to_update_students = True\n\n if need_to_update_students:\n fname, lname = zip(*(name.split(', ') for name in names))\n fname = (name.strip() for name in fname)\n lname = (name.strip() for name in lname)\n insert_students(ids, fname, lname, db_name)\n register_students_in_class(ids, year, semester, db_name)\n\n class_ids = [class_dict[sid] for sid in ids]\n if ols is None and cls is None or len(class_ids) == 0:\n raise Exception('No grades to load')\n\n grades_tupples = list()\n if ols is not None:\n for lab_name in ols:\n grades = (str(grade) for grade in ols[lab_name].values)\n grades_tupples += list(zip(class_ids, [lab_name] * ids_len, [-1] * ids_len, grades, ['TRUE'] * ids_len))\n\n if cls is not None:\n for lab_name in cls:\n grades = (str(grade) for grade in cls[lab_name].values)\n grades_tupples += list(zip(class_ids, [lab_name] * ids_len, [-1] * ids_len, grades, ['TRUE'] * ids_len))\n\n with lite.connect(db_name) as con:\n cur = con.cursor()\n cur.executemany('INSERT OR REPLACE INTO 
grades\\\n (class_id, lab, attempt, grade, pass_fail) VALUES (?, ?, ?, ?, ?)', grades_tupples)\n con.commit()", "def load_funding_resource(input_file, sheet_name=None):\n df = read_excel(input_file, sheet_name=sheet_name)\n\n for source_name in df[df.columns[1]]:\n src_ = session.query(FundingSource).filter(\n FundingSource.source == source_name).first()\n if src_ is None:\n funding_source = FundingSource(source=source_name)\n session.add(funding_source)\n session.commit()\n\n '''\n staff = Staff(\n #staff_fname = row['first name'],\n #staff_lname = row['last name'],\n staff_email = row['all main researcher email']\n )\n department = Department(\n department_name=row['all department']\n )\n '''\n #session.add(staff)\n #session.add(department)", "def add_xlsx_database(dbname, infile, size):\n tmp = tempfile.NamedTemporaryFile(suffix=\".xlsx\")\n tmp.write(infile.read())\n tmp.seek(0)\n try:\n wb = openpyxl.load_workbook(tmp.name)\n check_quota(size=size)\n with DbSaver() as saver:\n dbname = saver.set_name(dbname)\n saver.initialize()\n db = get_db(dbname, complete=True)\n except (ValueError, IOError) as error:\n raise ValueError(str(error))\n for sheet in wb:\n # Ensure the table name is unique.\n tname = utils.name_cleaned(sheet.title)\n tablename = tname\n count = 1\n while tablename in db[\"tables\"]:\n count += 1\n tablename = f\"{tname}{count}\"\n records = list(sheet.values)\n # The header determines the number of columns;\n # clip off any trailing None values.\n for pos, item in enumerate(records[0]):\n if item is None:\n records[0] = records[0][:pos]\n break\n\n # Truncate records to same number of items as header; convert to lists.\n # Convert records from tuples to lists.\n length = len(records[0])\n records = [list(r[:length]) for r in records]\n with DbSaver(db) as saver:\n saver.create_table_load_records(tablename, records)\n return db", "def _open_data_source(self, *args):\n if len(args) != 0:\n # For first call to open (open())\n self.ds_filename = args[0]\n self.ds_tablename = args[1]\n self.ds_file = load_workbook(filename = args[0], use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = args[1])\n else:\n # For reopening the file (reset())\n self.ds_file = load_workbook(filename = self.ds_filename, use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = self.ds_tablename)\n # In any case we need a reader object to iterate over the table content \n self.ds_reader = self.ds_table.iter_rows()", "def importDatabase(self):\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Telefoon, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\", (naamInvoer.get(), achternaamInvoer.get(), telefoonnummerInvoer.get(), FietsNr, pincodeInvoer.get()))\n\n db_conn.commit()", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def test_import_data():\n\n file_path = os.path.join(CONST_ADVANTICSYS_DIR, CONST_ADVANTICSYS_TEST_1)\n\n # Bring df\n success, log, test_ingress_df = advanticsys_import(file_path)\n assert success, log\n assert isinstance(test_ingress_df, pd.DataFrame)\n\n # Test import function\n success, log = import_data(\n 
test_ingress_df,\n CONST_ADVANTICSYS,\n SQL_USER,\n SQL_PASSWORD,\n SQL_HOST,\n SQL_PORT,\n SQL_TEST_DBNAME\n )\n\n assert success is True, log \n assert log == \"New: 0 (uploaded); Duplicates: 75 (ignored)\"", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)", "def connect(self) -> bool:\n\n if self.connected:\n self.workbook.close()\n self.connected = False\n\n if self.settings.excelFileName:\n\n # What OLEDB does is scan the first n rows (default=8) and determines a data type.\n # If you leave out the IMEX=1 then it will return Null for any values that do not\n # match that data type. If you include IMEX=1 and the scan encounters mixed data\n # types then it will return text. If your sheet has a text header then you can help\n # this process by specifying HDR=No and discarding the header.\n # However OLEDB will always scan the first n rows to determine the data type and\n # return results accordingly.\n #\n # The Rows to scan is determined by the value of TypeGuessRows.\n #\n # The older Microsoft.Jet.OLEDB.4.0 driver would allow you to specify TypeGuessRows\n # in the connection string but Microsoft.ACE.OLEDB.12.0 does not.\n # TypeGuessRows is now held in the registry under...\n #\n # Excel 2007: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Office\\12.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n # Excel 2010: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Office\\14.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n # Excel 2013: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Office\\15.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n #\n # 32 Bit applications running on a 64 Bit Machine will find them under the Wow6432Node. 
E.g...\n #\n #HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\Office\\12.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n\n connection_string = \\\n 'Driver={{Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)}};DBQ={};ReadOnly=1;IMEX=1;'.\\\n format(self.settings.excelFileName)\n try:\n self.workbook = pyodbc.connect(connection_string, autocommit=True)\n except pyodbc.Error as err:\n # vs.SetItemText(importDialog, kWidgetID_excelSheetNameLabel, \"Invalid Excel file!\")\n vs.AlertCritical(err.value[1], \"Talk to Carlos\")\n else:\n self.connected = True\n\n return self.connected", "def load_raw_data(apps, schema_editor):\n from season.import_raw_data import InitialDataProcessor\n matches_path = str(BASE_DIR) + '/season/migrations/matches.csv'\n deliveries_path = str(BASE_DIR) + '/season/migrations/deliveries.csv'\n # Initialization path to read data\n load_data = InitialDataProcessor(matches_path=matches_path, deliveries_path=deliveries_path)\n # transform data frame and save the data step by step\n # only support new season import for the first tym when data structure is ready to use\n load_data.transform_input_save()" ]
[ "0.76412356", "0.70799965", "0.70136434", "0.7009659", "0.67064077", "0.66789573", "0.6583739", "0.656014", "0.6531297", "0.6317989", "0.6263179", "0.6260795", "0.61410016", "0.6118086", "0.6028901", "0.60090876", "0.60072404", "0.600629", "0.5996244", "0.5956326", "0.59562224", "0.59510124", "0.5939843", "0.59148705", "0.5901591", "0.58821285", "0.5864896", "0.5858767", "0.5856663", "0.58539253" ]
0.75868785
1
establish connection to frontend notebook
def connect(): if not is_notebook(): print('Python session is not running in a Notebook Kernel') return global _comm kernel = get_ipython().kernel kernel.comm_manager.register_target('tdb', handle_comm_opened) # initiate connection to frontend. _comm = Comm(target_name='tdb', data={}) # bind recv handler _comm.on_msg(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_notebook():\n \n # Note: not using IPython Comm objects yet, since they seem rather\n # undocumented and I could not get them to work when I tried for a bit.\n # This means though, that flexx in the notebook only works on localhost.\n \n from IPython.display import display, clear_output, HTML\n # from .. import ui # noqa - make ui assets available\n \n # Make default log level warning instead of \"info\" to avoid spamming\n # This preserves the log level set by the user\n config.load_from_string('log_level = warning', 'init_notebook')\n set_log_level(config.log_level)\n \n # Get session or create new\n session = manager.get_default_session()\n if session is None:\n session = manager.create_default_session()\n \n # Open server - the notebook helper takes care of the JS resulting\n # from running a cell, but any interaction goes over the websocket.\n server = current_server()\n host, port = server.serving\n \n # Trigger loading phosphor assets\n if 'flexx.ui' in sys.modules:\n from flexx import ui\n session.register_model_class(ui.Widget)\n \n # Get assets, load all known modules to prevent dynamic loading as much as possible\n js_assets, css_assets = session.get_assets_in_order(css_reset=False, load_all=True)\n \n # Pop the first JS asset that sets flexx.app_name and flexx.session_id\n # We set these in a way that it does not end up in exported notebook.\n js_assets.pop(0)\n url = 'ws://%s:%i/flexx/ws/%s' % (host, port, session.app_name)\n flexx_pre_init = \"\"\"<script>window.flexx = window.flexx || {};\n window.flexx.app_name = \"%s\";\n window.flexx.session_id = \"%s\";\n window.flexx.ws_url = \"%s\";\n window.flexx.is_live_notebook = true;\n </script>\"\"\" % (session.app_name, session.id, url)\n \n # Check if already loaded, if so, re-connect\n if not getattr(session, 'init_notebook_done', False):\n session.init_notebook_done = True # also used in assetstore\n else:\n display(HTML(flexx_pre_init))\n clear_output()\n display(HTML(\"\"\"<script>\n flexx.is_exported = !flexx.is_live_notebook;\n flexx.init();\n </script>\n <i>Flexx already loaded. Reconnected.</i>\n \"\"\"))\n return # Don't inject Flexx twice\n # Note that exporting will not work anymore since out assets\n # are no longer in the outputs\n \n # Install helper to make things work in exported notebooks\n NoteBookHelper(session)\n \n # Compose HTML to inject\n t = \"<i>Injecting Flexx JS and CSS</i>\"\n t += '\\n\\n'.join([asset.to_html('{}', 0) for asset in css_assets + js_assets])\n t += \"\"\"<script>\n flexx.is_notebook = true;\n flexx.is_exported = !flexx.is_live_notebook;\n /* If Phosphor is already loaded, disable our Phosphor CSS. 
*/\n if (window.jupyter && window.jupyter.lab) {\n document.getElementById('phosphor-all.css').disabled = true;\n }\n flexx.init();\n </script>\"\"\"\n \n display(HTML(flexx_pre_init)) # Create initial Flexx info dict\n clear_output() # Make sure the info dict is gone in exported notebooks\n display(HTML(t))\n \n # Note: the Widget._repr_html_() method is responsible for making\n # the widget show up in the notebook output area.", "def connect_to_master():", "def browse(notebook):\n nb = select_notebook(notebook)\n click.launch('http://localhost:{0}/{1}/'.format(conf.PORT, nb.path.rel))", "def connect_to_ibkr(self):\n\n self.update_console(\"Reporting connection to the server...\")\n print(\"Reporting connection to the server...\")\n result = report_login_to_server(self.settings)\n self.update_console(result)\n connector = Worker(self.ibkrworker.prepare_and_connect)\n connector.signals.result.connect(self.connection_done)\n connector.signals.status.connect(self.update_status)\n connector.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(connector)", "def connect():", "def connect(self):\n pwd = self.passwordEdit.text()\n user = self.usernameEdit.text()\n url = self.backendEdit.text()\n\n if user == \"\":\n user = None\n if pwd == \"\":\n pwd = None\n\n auth = self.connection.connect(url, username=user, password=pwd)\n\n if not auth:\n warning(self.iface, \"Authentication failed!\")\n return\n\n collection_result = self.connection.list_collections()\n process_result = self.connection.list_processes()\n self.processes = process_result\n\n self.collectionBox.clear()\n self.processBox.clear()\n\n # Load Collections from Backend\n for col in collection_result:\n if \"id\" in col:\n self.collectionBox.addItem(col['id'])\n\n # Load Processes of Backend\n for pr in process_result:\n if \"id\" in pr:\n self.processBox.addItem(pr['id'])\n\n self.refresh_jobs()\n\n if len(collection_result) == 0 and len(process_result) == 0:\n warning(self.iface, \"Backend URL does not have collections or processes defined, or is not valid!\")\n return\n\n # Update Status text\n boldFont = QtGui.QFont()\n boldFont.setBold(True)\n self.statusLabel.setFont(boldFont)\n if user:\n self.statusLabel.setText(\"Connected to {} as {}\".format(url, user))\n else:\n self.statusLabel.setText(\"Connected to {} without user\".format(url))", "def notebook():\n pass", "def notebook():\n pass", "def connect(self) -> None:", "def connect(self):\n\t\tpass", "def jupyter():", "def run(self):\n self.connect()", "def connect():\n\n crate = get_crate()\n crate.mch_comms.ipmitool_shell_connect()", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self) -> None:\n ...", "def connect(self):\n self.jade.connect()", "def connect(self):\n pass", "def connect(self):", "def connect(self):", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def start_notebook_instance(NotebookInstanceName=None):\n pass", "def connect(self):\n self.conn.connect()", "def connect(self):\n self.impl.connect()" ]
[ "0.69533813", "0.6855013", "0.67147774", "0.6329623", "0.628003", "0.62060857", "0.61278397", "0.61278397", "0.6008197", "0.5966993", "0.5948698", "0.59225404", "0.5851347", "0.5850793", "0.5850793", "0.5850793", "0.5850793", "0.5850793", "0.5850793", "0.5850793", "0.5850793", "0.58233", "0.5820409", "0.580433", "0.57908654", "0.57908654", "0.575259", "0.5719023", "0.57141286", "0.57139975" ]
0.82992566
0
sends figure to frontend
def send_fig(fig, name): imgdata = BytesIO() fig.savefig(imgdata, format='png') imgdata.seek(0) # rewind the data uri = 'data:image/png;base64,' + urllib.parse.quote( base64.encodebytes(imgdata.getbuffer())) send_action("update_plot", params={"src": uri, "name": name})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_figure(self):\n\n self.draw()", "def fig_response(fig):\n img_bytes = io.BytesIO()\n fig.savefig(img_bytes)\n img_bytes.seek(0)\n return send_file(img_bytes, mimetype='image/png')", "def update_plot():\n pass", "def draw(self):\n self.figure.show()\n self.figure.canvas.draw()", "def render(self, mode='human', close=False):\n plt.figure(figsize=(20,12))\n plt.plot(self.history)\n plt.show()", "def save_figure(self, data):\n\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, \"gray\")\n\n\t\t#plt.show()\n\t\tself.plotfile = os.path.join('static', 'Figure' + '.png')\n\t\tplt.savefig(self.plotfile, dpi = sizes[1])", "def plot(self):\n pass", "def draw(self):\n fig=self.circuit.draw(output='mpl')\n return self.figToResponse(fig)", "def data_graph():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response", "def save_plot(self, ):\n pass", "def add_figure(self,sig,index,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(sig)", "def plot_refresh():\n figure.canvas.draw()", "def plot():\n pass", "def draw(self):\n self.figure.canvas.draw_idle()", "def plot(self, *args, **kwargs):\n pass", "def setup_figure(self):\n \n # connect ui widgets to measurement/hardware settings or functions\n self.ui.start_pushButton.clicked.connect(self.start)\n self.ui.interrupt_pushButton.clicked.connect(self.interrupt)\n self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)\n self.settings.save_movie.connect_to_widget(self.ui.save_movie_checkBox)\n \n # Set up pyqtgraph graph_layout in the UI\n self.graph_layout=pg.GraphicsLayoutWidget()\n self.ui.plot_groupBox.layout().addWidget(self.graph_layout)\n \n self.aux_graph_layout=pg.GraphicsLayoutWidget()\n self.ui.aux_plot_groupBox.layout().addWidget(self.aux_graph_layout)\n \n self.camera_layout=pg.GraphicsLayoutWidget()\n self.ui.camera_groupBox.layout().addWidget(self.camera_layout)\n\n # Create PlotItem object (a set of axes) \n \n self.plot1 = self.graph_layout.addPlot(row=1,col=1,title=\"Lick\")\n self.plot2 = self.graph_layout.addPlot(row=2,col=1,title=\"breathing\")\n\n # Create PlotDataItem object ( a 
scatter plot on the axes )\n self.breathing_plot = self.plot2.plot([0])\n self.lick_plot_0 = self.plot1.plot([0])\n self.lick_plot_1 = self.plot1.plot([1]) \n \n self.lick_plot_0.setPen('y')\n self.lick_plot_1.setPen('g')\n \n self.T=np.linspace(0,10,10000)\n self.k=0\n \n self.camera_view=pg.ViewBox()\n self.camera_layout.addItem(self.camera_view)\n self.camera_image=pg.ImageItem()\n self.camera_view.addItem(self.camera_image)", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def _draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)", "def show_figure(self):\n pylab.show()", "def subplot_to_figure(self):\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure", "def save_render(self, file_name: str) -> None:\n self.fig.savefig(file_name)", "def goplot(self, sender):\n cube = self.cube_picker.value\n if cube:\n IPython.display.clear_output()\n fig = plt.figure()\n x_name = self.x_coord.value\n y_name = self.y_coord.value\n if (cube.coord(axis='X').name() == x_name and\n cube.coord(axis='Y').name() == y_name):\n projection = cube.coord_system().as_cartopy_projection()\n ax = fig.add_subplot(111, projection=projection)\n ax.coastlines()\n else:\n ax = fig.add_subplot(111)\n conf = self.plot_type.value(cube, ax, coords=[x_name, y_name])\n self.browser = cube_browser.Browser(conf)\n self.browser.on_change(None)\n self.plot_container.children = [self.browser.form]", "def plot_preview_png():\n name = request.args.get('prev_instance')\n name = str(name)\n fig = create_preview(name)\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype='image/png')", "def execute(self, fig):\n # subclasses must implement this.\n raise NotImplementedError", "def embed_matplotlib(self):", "def plot_graph(self) -> None:", "def show():\n setup()\n plt.show()" ]
[ "0.7138022", "0.6838671", "0.6577294", "0.6567905", "0.65288734", "0.64819086", "0.6415332", "0.6354225", "0.6308796", "0.62955534", "0.62464154", "0.6243661", "0.62136626", "0.6211995", "0.6189271", "0.6188972", "0.6187396", "0.6175288", "0.61647385", "0.61150765", "0.6106328", "0.6105145", "0.6090672", "0.6062928", "0.60375935", "0.6029813", "0.60261196", "0.6024095", "0.6020456", "0.6013552" ]
0.71985286
0
Creates a Qt's MIME data object for data in valid QmxGraph's drag&drop format.
def create_qt_mime_data(data): from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QMimeData item_data = QByteArray() data_stream = QDataStream(item_data, QIODevice.WriteOnly) qgraph_mime = { 'version': qmxgraph.constants.QGRAPH_DD_MIME_VERSION, } qgraph_mime.update(data) data_stream.writeString(json.dumps(qgraph_mime).encode('utf8')) mime_data = QMimeData() mime_data.setData(qmxgraph.constants.QGRAPH_DD_MIME_TYPE, item_data) return mime_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_drop(self, event):\n data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)\n if not data.isNull():\n data_stream = QDataStream(data, QIODevice.ReadOnly)\n parsed = json.loads(data_stream.readString().decode('utf8'))\n\n # Refer to `mime.py` for docs about format\n version = parsed['version']\n if version not in (1, 2):\n raise ValueError(\"Unsupported version of QmxGraph MIME data: {}\".format(version))\n\n x = event.pos().x()\n y = event.pos().y()\n\n if version in (1, 2):\n vertices = parsed.get('vertices', [])\n scale = self.api.get_zoom_scale()\n for v in vertices:\n # place vertices with an offset so their center falls\n # in the event point.\n vertex_x = x + (v['dx'] - v['width'] * 0.5) * scale\n vertex_y = y + (v['dy'] - v['height'] * 0.5) * scale\n self.api.insert_vertex(\n x=vertex_x,\n y=vertex_y,\n width=v['width'],\n height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n if version in (2,):\n decorations = parsed.get('decorations', [])\n for v in decorations:\n self.api.insert_decoration(\n x=x,\n y=y,\n width=v['width'],\n height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n event.acceptProposedAction()\n else:\n event.ignore()", "def mouseMoveEvent(self, e):\n if e.buttons() != Qt.LeftButton:\n return\n\n mimeData = QtCore.QMimeData()\n mimeData.setData(\n app.NODE_MIMETYPE,\n QtCore.QByteArray(bytes('data string', 'utf-8')),\n )\n\n drag = QtGui.QDrag(self)\n drag.setMimeData(mimeData)\n drag.setHotSpot(e.pos() - self.rect().topLeft())\n \n dropAction = drag.exec_(Qt.MoveAction)", "def dropMimeData(self, p_int, QMimeData, Qt_DropAction): # real signature unknown; restored from __doc__\r\n return False", "def drag_data_received(self, widget, context, x, y, sel_data, info, time):\n if not sel_data:\n return\n #modern file managers provide URI_LIST. 
For Windows split sel_data.data\n files = sel_data.get_uris()\n for file in files:\n if win():\n clean_string = conv_to_unicode(\n file.replace('\\0',' ').replace(\"\\r\", \" \").strip(),\n None)\n else:\n clean_string = file\n protocol, site, mfile, j, k, l = urlparse(clean_string)\n if protocol == \"file\":\n name = url2pathname(mfile)\n mime = get_type(name)\n if not is_valid_type(mime):\n return\n photo = MediaObject()\n self.uistate.set_busy_cursor(True)\n photo.set_checksum(create_checksum(name))\n self.uistate.set_busy_cursor(False)\n base_dir = cuni(media_path(self.dbstate.db))\n if os.path.exists(base_dir):\n name = relative_path(name, base_dir)\n photo.set_path(name)\n photo.set_mime_type(mime)\n basename = os.path.basename(name)\n (root, ext) = os.path.splitext(basename)\n photo.set_description(root)\n with DbTxn(_(\"Drag Media Object\"), self.dbstate.db) as trans:\n self.dbstate.db.add_object(photo, trans)\n widget.emit_stop_by_name('drag_data_received')", "def mimeData(self, indices):\n \n pass", "def get_mime_encoded_user_data(self):\n # Split the frequencies\n index_underscore = find(self._frequency_id, '_')\n index_tilde = find(self._frequency_id, '~')\n min_freq = self._frequency_id[index_underscore + 1:index_tilde]\n max_freq = self._frequency_id[index_tilde + 1:]\n LOGGER.info('min_freq: {0}, max_freq: {1}'.format(min_freq, max_freq))\n\n # Build the mime message\n user_data = MIMEMultipart()\n user_data.attach(get_cloud_init())\n\n swap_size = self.get_swap_size()\n data_formatted = self._user_data.format(self._frequency_id, min_freq, max_freq, swap_size, PIP_PACKAGES)\n user_data.attach(MIMEText(self._setup_disks + data_formatted))\n return user_data.as_string()", "def package_data(self, data):\n pass", "def startDrag(self):\n data = QtCore.QMimeData()\n data.versionId = self.id\n data.controller = self.scene().controller\n drag = QtGui.QDrag(self.scene().views()[0])\n drag.setMimeData(data)\n drag.setPixmap(CurrentTheme.VERSION_DRAG_PIXMAP)\n drag.start()", "def _copy_attachment(self, name, data, mimetype, mfg_event):\n attachment = mfg_event.attachment.add()\n attachment.name = name\n attachment.value_binary = data\n if mimetype in test_runs_converter.MIMETYPE_MAP:\n attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]\n elif mimetype == test_runs_pb2.MULTIDIM_JSON:\n attachment.type = mimetype\n else:\n attachment.type = test_runs_pb2.BINARY", "def get_previewdata(cls, pydata):\n return JsonContentType.dumps(pydata)", "def cast(*args):\n return _itkMeshSourcePython.itkMeshSourceMD2Q_cast(*args)", "def serialize(self, data):", "def cast(*args):\n return _itkMeshSourcePython.itkMeshSourceMD3Q_cast(*args)", "def coerce(cls, md): \n # See if the data is already of the right type. If it is then we know \n # we are in the same process. \n if isinstance(md, cls): \n return md \n\n # See if the data type is supported. 
\n if not md.hasFormat(cls.MIME_TYPE): \n return None \n\n nmd = cls() \n nmd.setData(cls.MIME_TYPE, md.data()) \n\n return nmd", "def itkMeshSourceMD2Q_cast(*args):\n return _itkMeshSourcePython.itkMeshSourceMD2Q_cast(*args)", "def dropEvent(self, event):\r\n source = event.mimeData()\r\n if source.hasUrls():\r\n files = mimedata2url(source)\r\n if files:\r\n self.plugin.load(files)\r\n elif source.hasText():\r\n editor = self.currentWidget()\r\n if editor is not None:\r\n editor.insert_text( source.text() )\r\n event.acceptProposedAction()", "def _approve_only_dd_mime_type(self, event):\n data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)\n if not data.isNull():\n event.acceptProposedAction()\n else:\n event.ignore()", "def dropEvent(self, event):\r\n source = event.mimeData()\r\n if source.hasUrls():\r\n files = mimedata2url(source)\r\n if files:\r\n files = [\"r'%s'\" % path for path in files]\r\n if len(files) == 1:\r\n text = files[0]\r\n else:\r\n text = \"[\" + \", \".join(files) + \"]\"\r\n self.shell.insert_text(text)\r\n elif source.hasText():\r\n lines = unicode(source.text())\r\n self.shell.set_cursor_position('eof')\r\n self.shell.execute_lines(lines)\r\n event.acceptProposedAction()", "def itkMeshSourceMD3Q_cast(*args):\n return _itkMeshSourcePython.itkMeshSourceMD3Q_cast(*args)", "def getTransferData(self, f: java.awt.datatransfer.DataFlavor) -> object:\n ...", "def decode_data(self, data):\n self._mms_message = message.MMSMessage()\n self._mms_data = data\n body_iter = self.decode_message_header()\n self.decode_message_body(body_iter)\n return self._mms_message", "def _preprocess_data(data, data_type, auth=None):\n if data_type == \"raw\":\n # Use passed data as is\n return data\n elif data_type == \"json_pgframe\":\n return PandasPGFrame.from_json(data)\n elif data_type == \"nexus_dataset\":\n if auth is None:\n raise ValueError(\n \"To use Nexus-hosted property graph as the dataset \"\n \"authentication token should be provided in the \"\n \"request header\")\n forge = KnowledgeGraphForge(\n app.config[\"FORGE_CONFIG\"], endpoint=data[\"endpoint\"],\n bucket=data[\"bucket\"], token=auth)\n resource = forge.retrieve(data[\"resource_id\"])\n forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n downloaded_file = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], resource.distribution.name)\n graph = PandasPGFrame.load_json(downloaded_file)\n os.remove(downloaded_file)\n return graph\n else:\n raise ValueError(\"Unknown data type\")", "def to_internal_value(self, data):\n try: # ToDo penetrate in order test if it has any security flaws\n decoded = base64.b64decode(data)\n mime_type = magic.from_buffer(decoded, mime=True)\n file_ext = mimetypes.guess_extension(mime_type)\n except TypeError:\n raise serializers.ValidationError(\n _('Not a valid base64 file')\n )\n\n if file_ext not in settings.VALID_FILE_EXTENSIONS:\n raise serializers.ValidationError(\n _('Forbidden file extension')\n )\n\n file_name = \"{0}{1}\".format(uuid.uuid4(), file_ext)\n data = ContentFile(decoded, name=file_name)\n return data", "def get_mimetype(data: bytes) -> str:\n f = magic.Magic(keep_going=True, mime=False)\n return f.from_buffer(data)", "def deserialize(self, data):", "def serialize(self, data):\n raise NotImplementedError", "def _create_mime_attachment(self, content, mimetype):\n basetype, subtype = mimetype.split('/', 1)\n if basetype == 'text':\n encoding = self.encoding or getattr(settings, \"EMAIL_CHARSET\",\n settings.DEFAULT_CHARSET)\n 
attachment = SafeMIMEText(content, subtype, encoding)\n elif basetype == 'message' and subtype == 'rfc822':\n # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments\n # must not be base64 encoded.\n if isinstance(content, EmailMessage):\n # convert content into an email.Message first\n content = content.message()\n elif not isinstance(content, Message):\n # For compatibility with existing code, parse the message\n # into an email.Message object if it is not one already.\n content = message_from_string(content)\n\n attachment = SafeMIMEMessage(content, subtype)\n else:\n # Encode non-text attachments with base64.\n attachment = MIMEBase(basetype, subtype)\n attachment.set_payload(content)\n encoders.encode_base64(attachment)\n return attachment", "def process_dropped_data(self, item, origin: str, path: Path):\n\n layer_type_origin_map = {\n PRESENTATION: LayerType.PRESENTATION,\n PRIMARY: LayerType.PRIMARY,\n SECONDARY: LayerType.SECONDARY\n }\n\n bounding_rect = item.boundingRect()\n\n pos = Position(item.x() * SVG_SCALE_FACTOR, item.y() * SVG_SCALE_FACTOR, item.zValue())\n\n size = Size(bounding_rect.width() * SVG_SCALE_FACTOR, bounding_rect.height() * SVG_SCALE_FACTOR)\n\n if origin == BACKGROUND:\n self.__template.set_background(str(path), size=size)\n else:\n try:\n layer = self.__template.add_layer(pos=pos, size=size, _type=layer_type_origin_map[origin])\n except NoBaseSvgError as err:\n self.removeItem(item)\n error_dialog = QErrorMessage(self.parent())\n error_dialog.showMessage(str(err))\n else:\n self.__template.map_layer_with_item(layer, graphic_item=item)", "def __init__(self, content_type=\"file-path/raw-bytes\"):\n super(DataSerializer, self).__init__(content_type=content_type)", "def serialize(self, data):\n return data" ]
[ "0.6764802", "0.57681173", "0.57338566", "0.54429924", "0.54129314", "0.5173887", "0.49697393", "0.49251208", "0.4924302", "0.49108702", "0.48752618", "0.48632976", "0.48584068", "0.48275545", "0.47537842", "0.47520423", "0.47033548", "0.46772367", "0.46584952", "0.46547464", "0.462936", "0.46080074", "0.45958254", "0.4590937", "0.45899907", "0.45510575", "0.45429683", "0.45418736", "0.45412883", "0.45192996" ]
0.7841038
0
execute the NFS call If an error code is specified in the exceptions it means that the caller wants to handle the error himself
def execute(self, ops, exceptions=[], delay=5, maxretries=3): retry_errors = [NFS4ERR_DELAY, NFS4ERR_GRACE] state_errors = [NFS4ERR_STALE_CLIENTID, NFS4ERR_BADSESSION, NFS4ERR_BADSLOT, NFS4ERR_DEADSESSION] while True: res = self.sess.compound(ops) if res.status == NFS4_OK or res.status in exceptions: return res elif res.status in retry_errors: if maxretries > 0: maxretries -= 1 time.sleep(delay) else: log.error("Too many retries with DS %s" % self.server) raise Exception("Dataserver communication retry error") elif res.status in state_errors: self.disconnect() self.connect() else: log.error("Unhandled status %s from DS %s" % (nfsstat4[res.status], self.server)) raise Exception("Dataserver communication error")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shellExecErrorCode(cmd):\n return subprocess.call(cmd, shell=True)", "def _runCommandRaiseIfFail (self, command, killTimeout = DEAFULT_KILL_TIMEOUT, warningTimeout = DEAFULT_WARNING_TIMEOUT, shell=False):\n (rc,outText,errText) = self._runCommand(command, killTimeout = killTimeout, warningTimeout = warningTimeout, shell = shell)\n if rc != 0:\n self._log(\"run-command-raising\").warning(\"Command returned '%s', raising exception\", rc)\n raise SdUtilsError(\"Failed running command %s\" % command)\n return (outText,errText)", "def _run(self):\n try:\n self.logger.debug('Temp directory {} contents {}'.format('/tmp', get_subdirs('/tmp/')))\n return self.__execute()\n except Exception as e:\n self.report_error(e.message)\n raise Exception, Exception(e), sys.exc_info()[2]", "def _run_process(self, raise_exception=None):\n if not self._hosts:\n raise CommandFailure('No hosts specified for fio command')\n\n if raise_exception is None:\n raise_exception = self.exit_status_exception\n\n # Run fio remotely\n result = run_remote(self.log, self._hosts, self.with_exports, timeout=None)\n if raise_exception and not result.passed:\n raise CommandFailure(\"Error running fio on: {}\".format(result.failed_hosts))\n return result", "def _do_mount(self, cmd, ensure):\n try:\n self._execute(*cmd, run_as_root=True)\n except exception.ProcessExecutionError as exc:\n if ensure and 'already mounted' in exc.stderr:\n LOG.warn(_LW(\"%s is already mounted\"),\n self.gluster_manager.export)\n else:\n raise exception.GlusterfsException(\n 'Unable to mount Gluster volume'\n )", "def execute(self, fn, *args, **kwargs):\n self.ex(fn, *args, **kwargs)\n return self.ecute()", "def handle_exceptions(self, excp):\r\n try:\r\n if excp:\r\n errorstr = \"Exception: {0}\".format(excp.__class__.__name__)\r\n errorstr = errorstr+\"({0})\".format(excp.message) if \\\r\n hasattr(excp, \"message\") else errorstr\r\n LOGGER.info(errorstr)\r\n raise\r\n # ****** RDMC ERRORS ******\r\n except ConfigurationFileError as excp:\r\n self.retcode = ReturnCodes.CONFIGURATION_FILE_ERROR\r\n UI().error(excp)\r\n sys.exit(excp.errcode)\r\n except CommandNotEnabledError as excp:\r\n self.retcode = ReturnCodes.COMMAND_NOT_ENABLED_ERROR\r\n UI().command_not_enabled(excp)\r\n extensions.Commands['HelpCommand'](rdmc=self).run(\"\")\r\n except InvalidCommandLineError as excp:\r\n self.retcode = ReturnCodes.INVALID_COMMAND_LINE_ERROR\r\n UI().invalid_commmand_line(excp)\r\n except NoCurrentSessionEstablished as excp:\r\n self.retcode = ReturnCodes.NO_CURRENT_SESSION_ESTABLISHED\r\n UI().error(excp)\r\n except NoChangesFoundOrMadeError as excp:\r\n self.retcode = ReturnCodes.NO_CHANGES_MADE_OR_FOUND\r\n UI().invalid_commmand_line(excp)\r\n except StandardBlobErrorHandler as excp:\r\n self.retcode = ReturnCodes.GENERAL_ERROR\r\n UI().standard_blob_error(excp)\r\n except InvalidFileInputError as excp:\r\n self.retcode = ReturnCodes.INVALID_FILE_INPUT_ERROR\r\n UI().invalid_commmand_line(excp)\r\n except InvalidCommandLineErrorOPTS as excp:\r\n self.retcode = ReturnCodes.INVALID_COMMAND_LINE_ERROR\r\n except InvalidFileFormattingError as excp:\r\n self.retcode = ReturnCodes.INVALID_FILE_FORMATTING_ERROR\r\n UI().invalid_file_formatting(excp)\r\n except NoContentsFoundForOperationError as excp:\r\n self.retcode = ReturnCodes.NO_CONTENTS_FOUND_FOR_OPERATION\r\n UI().no_contents_found_for_operation(excp)\r\n except InfoMissingEntriesError as excp:\r\n self.retcode = ReturnCodes.NO_VALID_INFO_ERROR\r\n UI().error(excp)\r\n except 
(InvalidOrNothingChangedSettingsError, redfish.ris.rmc_helper.\\\r\n IncorrectPropValue) as excp:\r\n self.retcode = ReturnCodes.SAME_SETTINGS_ERROR\r\n UI().error(excp)\r\n except NoDifferencesFoundError as excp:\r\n self.retcode = ReturnCodes.NO_CHANGES_MADE_OR_FOUND\r\n UI().no_differences_found(excp)\r\n except MultipleServerConfigError as excp:\r\n self.retcode = ReturnCodes.MULTIPLE_SERVER_CONFIG_FAIL\r\n UI().multiple_server_config_fail(excp)\r\n except InvalidMSCfileInputError as excp:\r\n self.retcode = ReturnCodes.MULTIPLE_SERVER_INPUT_FILE_ERROR\r\n UI().multiple_server_config_input_file(excp)\r\n except FirmwareUpdateError as excp:\r\n self.retcode = ReturnCodes.FIRMWARE_UPDATE_ERROR\r\n UI().error(excp)\r\n except FailureDuringCommitError as excp:\r\n self.retcode = ReturnCodes.FAILURE_DURING_COMMIT_OPERATION\r\n UI().error(excp)\r\n except BootOrderMissingEntriesError as excp:\r\n self.retcode = ReturnCodes.BOOT_ORDER_ENTRY_ERROR\r\n UI().error(excp)\r\n except NicMissingOrConfigurationError as excp:\r\n self.retcode = ReturnCodes.NIC_MISSING_OR_INVALID_ERROR\r\n UI().error(excp)\r\n except (IncompatibleiLOVersionError, redfish.ris.rmc_helper.\\\r\n IncompatibleiLOVersionError) as excp:\r\n self.retcode = ReturnCodes.INCOMPATIBLE_ILO_VERSION_ERROR\r\n UI().printmsg(excp)\r\n except IncompatableServerTypeError as excp:\r\n self.retcode = ReturnCodes.INCOMPATIBLE_SERVER_TYPE\r\n UI().printmsg(excp)\r\n except IloLicenseError as excp:\r\n UI().printmsg(excp)\r\n self.retcode = ReturnCodes.ILO_LICENSE_ERROR\r\n except InvalidCListFileError as excp:\r\n self.retcode = ReturnCodes.INVALID_CLIST_FILE_ERROR\r\n UI().error(excp)\r\n except PartitionMoutingError as excp:\r\n self.retcode = ReturnCodes.UNABLE_TO_MOUNT_BB_ERROR\r\n UI().error(excp)\r\n except TimeOutError as excp:\r\n self.retcode = ReturnCodes.UPDATE_SERVICE_BUSY\r\n UI().error(excp)\r\n except DownloadError as excp:\r\n self.retcode = ReturnCodes.FAILED_TO_DOWNLOAD_COMPONENT\r\n UI().error(excp)\r\n except UploadError as excp:\r\n self.retcode = ReturnCodes.FAILED_TO_UPLOAD_COMPONENT\r\n UI().error(excp)\r\n except BirthcertParseError as excp:\r\n self.retcode = ReturnCodes.BIRTHCERT_PARSE_ERROR\r\n UI().error(excp)\r\n except ResourceExists as excp:\r\n self.retcode = ReturnCodes.RESOURCE_EXISTS_ERROR\r\n UI().error(excp)\r\n except InvalidKeyError as excp:\r\n self.retcode = ReturnCodes.ENCRYPTION_ERROR\r\n UI().error(\"Invalid key has been entered for \" \\\r\n \"encryption/decryption.\")\r\n except UnableToDecodeError as excp:\r\n self.retcode = ReturnCodes.ENCRYPTION_ERROR\r\n UI().error(excp)\r\n except UnabletoFindDriveError as excp:\r\n self.retcode = ReturnCodes.DRIVE_MISSING_ERROR\r\n UI().error(excp)\r\n UI().printmsg(\"Error occurred while reading device labels.\")\r\n except PathUnavailableError as excp:\r\n self.retcode = ReturnCodes.PATH_UNAVAILABLE_ERROR\r\n if excp:\r\n UI().error(excp)\r\n else:\r\n UI().printmsg(\"Requested path is unavailable.\")\r\n except TaskQueueError as excp:\r\n self.retcode = ReturnCodes.TASKQUEUE_ERROR\r\n UI().error(excp)\r\n # ****** CLI ERRORS ******\r\n except cliutils.CommandNotFoundException as excp:\r\n self.retcode = ReturnCodes.UI_CLI_COMMAND_NOT_FOUND_EXCEPTION\r\n UI().command_not_found(excp)\r\n extensions.Commands['HelpCommand'](rdmc=self).run(\"\")\r\n # ****** RMC/RIS ERRORS ******\r\n except redfish.ris.UndefinedClientError:\r\n self.retcode = ReturnCodes.RIS_UNDEFINED_CLIENT_ERROR\r\n UI().error(\"Please login before making a selection\")\r\n except 
(redfish.ris.InstanceNotFoundError, redfish.ris.\\\r\n RisInstanceNotFoundError) as excp:\r\n self.retcode = ReturnCodes.RIS_INSTANCE_NOT_FOUND_ERROR\r\n UI().printmsg(excp)\r\n except redfish.ris.CurrentlyLoggedInError as excp:\r\n self.retcode = ReturnCodes.RIS_CURRENTLY_LOGGED_IN_ERROR\r\n UI().error(excp)\r\n except redfish.ris.NothingSelectedError as excp:\r\n self.retcode = ReturnCodes.RIS_NOTHING_SELECTED_ERROR\r\n UI().nothing_selected()\r\n except redfish.ris.NothingSelectedFilterError as excp:\r\n self.retcode = ReturnCodes.RIS_NOTHING_SELECTED_FILTER_ERROR\r\n UI().nothing_selected_filter()\r\n except redfish.ris.NothingSelectedSetError as excp:\r\n self.retcode = ReturnCodes.RIS_NOTHING_SELECTED_SET_ERROR\r\n UI().nothing_selected_set()\r\n except redfish.ris.InvalidSelectionError as excp:\r\n self.retcode = ReturnCodes.RIS_INVALID_SELECTION_ERROR\r\n UI().error(excp)\r\n except redfish.ris.rmc_helper.UnableToObtainIloVersionError as excp:\r\n self.retcode = ReturnCodes.INCOMPATIBLE_ILO_VERSION_ERROR\r\n UI().error(excp)\r\n except redfish.ris.IdTokenError as excp:\r\n if excp.message:\r\n UI().printmsg(excp.message)\r\n else:\r\n UI().printmsg(u\"Logged-in account does not have the privilege \"\\\r\n \" required to fulfill the request or a required \"\\\r\n \" token is missing.\"\\\r\n \"\\nEX: biospassword flag if bios password present \"\\\r\n \"or tpmenabled flag if TPM module present.\")\r\n self.retcode = ReturnCodes.RIS_MISSING_ID_TOKEN\r\n except redfish.ris.SessionExpired as excp:\r\n self.retcode = ReturnCodes.RIS_SESSION_EXPIRED\r\n self.app.logout()\r\n UI().printmsg(\"Current session has expired or is invalid, \"\\\r\n \"please login again with proper credentials to continue.\\n\")\r\n except redfish.ris.ValidationError as excp:\r\n self.retcode = ReturnCodes.RIS_VALIDATION_ERROR\r\n except redfish.ris.ValueChangedError as excp:\r\n self.retcode = ReturnCodes.RIS_VALUE_CHANGED_ERROR\r\n except redfish.ris.ris.SchemaValidationError as excp:\r\n UI().printmsg(\"Error found in schema, try running with the \"\\\r\n \"--latestschema flag.\")\r\n self.retcode = ReturnCodes.RIS_SCHEMA_PARSE_ERROR\r\n # ****** RMC/RIS ERRORS ******\r\n except redfish.rest.v1.RetriesExhaustedError as excp:\r\n self.retcode = ReturnCodes.V1_RETRIES_EXHAUSTED_ERROR\r\n UI().retries_exhausted_attemps()\r\n except redfish.rest.v1.InvalidCredentialsError as excp:\r\n self.retcode = ReturnCodes.V1_INVALID_CREDENTIALS_ERROR\r\n UI().invalid_credentials(excp)\r\n except redfish.rest.v1.JsonDecodingError as excp:\r\n self.retcode = ReturnCodes.JSON_DECODE_ERROR\r\n UI().error(excp)\r\n except redfish.rest.v1.ServerDownOrUnreachableError as excp:\r\n self.retcode = \\\r\n ReturnCodes.V1_SERVER_DOWN_OR_UNREACHABLE_ERROR\r\n UI().error(excp)\r\n except redfish.rest.v1.ChifDriverMissingOrNotFound as excp:\r\n self.retcode = ReturnCodes.V1_CHIF_DRIVER_MISSING_ERROR\r\n UI().printmsg(\"Chif driver not found, please check that the \" \\\r\n \"chif driver is installed.\")\r\n except redfish.rest.v1.SecurityStateError as excp:\r\n self.retcode = ReturnCodes.V1_SECURITY_STATE_ERROR\r\n if isinstance(excp.message, int):\r\n UI().printmsg(\"High security mode [%s] has been enabled. 
\" \\\r\n \"Please provide credentials.\" % excp.message)\r\n else:\r\n UI().error(excp)\r\n except redfish.hpilo.risblobstore2.ChifDllMissingError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_CHIF_DLL_MISSING_ERROR\r\n UI().printmsg(\"iLOrest Chif dll not found, please check that the \"\\\r\n \"chif dll is present.\")\r\n except redfish.hpilo.risblobstore2.UnexpectedResponseError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_UNEXPECTED_RESPONSE_ERROR\r\n UI().printmsg(\"Unexpected data received from iLO.\")\r\n except redfish.hpilo.risblobstore2.HpIloError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_ILO_ERROR\r\n UI().printmsg(\"iLO returned a failed error code.\")\r\n except redfish.hpilo.risblobstore2.Blob2CreateError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_CREATE_BLOB_ERROR\r\n UI().printmsg(\"Blob create operation failed.\")\r\n except redfish.hpilo.risblobstore2.Blob2ReadError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_READ_BLOB_ERROR\r\n UI().printmsg(\"Blob read operation failed.\")\r\n except redfish.hpilo.risblobstore2.Blob2WriteError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_WRITE_BLOB_ERROR\r\n UI().printmsg(\"Blob write operation failed.\")\r\n except redfish.hpilo.risblobstore2.Blob2DeleteError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_BLOB_DELETE_ERROR\r\n UI().printmsg(\"Blob delete operation failed.\")\r\n except redfish.hpilo.risblobstore2.Blob2OverrideError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_BLOB_OVERRIDE_ERROR\r\n UI().error(excp)\r\n UI().printmsg(\"\\nBlob was overwritten by another user. Please \" \\\r\n \"ensure only one user is making changes at a time locally.\")\r\n except redfish.hpilo.risblobstore2.BlobRetriesExhaustedError as excp:\r\n self.retcode = ReturnCodes.REST_BLOB_RETRIES_EXHAUSETED_ERROR\r\n UI().printmsg(\"\\nBlob operation still fails after max retries.\")\r\n except redfish.hpilo.risblobstore2.Blob2FinalizeError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_BLOB_FINALIZE_ERROR\r\n UI().printmsg(\"Blob finalize operation failed.\")\r\n except redfish.hpilo.risblobstore2.BlobNotFoundError as excp:\r\n self.retcode = ReturnCodes.REST_ILOREST_BLOB_NOT_FOUND_ERROR\r\n UI().printmsg(\"Blob not found with key and namespace provided.\")\r\n except redfish.ris.rmc_helper.InvalidPathError as excp:\r\n self.retcode = ReturnCodes.RIS_REF_PATH_NOT_FOUND_ERROR\r\n UI().printmsg(\"Reference path not found.\")\r\n except redfish.ris.rmc_helper.IloResponseError as excp:\r\n self.retcode = ReturnCodes.RIS_ILO_RESPONSE_ERROR\r\n except redfish.ris.rmc_helper.UserNotAdminError as excp:\r\n UI().user_not_admin()\r\n self.retcode = ReturnCodes.USER_NOT_ADMIN\r\n except redfish.hpilo.rishpilo.HpIloInitialError as excp:\r\n UI().error(excp)\r\n self.retcode = ReturnCodes.RIS_ILO_INIT_ERROR\r\n except redfish.hpilo.rishpilo.HpIloWriteError as excp:\r\n UI().error(excp)\r\n self.retcode = ReturnCodes.RESOURCE_ALLOCATION_ISSUES_ERROR\r\n except redfish.hpilo.rishpilo.HpIloReadError as excp:\r\n UI().error(excp)\r\n self.retcode = ReturnCodes.RESOURCE_ALLOCATION_ISSUES_ERROR\r\n # ****** RIS OBJECTS ERRORS ******\r\n except redfish.ris.ris.BiosUnregisteredError as excp:\r\n self.retcode = ReturnCodes.RIS_RIS_BIOS_UNREGISTERED_ERROR\r\n UI().bios_unregistered_error()\r\n # ****** GENERAL ERRORS ******\r\n except SystemExit:\r\n self.retcode = ReturnCodes.GENERAL_ERROR\r\n raise\r\n except Exception as excp:\r\n self.retcode = ReturnCodes.GENERAL_ERROR\r\n sys.stderr.write('ERROR: 
%s\\n' % excp)\r\n\r\n if self.opts.debug:\r\n traceback.print_exc(file=sys.stderr)", "def _fake_execute(self, *cmd, **kwargs):\n cmdlist = list(cmd)\n exe = cmdlist.pop(0)\n if exe == 'vgc-cluster':\n exe = cmdlist.pop(0)\n if exe == \"request-cancel\":\n self._request_cancel = True\n if self._return_blocked > 0:\n return 'Request cancelled', ''\n else:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif self._fail_vgc_cluster:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif exe == \"--version\":\n return \"HGST Solutions V2.5.0.0.x.x.x.x.x\", ''\n elif exe == \"space-list\":\n return self._parse_space_list(cmdlist)\n elif exe == \"space-create\":\n self._parse_space_create(cmdlist)\n if self._return_blocked > 0:\n self._return_blocked = self._return_blocked - 1\n out = \"VGC_CREATE_000002\\nBLOCKED\\n\"\n raise processutils.ProcessExecutionError(stdout=out,\n exit_code=1)\n return '', ''\n elif exe == \"space-delete\":\n return self._parse_space_delete(cmdlist)\n elif exe == \"space-extend\":\n return self._parse_space_extend(cmdlist)\n elif exe == \"host-storage\":\n if self._fail_host_storage:\n raise processutils.ProcessExecutionError(exit_code=1)\n return HGST_HOST_STORAGE, ''\n elif exe == \"domain-list\":\n return self._parse_domain_list()\n elif exe == \"network-list\":\n return self._parse_network_list()\n elif exe == \"space-set-apphosts\":\n if self._fail_set_apphosts:\n raise processutils.ProcessExecutionError(exit_code=1)\n return '', ''\n else:\n raise NotImplementedError\n elif exe == 'ip':\n if self._fail_ip:\n raise processutils.ProcessExecutionError(exit_code=1)\n else:\n return IP_OUTPUT, ''\n elif exe == 'dd':\n self.dd_count = -1\n for p in cmdlist:\n if 'count=' in p:\n self.dd_count = int(p[6:])\n elif 'bs=' in p:\n self.bs = p[3:]\n return DD_OUTPUT, ''\n else:\n return '', ''", "def try_and_except(error_filepath, function, *parameters, **named_parameters):\n import sys, traceback, subprocess, logging\n try:\n return function(*parameters, **named_parameters)\n except phe_exceptions.PheException as phe_e:\n # This exception is created by the 'call_external', when exit code != 0\n \n logger = logging.getLogger(\"stdout_stderr_logger\")\n logger.exception(function.__name__ + \" has raised an exception: \\n\" + str(phe_e))\n \n # Exit with the returncode specified in the called process.\n # TODO: Should it be a different return code? E.g. 
ranged for traceback.\n sys.exit(phe_e.phe_returncode)\n except Exception as e:\n error_string = \"There was an error in the function '\" + function.__name__ + \"'\"\n error_divider = \"_\" * 60\n print error_string\n print error_divider\n traceback.print_exc()\n print error_divider\n\n error_file = open(error_filepath, \"a\")\n error_file.write(error_string + \"\\n\")\n error_file.write(error_divider + \"\\n\")\n traceback.print_exc(file = error_file)\n error_file.write(error_divider + \"\\n\")\n error_file.close()\n sys.exit(1)", "def execute(self, args):\r\n all_args = list(args)\r\n try:\r\n return self._cmd(all_args)\r\n except OSError as e:\r\n if errno.E2BIG == e.errno:\r\n args1, args2 = self._split_args(all_args)\r\n result = self.execute(args1)\r\n if result != 0:\r\n return result\r\n return self.execute(args2)\r\n else:\r\n raise e", "def command_mount(self, system_id, *system_ids):\n system_ids = (system_id,) + system_ids\n has_failed = False\n for system_id in system_ids:\n try:\n system = SystemModel.create_by_id(system_id, self.environment)\n controller = SystemControllerModel(system, self.environment)\n controller.mount()\n except SftpConfigException as e:\n sys.stderr.write('Cannot mount %s: %s\\n\\n' % (system_id, str(e)))\n has_failed = True\n except SftpMountException as e:\n sys.stderr.write('Cannot mount %s!\\n\\n' % system_id)\n sys.stderr.write('Mount command: \\n%s\\n\\n' % e.mount_cmd)\n sys.stderr.write('Command output: \\n%s\\n\\n' % e.mount_cmd_output)\n has_failed = True\n if has_failed:\n sys.exit(1)", "def run(self):\n try:\n self.runCommand()\n except TortugaException as ex:\n print(ex.getErrorMessage())\n raise SystemExit(ex.getErrorCode())\n except SystemExit:\n raise\n except Exception as ex:\n print(str(ex))\n raise SystemExit(-1)", "def execute(self, args):\n all_args = list(args)\n try:\n return self._cmd(all_args)\n except OSError as e:\n if errno.E2BIG == e.errno:\n args1, args2 = self._split_args(all_args)\n result = self.execute(args1)\n if result != 0:\n return result\n return self.execute(args2)\n else:\n raise e", "def process(file_path):\n\n try:\n _process(file_path)\n except Exception as exc:\n print(\"UPS\")\n raise exc", "def executor(host):\n try:\n exec_call(sssh(host))\n except Exception as msg:\n print('%s' % str(msg))\n pass", "def test_mount_failure(self):\n with prepared_image_file(create_filesystem=False):\n program = RsyncSystemBackup(\n crypto_device=CRYPTO_NAME,\n destination=os.path.join(MOUNT_POINT, 'latest'),\n mount_point=MOUNT_POINT,\n )\n # When `mount' fails it should exit with a nonzero exit code,\n # thereby causing executor to raise an ExternalCommandFailed\n # exception that obscures the FailedToMountError exception that\n # we're interested in. 
The check=False option enables our\n # `last resort error handling' code path to be reached.\n program.destination_context.options['check'] = False\n self.assertRaises(FailedToMountError, program.execute)", "def _procedure_call(self, command, arguments=\"\",\n path='', timeout=10):\n command = os.path.join(path, command)\n if len(self.command_prefix):\n command = SPACER.format(self.command_prefix,\n command)\n\n self.logger.debug(\"calling 'client.exec_command({0})'\".format(command))\n stdout = self.client.exec_command(SPACER.format(command, arguments),\n timeout=timeout)\n self.logger.debug(\"Completed 'client.exec_command({0})'\".format(command))\n\n \n\n stderr = StringIO(\"\")\n\n return OutputError(OutputFile(stdout, self.validate), stderr)", "def _ipc_call(self, fn_name, *args, **kwargs):\n if not callable(getattr(self, fn_name)):\n raise TypeError(f\"{fn_name} is not callable\")\n with self._mpsing_lock:\n msg = (fn_name, args, kwargs)\n self._mpsing_client_conn.send(msg)\n result = self._mpsing_client_conn.recv()\n if isinstance(result, BaseException):\n # TODO: sending the exception through the IPC pipe will strip its\n # __traceback__ property, as traceback objects cannot be\n # pickled. It would be nice to send some kind of traceback\n # info back though.\n raise result\n return result", "def run_cmd(cmd):\n command = cmd.split(\" \")[0]\n if command == \"ls\":\n r = requests.get(url.format(cmd.split(\" \")[1], \"OPEN\", userName))\n print(r.json())\n elif command == 'put':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Append_to_a_File\n # this part usess system call to contact the remote\n # server first creating the file then append toit\n # Sample use\n # >>> PUT <file-name> <file-path>\n fileName = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'PUT', url.format(\n fileName, 'CREATE', userName)]\n subprocess.call(system_call)\n system_call = ['curl', '-i', '-X', 'POST', url.format(\n fileName, 'APPEND', userName)]\n subprocess.call(system_call)\n system_call = ['curl', '-i', '-X', 'POST', '-T', cmd.slpit(\" \")[2],\n url.format(fileName, 'APPEND', userName)]\n subprocess.call(system_call)\n\n elif command == 'get':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Open_and_Read_a_File\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> GET <file-path>\n fileName = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-L', url.format(\n fileName, 'OPEN', userName)]\n subprocess.call(system_call)\n elif command == 'mkdir':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Make_a_Directory\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> mkdir <folder-Path>\n folderPath = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'PUT', url.format(\n folderPath, 'MKDIRS', userName)]\n subprocess.call(system_call)\n elif command == 'rmdir':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Delete_a_FileDirectory\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> rmdir <file-path>\n folderPath = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'DELETE', url.format(\n folderPath, 'DELETE', userName)]\n subprocess.call(system_call)\n else:\n print 'Command is invalid.'", "def wrap_exceptions(fun):\n @functools.wraps(fun)\n def wrapper(self, *args, **kwargs):\n try:\n 
return fun(self, *args, **kwargs)\n except OSError as err:\n raise convert_oserror(err, pid=self.pid, name=self._name)\n return wrapper", "def _raise_performing_request_error(self, *args, **kwargs):", "def send_rpc_error(req, rpcreq, e):", "def os_call( self, cmd_arg, ):\n while True: # will exit when it works or run out of editors\n a_command = self.working_command\n if a_command is None:\n a_command = self.get_next_command( )\n\n if a_command is None: # no commands left to try\n msg = \"Run out of editors to try\"\n# AppGlobal.__logger.error( msg )\n raise RuntimeError( msg ) # or fail in some other where\n break # we are aread done\n try:\n if cmd_arg is None:\n proc = Popen( [ a_command, ] )\n else:\n proc = Popen( [ a_command, cmd_arg ] )\n self.working_command = a_command\n break # do not get here if exception so command \"worked \"\n except Exception as excpt: # this should let us loop ignoring exception\n pass\n msg = ( f\"os_call exception trying to use >{a_command}< with cmd_arg >{cmd_arg}< exception: {excpt}\" )\n # if exception proc not returned f\"\\npopen returned {proc}\" )\n AppGlobal.logger.debug( msg )", "def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')", "def run(self, host):\n # run fio command\n self.run_cmd = self.run_cmd.replace(\"fio\", ' ').replace(\" POSIX\", '')\n print(\"Running: {}\".format('fio' + self.run_cmd))\n ret_code = general_utils.pcmd(host, 'fio' + self.run_cmd)\n\n # check for any failures\n if 0 not in ret_code:\n error_hosts = NodeSet(\n \",\".join(\n [str(node_set) for code, node_set in ret_code.items()\n if code != 0]))\n raise CommandFailure(\n \"Error starting fio on the following hosts: {}\".format(\n error_hosts))", "def try_execute(*cmd, **kwargs):\n try:\n return execute(*cmd, **kwargs)\n except (processutils.ProcessExecutionError, OSError) as e:\n LOG.debug('Command failed: %s', e)", "def run(ceph_cluster, **kw):\n try:\n log.info(f\"MetaData Information {log.metadata} in {__name__}\")\n fs_util = FsUtils(ceph_cluster)\n\n config = kw.get(\"config\")\n build = config.get(\"build\", config.get(\"rhbuild\"))\n clients = ceph_cluster.get_ceph_objects(\"client\")\n clients[0].upload_file(\n \"tests/cephfs/clients/file_lock_utitlity.py\",\n \"/home/cephuser/file_lock_utility.py\",\n sudo=True,\n )\n clients[1].upload_file(\n \"tests/cephfs/clients/file_lock_utitlity.py\",\n \"/home/cephuser/file_lock_utility.py\",\n sudo=True,\n )\n version, rc = clients[0].exec_command(\n sudo=True, cmd=\"ceph version --format json\"\n )\n fs_util.prepare_clients([clients[0]], build)\n fs_util.auth_list([clients[0], clients[1]])\n if not build.startswith((\"3\", \"4\", \"5\")):\n if not fs_util.validate_fs_info(clients[0], \"cephfs\"):\n log.error(\"FS info Validation failed\")\n return 1\n mounting_dir = \"\".join(\n random.choice(string.ascii_lowercase + string.digits)\n for _ in list(range(10))\n )\n fuse_mounting_dir = f\"/mnt/cephfs_fuse{mounting_dir}/\"\n fs_util.fuse_mount([clients[0], clients[1]], fuse_mounting_dir)\n\n kernel_mounting_dir = f\"/mnt/cephfs_kernel{mounting_dir}/\"\n mon_node_ips = fs_util.get_mon_node_ips()\n fs_util.kernel_mount(\n [clients[0], clients[1]], kernel_mounting_dir, \",\".join(mon_node_ips)\n )\n rc = unlink_file(\n clients[0],\n clients[1],\n \"fuse_mount.txt\",\n fuse_mounting_dir,\n validate_from=[kernel_mounting_dir],\n )\n\n if rc:\n raise CommandFailed(\"Unlink of the file is failing when file is locked\")\n rc = unlink_file(\n clients[0],\n 
clients[1],\n \"kernel_mount.txt\",\n kernel_mounting_dir,\n validate_from=[fuse_mounting_dir],\n )\n if rc:\n raise CommandFailed(\"Unlink of the file is failing when file is locked\")\n\n return 0\n\n except Exception as e:\n log.error(e)\n log.error(traceback.format_exc())\n return 1\n finally:\n log.info(\"---clean up---------\")\n fs_util.client_clean_up(\n \"umount\", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir\n )\n fs_util.client_clean_up(\n \"umount\",\n kernel_clients=[clients[0]],\n mounting_dir=kernel_mounting_dir,\n )", "def execute(self, rc):\n pass", "def raise_on_error(code, data):\n # get detailed server response message\n if code != 200:\n try:\n server_info = data.read()\n except Exception:\n server_info = None\n else:\n server_info = server_info.decode('ASCII', errors='ignore')\n if server_info:\n server_info = \"\\n\".join(\n line for line in server_info.splitlines() if line)\n # No data.\n if code == 204:\n raise FDSNNoDataException(\"No data available for request.\",\n server_info)\n elif code == 400:\n msg = (\"Bad request. If you think your request was valid \"\n \"please contact the developers.\")\n raise FDSNBadRequestException(msg, server_info)\n elif code == 401:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", server_info)\n elif code == 403:\n raise FDSNForbiddenException(\"Authentication failed.\",\n server_info)\n elif code == 413:\n raise FDSNRequestTooLargeException(\"Request would result in too much \"\n \"data. Denied by the datacenter. \"\n \"Split the request in smaller \"\n \"parts\", server_info)\n # Request URI too large.\n elif code == 414:\n msg = (\"The request URI is too large. Please contact the ObsPy \"\n \"developers.\", server_info)\n raise NotImplementedError(msg)\n elif code == 429:\n msg = (\"Sent too many requests in a given amount of time ('rate \"\n \"limiting'). 
Wait before making a new request.\", server_info)\n raise FDSNTooManyRequestsException(msg, server_info)\n elif code == 500:\n raise FDSNInternalServerException(\"Service responds: Internal server \"\n \"error\", server_info)\n elif code == 503:\n raise FDSNServiceUnavailableException(\"Service temporarily \"\n \"unavailable\",\n server_info)\n elif code is None:\n if \"timeout\" in str(data).lower() or \"timed out\" in str(data).lower():\n raise FDSNTimeoutException(\"Timed Out\")\n else:\n raise FDSNException(\"Unknown Error (%s): %s\" % (\n (str(data.__class__.__name__), str(data))))\n # Catch any non 200 codes.\n elif code != 200:\n raise FDSNException(\"Unknown HTTP code: %i\" % code, server_info)", "def launch(self):\n\n try:\n # clear error\n self.error = None\n\n self.load_url()\n except ChangedUrlWarning, e:\n self.handle_error(err.redirect)\n raise\n except filetype.WrongFileTypeError:\n self.handle_error(err.wrong_type)\n except ZeroDataError:\n self.handle_error(err.no_data)\n except urllib.ContentTooShortError:\n self.handle_error(err.incomplete)\n except ResumeChecksumFailed:\n self.handle_error(err.checksum)\n except ResumeNotSupported:\n self.handle_error(err.no_resume)\n except ErrorAlreadyProcessed:\n pass\n except IOError, exc:\n if exc and exc.args: \n if len(exc.args) == 2:\n (_, errobj) = exc.args\n if type(errobj) == socket.gaierror:\n self.handle_error(err.dns)\n return\n elif type(errobj) == socket.timeout:\n self.handle_error(err.timeout)\n return\n elif type(errobj) == socket.sslerror:\n self.handle_error(err.ssl)\n return\n elif type(errobj) == socket.error:\n self.handle_error(err.socket)\n return\n elif type(errobj) == ftplib.error_perm:\n self.handle_error(err.auth)\n return\n self.handle_error(err.url_error)\n except socket.timeout:\n self.handle_error(err.timeout)\n except KeyboardInterrupt:\n io.write_abort()\n raise" ]
[ "0.55850905", "0.55532014", "0.5517713", "0.54477644", "0.5419037", "0.53720886", "0.53673285", "0.53601074", "0.5353408", "0.5334857", "0.5279487", "0.52709293", "0.52592814", "0.524232", "0.5225657", "0.52237153", "0.51882654", "0.5178912", "0.5161693", "0.5132047", "0.51294494", "0.5103006", "0.510185", "0.50728464", "0.5069312", "0.5069179", "0.5022605", "0.5016087", "0.5010792", "0.50037557" ]
0.62338924
0
Populates a postgres DB with a `test_df` table in the `connection_test` schema to test DataConnectors against
def test_connectable_postgresql_db(sa, test_backends, test_df): if "postgresql" not in test_backends: pytest.skip("skipping fixture because postgresql not selected") url = get_sqlalchemy_url( drivername="postgresql", username="postgres", password="", host=os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), port="5432", database="test_ci", ) engine = sa.create_engine(url) with engine.begin() as connection: schema_check_results = connection.execute( sa.text( "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'connection_test';" ) ).fetchall() if len(schema_check_results) == 0: with engine.begin() as connection: connection.execute(sa.text("CREATE SCHEMA connection_test;")) table_check_results = connection.execute( sa.text( """ SELECT EXISTS ( SELECT FROM information_schema.tables WHERE table_schema = 'connection_test' AND table_name = 'test_df' ); """ ) ).fetchall() if table_check_results != [(True,)]: add_dataframe_to_db( df=test_df, name="test_df", con=engine, index=True, schema="connection_test", ) # Return a connection string to this newly-created db return engine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())", "def setUp(self):\n self.addTypeEqualityFunc(pandas.DataFrame, self.assertDataframeEqual)\n self.database_connection.connect()", "def setUp(self):\n\n self.app = Flask(__name__)\n self.client = self.app.test_client()\n self.app.config['TESTING'] = True\n db = SQLAlchemy()\n\n connect_to_db(self.app, \"postgresql:///testdb\")\n\n subprocess.check_output(\"psql testdb < test_query.sql\", shell=True)\n\n with self.app.test_request_context():\n\n db.create_all()", "def setUp(self):\n\n self.app = Flask(__name__)\n self.client = self.app.test_client()\n self.app.config['TESTING'] = True\n db = SQLAlchemy()\n\n connect_to_db(self.app, \"postgresql:///testdb\")\n\n subprocess.check_output(\"psql testdb < test_query.sql\", shell=True)\n\n with self.app.test_request_context():\n\n db.create_all()", "def setUp(self):\n\n self.app = Flask(__name__)\n self.client = self.app.test_client()\n self.app.config['TESTING'] = True\n db = SQLAlchemy()\n\n connect_to_db(self.app, \"postgresql:///testdb\")\n\n subprocess.check_output(\"psql testdb < test_query.sql\", shell=True)\n\n with self.app.test_request_context():\n\n db.create_all()", "def create_test_db(self, *args, **kw):\n self.destroy_test_db()\n self.connection.use_test_datastore = True\n self.connection.flush()", "def setUp(self):\n\n self.app = Flask(__name__)\n self.client = self.app.test_client()\n self.app.config['TESTING'] = True\n\n db = SQLAlchemy()\n connect_to_db(self.app, \"postgresql:///testdb\")\n\n subprocess.check_output(\"psql testdb < test_query.sql\", shell=True)\n\n with self.app.test_request_context():\n\n db.create_all()", "def migrated_postgres(pg_url, migrated_postgres_template):\n template_db = URL(migrated_postgres_template).name\n with tmp_database(pg_url, 'pytest', template=template_db) as tmp_url:\n yield tmp_url", "def test_database():\n conn = psycopg2.connect(host=DB_HOST,\n port=DB_PORT,\n dbname=DB_NAME,\n user=DB_USER,\n password=DB_PASS)\n\n cur = conn.cursor()\n\n # Create table\n cur.execute(\"CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);\")\n\n # Bung in some random data\n for entry in range(10):\n cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s);\",\n (randint(0, 1000), _randstr()))\n\n # Read back everything we just inserted.\n cur.execute(\"SELECT * FROM test;\")\n data = cur.fetchall()\n print(json.dumps(data, indent=4, sort_keys=True))\n\n conn.commit()\n cur.close()\n conn.close()", "def generate_fake_db(path_db: Path) -> None:\n print(f'Creating: {path_db}') # noqa: T001\n with SQLConnection(path_db) as conn:\n cursor = conn.cursor()\n cursor.execute('DROP TABLE IF EXISTS test_data;')\n conn.commit()\n\n cursor.execute(\"\"\"CREATE TABLE test_data (\n time FLOAT NOT NULL,\n temp FLOAT NOT NULL,\n min FLOAT NOT NULL,\n max FLOAT NOT NULL\n );\"\"\")\n conn.commit()\n\n while True:\n # Generate random data points and add to the database\n points = 1000\n mu, sigma = (10, 8) # mean and standard deviation\n samples = np.random.normal(mu, sigma, points)\n for idx in tqdm(range(points)):\n values = f'{time.time()}, {samples[idx]}, {samples[idx] - 2.1}, {samples[idx] + 3.2}'\n cursor.execute(f'INSERT INTO test_data (time, temp, min, max) VALUES ({values});') # noqa: S608, Q440\n conn.commit()\n time.sleep(1)", "def 
_db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def dburl(\n tmp_path_factory: pytest.TempPathFactory,\n person_data: pandas.DataFrame,\n student_data: pandas.DataFrame,\n school_data: pandas.DataFrame,\n ) -> str:\n path = tmp_path_factory.mktemp('alchemy') / 'test.db'\n url = f'sqlite:///{path.absolute()}'\n connection = sqlalchemy.create_engine(url)\n person_data.to_sql('person', connection, index=False)\n student_data.to_sql('student', connection, index=False)\n school_data.to_sql('school', connection, index=False)\n return url", "def withOutPandas()-> None:\n logging.info(f\"Making sure the DB is set up {getTime()}\" )\n\n with getCon() as conn:\n with getCursor(conn,True) as cur:\n cur.execute(\"CREATE TABLE IF NOT EXISTS data (iso_code TEXT,continent TEXT,location TEXT,date DATE,total_cases FLOAT,new_cases FLOAT,new_cases_smoothed FLOAT,total_deaths FLOAT,new_deaths FLOAT,new_deaths_smoothed FLOAT,total_cases_per_million FLOAT,new_cases_per_million FLOAT,new_cases_smoothed_per_million FLOAT,total_deaths_per_million FLOAT,new_deaths_per_million FLOAT,new_deaths_smoothed_per_million FLOAT,reproduction_rate FLOAT,icu_patients FLOAT,icu_patients_per_million FLOAT,hosp_patients FLOAT,hosp_patients_per_million FLOAT,weekly_icu_admissions FLOAT,weekly_icu_admissions_per_million FLOAT,weekly_hosp_admissions FLOAT,weekly_hosp_admissions_per_million FLOAT,new_tests FLOAT,total_tests FLOAT,total_tests_per_thousand FLOAT,new_tests_per_thousand FLOAT,new_tests_smoothed FLOAT,new_tests_smoothed_per_thousand FLOAT,positive_rate FLOAT,tests_per_case FLOAT,tests_units TEXT,total_vaccinations FLOAT,people_vaccinated FLOAT,people_fully_vaccinated FLOAT,total_boosters FLOAT,new_vaccinations FLOAT,new_vaccinations_smoothed FLOAT,total_vaccinations_per_hundred FLOAT,people_vaccinated_per_hundred FLOAT,people_fully_vaccinated_per_hundred FLOAT,total_boosters_per_hundred FLOAT,new_vaccinations_smoothed_per_million FLOAT,stringency_index FLOAT,population FLOAT,population_density FLOAT,median_age FLOAT,aged_65_older FLOAT,aged_70_older FLOAT,gdp_per_capita FLOAT,extreme_poverty FLOAT,cardiovasc_death_rate FLOAT,diabetes_prevalence FLOAT,female_smokers FLOAT,male_smokers FLOAT,handwashing_facilities FLOAT,hospital_beds_per_thousand FLOAT,life_expectancy FLOAT,human_development_index FLOAT,excess_mortality_cumulative_absolute FLOAT,excess_mortality_cumulative FLOAT,excess_mortality FLOAT,excess_mortality_cumulative_per_million FLOAT)\")\n cur.execute(\"TRUNCATE data\")\n \n with open(DATA_FILE) as f:\n data = list(csv.reader(f))\n logging.info(f\"Slicing {getTime()}\")\n\n SLICE_SIZE = len(data) // 100\n rows = [data[i:i + SLICE_SIZE] for i in range(1, len(data), SLICE_SIZE)]\n logging.info(f\"Finished slicing {getTime()}\")\n logging.info(f\"Inserting {getTime()}\")\n\n with Pool(2) as p:\n p.map(insert,rows)\n logging.info(f\"Finished Inserting {getTime()}\")\n \n logging.info(f\"Gettign Uniqe Contries {getTime()}\")\n with getCon() as conn:\n with getCursor(conn) as cur:\n cur.execute(\"SELECT DISTINCT location FROM data\")\n result =cur.fetchall()\n with open(RESULT_FILE,\"w\", newline='') as r:\n writer = csv.DictWriter(r,fieldnames=[\"Uniqe Countries\"])\n writer.writeheader()\n writer.writerow({\"Uniqe Countries\":len(result)})", "def test_create_tables(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n 
\"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n result = [x[2] for x in result]\n self.assertCountEqual(result,\n ['bioms', 'counts', 'networks',\n 'taxonomy', 'edges', 'samples', 'meta']\n )\n cur.close()\n conn.close()\n conn_object.delete_tables()", "def ingest_data(ingestion_df: pd.DataFrame, db_credentials: Tuple[str, str], mock_data: bool = True) -> None:\n try:\n try:\n Neo4JTools.connect(\n \"bolt://graph-db:7687\", db_credentials[0], db_credentials[1])\n except:\n Neo4JTools.connect(\n \"bolt://localhost:7687\", db_credentials[0], db_credentials[1])\n except neo4j.exceptions.ServiceUnavailable as e:\n logging.log(logging.ERROR, str(e))\n\n Neo4JTools.initalise_database()\n Neo4JTools.clear_database()\n\n if mock_data:\n ingestion_df: pd.DataFrame = ToolingManager.add_mock_data(ingestion_df)\n\n Neo4JTools.populate_model(ingestion_df, mock_data=mock_data)\n return", "def load_testdb(c, dbname=\"test_template\", fpath=\"tests/test_db.sql\"):\n default_env = {\n \"PATH\": os.environ[\"PATH\"],\n \"PYTHONPATH\": os.path.abspath(os.path.dirname(__file__)),\n \"LANG\": \"en_US.UTF-8\",\n \"POSTGRES_DB\": dbname,\n \"POSTGRES_HOST\": \"localhost\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PORT\": \"5432\",\n }\n\n env = os.environ\n env.update(default_env)\n\n psql_command = (\n f'psql -h {default_env[\"POSTGRES_HOST\"]} '\n f'-p {default_env[\"POSTGRES_PORT\"]} '\n f'-U {default_env[\"POSTGRES_USER\"]}'\n )\n\n c.run(f'{psql_command} postgres -c \"drop database if exists {dbname}\";', env=env)\n c.run(f'{psql_command} postgres -c \"create database {dbname}\";', env=env)\n c.run(f\"{psql_command} {dbname} < {fpath}\", env=env)\n # update test db to the latest migrations\n c.run(f\"alembic -c ./alembic.ini upgrade head\", env=env)", "def setUp(self):\n self.conn = seed.connect_to_db(\"testing\")\n self.cur = self.conn.cursor()\n\n seed.cur = self.conn.cursor()\n seed.conn = self.conn\n\n self.tables = [\n {\n \"name\": \"people\", \n \"schema\": [(\"firstname\", \"10\", \"VARCHAR\"), (\"lastname\", \"10\", \"VARCHAR\"), (\"age\", \"3\", \"INTEGER\"), (\"active\", \"1\", \"BOOLEAN\")]\n },\n {\n \"name\": \"animals\",\n \"schema\": [(\"animal_id\", \"7\", \"INTEGER\"), (\"name\", \"10\", \"VARCHAR\"), (\"species\", \"20\", \"VARCHAR\")]\n },\n {\n \"name\":\"testformat1\",\n \"schema\": [(\"name\", \"10\", \"VARCHAR\"), (\"valid\", \"1\", \"BOOLEAN\"), (\"count\", \"3\", \"INTEGER\")]\n }\n ]\n for table in self.tables:\n seed.create_table(table[\"name\"], table[\"schema\"])", "async def populate_test_data(self):\n async with (await self._get_connection_pool()).acquire() as conn:\n await conn.execute('delete from foglamp.tasks')\n await conn.execute('delete from foglamp.schedules')\n await conn.execute('delete from foglamp.scheduled_processes')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep1', '[\"python3\", \"../scripts/sleep.py\", \"1\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep10', '[\"python3\", \"../scripts/sleep.py\", \"10\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, script)\n values('sleep30', '[\"python3\", \"../scripts/sleep.py\", \"30\"]')''')\n await conn.execute(\n '''insert into foglamp.scheduled_processes(name, 
script)\n values('sleep5', '[\"python3\", \"../scripts/sleep.py\", \"5\"]')''')", "async def test_db():\n test_uta_db = UTADatabase()\n await test_uta_db._create_genomic_table()\n return test_uta_db", "def create_database(connection):\r\n cursor = connection.cursor()\r\n\r\n # create an orders table, dropping some duplicate rows to satisfy the primary key constraint\r\n print(\"Creating orders table ...\")\r\n cursor.execute('''\r\n CREATE TABLE JD_order_data (\r\n order_ID TEXT NOT NULL CHECK(LENGTH(order_ID) = 10),\r\n sku_ID TEXT NOT NULL CHECK(LENGTH(sku_ID) = 10),\r\n user_ID TEXT NOT NULL CHECK(LENGTH(user_ID) = 10),\r\n order_time DATETIME NOT NULL,\r\n quantity INT NOT NULL,\r\n final_unit_price REAL NOT NULL,\r\n PRIMARY KEY (order_ID, sku_ID)\r\n )\r\n ''')\r\n orders = pd.read_csv('../../data/JD_order_data.csv', low_memory=False)\r\n orders = orders[['order_ID', 'sku_ID', 'user_ID', 'order_time', 'quantity', 'final_unit_price']]\r\n orders = orders.groupby(['order_ID', 'sku_ID'], as_index=False).first()\r\n orders.to_sql('JD_order_data', connection, index=False, if_exists='append')\r\n cursor.execute('CREATE INDEX orders_user_index ON JD_order_data (user_ID)')\r\n\r\n # create a delivery table\r\n print(\"Creating delivery table ...\")\r\n cursor.execute('''\r\n CREATE TABLE JD_delivery_data (\r\n order_ID TEXT NOT NULL CHECK(LENGTH(order_ID) = 10),\r\n package_ID TEXT NOT NULL CHECK(LENGTH(package_ID) = 10),\r\n ship_out_time DATETIME NOT NULL,\r\n PRIMARY KEY (order_ID, package_ID),\r\n FOREIGN KEY (order_ID) REFERENCES JD_order_data (order_ID)\r\n )\r\n ''')\r\n delivery = pd.read_csv('../../data/JD_delivery_data.csv', parse_dates=['ship_out_time'])\r\n delivery = delivery[['order_ID', 'package_ID', 'ship_out_time']]\r\n delivery.to_sql('JD_delivery_data', connection, index=False, if_exists='append')\r\n\r\n # create a clicks table\r\n print(\"Creating clicks table ...\")\r\n cursor.execute('''\r\n CREATE TABLE JD_click_data (\r\n user_ID TEXT NOT NULL CHECK(LENGTH(user_ID) = 10),\r\n sku_ID TEXT NOT NULL CHECK(LENGTH(sku_ID) = 10),\r\n request_time DATETIME NOT NULL,\r\n FOREIGN KEY (user_ID) REFERENCES JD_order_data (user_ID),\r\n FOREIGN KEY (sku_ID) REFERENCES JD_order_data (sku_ID)\r\n )\r\n ''')\r\n clicks = pd.read_csv('../../data/JD_click_data.csv', parse_dates=['request_time'])\r\n clicks = clicks[clicks['user_ID'] != '-']\r\n clicks = clicks[['user_ID', 'sku_ID', 'request_time']]\r\n clicks.to_sql('JD_click_data', connection, index=False, if_exists='append')\r\n cursor.execute('CREATE INDEX clicks_user_index ON JD_click_data (user_ID)')\r\n cursor.execute('CREATE INDEX clicks_sku_index ON JD_click_data (sku_ID)')\r\n\r\n # Create a user table\r\n print(\"Creating users table ...\")\r\n cursor.execute('''\r\n CREATE TABLE JD_user_data (\r\n user_ID TEXT NOT NULL CHECK(LENGTH(user_ID) = 10),\r\n plus INT NOT NULL CHECK (plus IN (0, 1)),\r\n PRIMARY KEY (user_ID)\r\n )\r\n ''')\r\n users = pd.read_csv('../../data/JD_user_data.csv', low_memory=False)\r\n users = users[['user_ID', 'plus']]\r\n users = users.groupby(['user_ID'], as_index=False).first()\r\n users.to_sql('JD_user_data', connection, index=False, if_exists='append')\r\n cursor.execute('CREATE INDEX users_user_index ON JD_user_data (user_ID)')", "def test_create_staging_table_should_sends_create_table_sql(db_connection):\n loader = PostgresLoader(\"local\", 5432, \"db\", \"user\", \"pass\")\n loader.create_staging_table()\n execute_params = []\n for name, args, _ in db_connection.mock_calls:\n if 
name.endswith(\"execute\"):\n for arg in args:\n execute_params.append(arg)\n assert any(\"CREATE\" in arg for arg in execute_params)\n assert any(\"TABLE\" in arg for arg in execute_params)\n assert any(loader.table_name in arg for arg in execute_params)", "def insert_db():\n populate_tables()", "def init_database(db: sa.engine.Connectable):\n\n # setup the Postgres extensions and schema\n db.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\" WITH SCHEMA public;\n \"\"\")\n db.execute(\n ';\\n'.join(\n 'CREATE SCHEMA IF NOT EXISTS {}'.format(s) for s in SCHEMAS.values()\n )\n )\n\n # create the schema from the models\n METADATA.create_all(bind=db)", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n example_data()", "def create_example_test_table(conn):\n execute_sql_script(conn, \"06_create_example_test_table.sql\")", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n #self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n self.database_path = 'postgresql+psycopg2://{}:{}@{}/{}'.format('postgres','picasso0', 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def create_teachers_db(connection):\r\n with connection:\r\n connection.execute(CREATE_TABLE_TEACHERS_DATA)", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def test_inserted_data(client):\n data = pd.read_csv(\"housing.csv\")\n data = format_data_housing(data)\n House.insert_from_pd(data)\n houses: DataFrame = pd.read_sql(\"SELECT * FROM house\", db.engine)\n assert len(houses) == data.shape[0]\n houses = house_results_to_dataframe(houses)\n assert_frame_equal(houses, data, check_dtype=False)", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables" ]
[ "0.6764111", "0.6593601", "0.65928036", "0.65928036", "0.65928036", "0.6573342", "0.65390044", "0.6480772", "0.6448915", "0.6418131", "0.63582337", "0.6346796", "0.62995", "0.6225315", "0.62085634", "0.6200947", "0.61726683", "0.6159483", "0.61411804", "0.61193913", "0.6107856", "0.61013675", "0.6095047", "0.6088667", "0.60839653", "0.60691196", "0.606584", "0.60461533", "0.60407764", "0.6029333" ]
0.7746262
0
Tests the golden path for setting up a StreamlinedSQLDatasource using test_yaml_config
def test_golden_path_sql_datasource_configuration( mock_emit, caplog, empty_data_context_stats_enabled, sa, test_connectable_postgresql_db, ): context: DataContext = empty_data_context_stats_enabled with set_directory(context.root_directory): # Everything below this line (except for asserts) is what we expect users to run as part of the golden path. import great_expectations as gx context = gx.get_context() db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost") yaml_config = f""" class_name: SimpleSqlalchemyDatasource credentials: drivername: postgresql username: postgres password: "" host: {db_hostname} port: 5432 database: test_ci introspection: whole_table_with_limits: sampling_method: _sample_using_limit sampling_kwargs: n: 10 """ # noinspection PyUnusedLocal report_object = context.test_yaml_config( name="my_datasource", yaml_config=yaml_config, return_mode="report_object", ) assert mock_emit.call_count == 2 # Substitute anonymized names since it changes for each run anonymized_datasource_name = mock_emit.call_args_list[1][0][0]["event_payload"][ "anonymized_name" ] anonymized_data_connector_name = mock_emit.call_args_list[1][0][0][ "event_payload" ]["anonymized_data_connectors"][0]["anonymized_name"] expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "data_context.test_yaml_config", "event_payload": { "anonymized_name": anonymized_datasource_name, "parent_class": "SimpleSqlalchemyDatasource", "anonymized_execution_engine": { "parent_class": "SqlAlchemyExecutionEngine" }, "anonymized_data_connectors": [ { "anonymized_name": anonymized_data_connector_name, "parent_class": "InferredAssetSqlDataConnector", } ], }, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list print(json.dumps(report_object, indent=2)) print(context.datasources) context.get_batch_list( "my_datasource", "whole_table_with_limits", "test_df", ) # assert len(my_batch.data.fetchall()) == 10 with pytest.raises(KeyError): context.get_batch_list( "my_datasource", "whole_table_with_limits", "DOES_NOT_EXIST", ) my_validator = context.get_validator( datasource_name="my_datasource", data_connector_name="whole_table_with_limits", data_asset_name="test_df", expectation_suite=ExpectationSuite( "my_expectation_suite", data_context=context ), ) my_evr = my_validator.expect_table_columns_to_match_set(column_set=[]) print(my_evr) # my_evr = my_validator.expect_column_values_to_be_between( # column="x", # min_value=0, # max_value=4, # ) # assert my_evr.success # TODO: <Alex>ALEX</Alex> # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["a", "b", "c"]) # assert my_evr.success # Confirm that logs do not contain any exceptions or invalid messages assert not usage_stats_exceptions_exist(messages=caplog.messages) assert not usage_stats_invalid_messages_exist(messages=caplog.messages)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_configure():\n exec(open(\"script/generate_sql\").read())", "def test_export_datasources_original(app_context, fs):\n # pylint: disable=reimported, redefined-outer-name\n import superset.cli.importexport # noqa: F811\n\n # reload to define export_dashboards correctly based on the\n # feature flags\n importlib.reload(superset.cli.importexport)\n\n runner = app.test_cli_runner()\n response = runner.invoke(\n superset.cli.importexport.export_datasources, (\"-f\", \"datasources.yaml\")\n )\n\n assert response.exit_code == 0\n\n assert Path(\"datasources.yaml\").exists()\n\n # check that file is valid JSON\n with open(\"datasources.yaml\") as fp:\n contents = fp.read()\n yaml.safe_load(contents)", "def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"format_sql: mark format_sql tests.\")", "def test_golden_path_runtime_data_connector_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = \"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\"\n\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n\n assert report_object[\"execution_engine\"] == {\n \"caching\": True,\n \"module_name\": \"great_expectations.execution_engine.pandas_execution_engine\",\n \"class_name\": \"PandasExecutionEngine\",\n \"discard_subset_failing_expectations\": False,\n \"boto3_options\": {},\n \"azure_options\": {},\n \"gcs_options\": {},\n }\n assert report_object[\"data_connectors\"][\"count\"] == 1\n\n # checking the correct number of data_assets have come back\n assert (\n report_object[\"data_connectors\"][\"default_runtime_data_connector_name\"][\n \"data_asset_count\"\n ]\n == 0\n )\n\n # checking that note has come back\n assert (\n report_object[\"data_connectors\"][\"default_runtime_data_connector_name\"][\n \"note\"\n ]\n == \"RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest\"\n )\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_is_datasource_for(lasco):\n assert lasco.is_datasource_for(lasco.data, lasco.meta)", "async def test_setup_config(recorder_mock: Recorder, hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.sql.config_flow.sqlalchemy.create_engine\",\n ):\n assert await async_setup_component(hass, DOMAIN, YAML_CONFIG_NO_DB)\n await hass.async_block_till_done()", "def test_golden_path_configured_asset_pandas_datasource_configuration(\n mock_emit, 
caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_foxtrot/A/A-1.csv\",\n \"test_dir_foxtrot/A/A-2.csv\",\n \"test_dir_foxtrot/A/A-3.csv\",\n \"test_dir_foxtrot/B/B-1.txt\",\n \"test_dir_foxtrot/B/B-2.txt\",\n \"test_dir_foxtrot/B/B-3.txt\",\n \"test_dir_foxtrot/C/C-2017.csv\",\n \"test_dir_foxtrot/C/C-2018.csv\",\n \"test_dir_foxtrot/C/C-2019.csv\",\n \"test_dir_foxtrot/D/D-aaa.csv\",\n \"test_dir_foxtrot/D/D-bbb.csv\",\n \"test_dir_foxtrot/D/D-ccc.csv\",\n \"test_dir_foxtrot/D/D-ddd.csv\",\n \"test_dir_foxtrot/D/D-eee.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_filesystem_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {base_directory}\n # glob_directive: \"*\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - alphanumeric\n\n assets:\n A:\n base_directory: {base_directory}/test_dir_foxtrot/A\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n B:\n base_directory: {base_directory}/test_dir_foxtrot/B\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n C:\n base_directory: {base_directory}/test_dir_foxtrot/C\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - year\n D:\n base_directory: {base_directory}/test_dir_foxtrot/D\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - checksum\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"ConfiguredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n 
batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n my_batch.head()\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"C\",\n data_connector_query={\"batch_filter_parameters\": {\"year\": \"2019\"}},\n create_expectation_suite_with_name=\"my_expectations\",\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_evr = my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert my_evr.success\n assert mock_emit.call_count == 4\n\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def setUpClass(cls):\n super(ExistingDataSourceTest, cls).setUpClass()\n django.setup()", "def test_golden_path_inferred_asset_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n \"test_dir_charlie/B/B-1.csv\",\n \"test_dir_charlie/B/B-2.csv\",\n \"test_dir_charlie/B/B-3.csv\",\n \"test_dir_charlie/C/C-1.csv\",\n \"test_dir_charlie/C/C-2.csv\",\n \"test_dir_charlie/C/C-3.csv\",\n \"test_dir_charlie/D/D-1.csv\",\n \"test_dir_charlie/D/D-2.csv\",\n \"test_dir_charlie/D/D-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_filesystem_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n 
base_directory: {base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"InferredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"D\",\n data_connector_query={\"batch_filter_parameters\": {\"number\": \"3\"}},\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n assert mock_emit.call_count == 4\n\n my_evr = my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert 
my_evr.success\n\n # TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n # assert mock_emit.call_count == 1\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def setUp(self):\n super(PlayTests, self).setUp(\n \"tests/data/shakespeare/\", \"structure.json\", \"brief_example.xml\")", "def test_golden_path_runtime_data_connector_and_inferred_data_connector_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n \"test_dir_charlie/B/B-1.csv\",\n \"test_dir_charlie/B/B-2.csv\",\n \"test_dir_charlie/B/B-3.csv\",\n \"test_dir_charlie/C/C-1.csv\",\n \"test_dir_charlie/C/C-2.csv\",\n \"test_dir_charlie/C/C-3.csv\",\n \"test_dir_charlie/D/D-1.csv\",\n \"test_dir_charlie/D/D-2.csv\",\n \"test_dir_charlie/D/D-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n default_inferred_data_connector_name:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n \"\"\"\n\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n\n assert report_object[\"execution_engine\"] == {\n \"caching\": True,\n \"module_name\": \"great_expectations.execution_engine.pandas_execution_engine\",\n \"class_name\": \"PandasExecutionEngine\",\n \"discard_subset_failing_expectations\": False,\n \"boto3_options\": {},\n \"azure_options\": {},\n \"gcs_options\": {},\n }\n assert report_object[\"data_connectors\"][\"count\"] == 2\n assert report_object[\"data_connectors\"][\n \"default_runtime_data_connector_name\"\n ] == {\n \"class_name\": \"RuntimeDataConnector\",\n \"data_asset_count\": 0,\n \"data_assets\": {},\n \"example_data_asset_names\": [],\n \"example_unmatched_data_references\": [],\n \"note\": \"RuntimeDataConnector will not have data_asset_names until they are \"\n \"passed in through RuntimeBatchRequest\",\n \"unmatched_data_reference_count\": 0,\n }\n assert report_object[\"data_connectors\"][\n \"default_inferred_data_connector_name\"\n ] == {\n \"class_name\": \"InferredAssetFilesystemDataConnector\",\n \"data_asset_count\": 4,\n \"example_data_asset_names\": [\"A\", \"B\", \"C\"],\n \"data_assets\": 
{\n \"A\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"A/A-1.csv\", \"A/A-2.csv\", \"A/A-3.csv\"],\n },\n \"B\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"B/B-1.csv\", \"B/B-2.csv\", \"B/B-3.csv\"],\n },\n \"C\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"C/C-1.csv\", \"C/C-2.csv\", \"C/C-3.csv\"],\n },\n },\n \"unmatched_data_reference_count\": 0,\n \"example_unmatched_data_references\": [],\n }\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_scrapping(self):\n self.assertEqual(ScrappingConfig.name, \"scrapping\")", "def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def test_load_config_safe(self):\n self.__test_load_config_safe(\".scuba.yml\")", "def test_load_yaml_def(self):\n la_provider = self.la_provider\n with self.assertRaises((MsticpyException, ValueError)) as cm:\n file_path = Path(_TEST_DATA, \"data_q_meta_fail.yaml\")\n la_provider.import_query_file(query_file=file_path)\n self.assertIn(\"no data families defined\", str(cm.exception))\n\n with self.assertRaises((MsticpyException, ValueError)) as cm:\n file_path = Path(_TEST_DATA, \"data_q_source_fail_param.yaml\")\n la_provider.import_query_file(query_file=file_path)\n self.assertIn(\"Missing parameters are\", str(cm.exception))\n\n with self.assertRaises((MsticpyException, ValueError)) as cm:\n file_path = Path(_TEST_DATA, \"data_q_source_fail_type.yaml\")\n la_provider.import_query_file(query_file=file_path)\n self.assertIn(\"Parameters with missing types\", str(cm.exception))\n\n before_queries = len(list(la_provider.list_queries()))\n file_path = Path(_TEST_DATA, \"data_q_success.yaml\")\n la_provider.import_query_file(query_file=file_path)\n\n self.assertEqual(before_queries + 3, len(list(la_provider.list_queries())))", "async def test_remove_configured_db_url_if_not_needed_when_needed(\n recorder_mock: Recorder,\n hass: HomeAssistant,\n) -> None:\n db_url = \"mssql://\"\n\n config = {\n \"db_url\": db_url,\n \"query\": \"SELECT 5 as value\",\n \"column\": \"value\",\n \"name\": \"count_tables\",\n }\n\n config_entry = await init_integration(hass, config)\n\n assert config_entry.options.get(\"db_url\") == db_url", "def setUp(self):\n self.test_data = MockPyMySqlDataSource().load()", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass", "async def test_setup_invalid_config(\n recorder_mock: Recorder, hass: HomeAssistant\n) -> None:\n with patch(\n \"homeassistant.components.sql.config_flow.sqlalchemy.create_engine\",\n ):\n assert not await async_setup_component(hass, DOMAIN, YAML_CONFIG_INVALID)\n await hass.async_block_till_done()", "def conf_master_datasource():\n\n if DB_TYPE == \"mysql\":\n file_path = '../data/dbconnectors/mysql/master-datasources.xml'\n url = ['jdbc:mysql://%s:%d/%s?useSSL=false' % (HOST, PORT, REG_DB),\n 'jdbc:mysql://%s:%d/%s?useSSL=false' % (HOST, PORT, USER_DB),\n 'jdbc:mysql://%s:%d/%s?useSSL=false' % (HOST, PORT, AM_DB)]\n try:\n master_datasource_conf(file_path, 'url', 
url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for MySQL database!\")\n except:\n print(\"ERROR: configuring master datasource for MySQL database!!!\")\n elif DB_TYPE == \"oracle\":\n file_path = '../data/dbconnectors/oracle/master-datasources.xml'\n url = 'jdbc:oracle:thin:%s@%s:%d/%s' % (USER_NAME, HOST, PORT, SID)\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for Oracle database!\")\n except:\n print(\"ERROR: configuring master datasource for Oracle database!!!\")\n elif DB_TYPE == \"mssql\":\n file_path = '../data/dbconnectors/mssql/master-datasources.xml'\n url = ['jdbc:sqlserver://%s:%d;databaseName=%s;SendStringParametersAsUnicode=false' % (HOST, PORT, REG_DB),\n 'jdbc:sqlserver://%s:%d;databaseName=%s;SendStringParametersAsUnicode=false' % (HOST, PORT, USER_DB),\n 'jdbc:sqlserver://%s:%d;databaseName=%s;SendStringParametersAsUnicode=false' % (HOST, PORT, AM_DB)]\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for MSSQL database!\")\n except:\n print(\"ERROR: configuring master datasource for MSSQL database!!!\")\n elif DB_TYPE == \"postgresql\":\n file_path = '../data/dbconnectors/postgresql/master-datasources.xml'\n url = ['jdbc:postgresql://%s:%d/%s' % (HOST, PORT, REG_DB),\n 'jdbc:postgresql://%s:%d/%s' % (HOST, PORT, USER_DB),\n 'jdbc:postgresql://%s:%d/%s' % (HOST, PORT, AM_DB)]\n try:\n master_datasource_conf(file_path, 'url', url)\n master_datasource_conf(file_path, 'username', USER_NAME)\n master_datasource_conf(file_path, 'password', PWD)\n print(\"Successfully configured master-datasource.xml file for PostgreSQL database!\")\n except:\n print(\"ERROR: configuring master datasource for PostgreSQL database!!!\")\n else:\n print(\"Database type is invalid!!!\")", "def configs() -> Path:\n return TEST_ROOT.parent / \"fixtures\" / \"configs\"", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')", "def test_paths_properties():\n template_script = get_template_script(output_dir='output1')\n template_script['options']['setup_dir'] = 'setup1'\n exp_builder = ExperimentBuilder(template_script)\n\n # The database path is configured correctly.\n assert exp_builder._db.setup_dir == os.path.join('output1', 'setup1')\n\n # Updating paths also updates the database main directory.\n exp_builder.output_dir = 'output2'\n exp_builder.setup_dir = 'setup2'\n assert exp_builder._db.setup_dir == os.path.join('output2', 'setup2')", "def test_missing_data_sources(self):", "def setUp(self):\n with open(SRC_PATH + \"configs/etl_config.json\", \"r\") as f:\n self.config = json.loads(f.read())\n self.spark = SparkBuilder(\"test\").build_sc()\n self.test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../tests/test_data/')", "def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n 
self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def configuration(request):\n config = testing.setUp(settings={\n 'sqlalchemy.url': 'postgres:///test_database'\n })\n config.include(\"space_rocks.models\")\n\n def teardown():\n testing.tearDown()\n\n request.addfinalizer(teardown)\n return config" ]
[ "0.65528226", "0.60728145", "0.6067507", "0.600542", "0.59881943", "0.59565294", "0.59358394", "0.5818095", "0.57616323", "0.5759329", "0.57457626", "0.57400787", "0.57036346", "0.56909466", "0.567239", "0.5665459", "0.5654157", "0.56532407", "0.56267065", "0.55962425", "0.55950546", "0.55599606", "0.55483943", "0.5547329", "0.55470437", "0.55408436", "0.5538024", "0.5530479", "0.5528021", "0.5502738" ]
0.6693482
0
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
def test_golden_path_inferred_asset_pandas_datasource_configuration( mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory ): base_directory = str( tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration") ) create_files_in_directory( directory=base_directory, file_name_list=[ "test_dir_charlie/A/A-1.csv", "test_dir_charlie/A/A-2.csv", "test_dir_charlie/A/A-3.csv", "test_dir_charlie/B/B-1.csv", "test_dir_charlie/B/B-2.csv", "test_dir_charlie/B/B-3.csv", "test_dir_charlie/C/C-1.csv", "test_dir_charlie/C/C-2.csv", "test_dir_charlie/C/C-3.csv", "test_dir_charlie/D/D-1.csv", "test_dir_charlie/D/D-2.csv", "test_dir_charlie/D/D-3.csv", ], file_content_fn=lambda: test_df.to_csv(header=True, index=False), ) context: DataContext = empty_data_context_stats_enabled with set_directory(context.root_directory): import great_expectations as gx context = gx.get_context() mock_emit.reset_mock() # Remove data_context.__init__ call yaml_config = f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: my_filesystem_data_connector: class_name: InferredAssetFilesystemDataConnector base_directory: {base_directory}/test_dir_charlie glob_directive: "*/*.csv" default_regex: pattern: (.+)/(.+)-(\\d+)\\.csv group_names: - subdirectory - data_asset_name - number """ # noinspection PyUnusedLocal context.test_yaml_config( name="my_directory_datasource", yaml_config=yaml_config, return_mode="report_object", ) # print(json.dumps(report_object, indent=2)) # print(context.datasources) assert mock_emit.call_count == 1 # Substitute anonymized names since it changes for each run anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_name" ] anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][ "event_payload" ]["anonymized_execution_engine"]["anonymized_name"] anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][ "event_payload" ]["anonymized_data_connectors"][0]["anonymized_name"] expected_call_args_list = [ mock.call( { "event": "data_context.test_yaml_config", "event_payload": { "anonymized_name": anonymized_datasource_name, "parent_class": "Datasource", "anonymized_execution_engine": { "anonymized_name": anonymized_execution_engine_name, "parent_class": "PandasExecutionEngine", }, "anonymized_data_connectors": [ { "anonymized_name": anonymized_data_connector_name, "parent_class": "InferredAssetFilesystemDataConnector", } ], }, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list my_batch_list = context.get_batch_list( datasource_name="my_directory_datasource", data_connector_name="my_filesystem_data_connector", data_asset_name="A", batch_identifiers={ "number": "2", }, batch_spec_passthrough={ "sampling_method": "_sample_using_hash", "sampling_kwargs": { "column_name": "date", "hash_function_name": "md5", "hash_value": "f", }, }, ) my_batch = my_batch_list[0] assert my_batch.batch_definition["data_asset_name"] == "A" assert mock_emit.call_count == 2 df_data = my_batch.data.dataframe assert df_data.shape == (10, 10) df_data["date"] = df_data.apply( lambda row: datetime.datetime.strptime(row["date"], "%Y-%m-%d").date(), axis=1, ) assert ( test_df[ (test_df["date"] == datetime.date(2020, 1, 15)) | (test_df["date"] == datetime.date(2020, 1, 29)) ] .drop("timestamp", axis=1) .equals(df_data.drop("timestamp", axis=1)) ) # Empty batch list won't error but still will emit usage stats batch_list = context.get_batch_list( 
datasource_name="my_directory_datasource", data_connector_name="my_filesystem_data_connector", data_asset_name="DOES_NOT_EXIST", ) assert len(batch_list) == 0 assert mock_emit.call_count == 3 my_validator = context.get_validator( datasource_name="my_directory_datasource", data_connector_name="my_filesystem_data_connector", data_asset_name="D", data_connector_query={"batch_filter_parameters": {"number": "3"}}, expectation_suite=ExpectationSuite( "my_expectation_suite", data_context=context ), batch_spec_passthrough={ "sampling_method": "_sample_using_hash", "sampling_kwargs": { "column_name": "date", "hash_function_name": "md5", "hash_value": "f", }, }, ) assert mock_emit.call_count == 4 my_evr = my_validator.expect_column_values_to_be_between( column="d", min_value=1, max_value=31 ) assert my_evr.success # TODO: <Alex>ALEX</Alex> # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["x", "y", "z"]) # assert my_evr.success # No other usage stats calls detected # assert mock_emit.call_count == 1 assert mock_emit.call_count == 4 # Confirm that logs do not contain any exceptions or invalid messages assert not usage_stats_exceptions_exist(messages=caplog.messages) assert not usage_stats_invalid_messages_exist(messages=caplog.messages)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_golden_path_runtime_data_connector_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = \"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\"\n\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n\n assert report_object[\"execution_engine\"] == {\n \"caching\": True,\n \"module_name\": \"great_expectations.execution_engine.pandas_execution_engine\",\n \"class_name\": \"PandasExecutionEngine\",\n \"discard_subset_failing_expectations\": False,\n \"boto3_options\": {},\n \"azure_options\": {},\n \"gcs_options\": {},\n }\n assert report_object[\"data_connectors\"][\"count\"] == 1\n\n # checking the correct number of data_assets have come back\n assert (\n report_object[\"data_connectors\"][\"default_runtime_data_connector_name\"][\n \"data_asset_count\"\n ]\n == 0\n )\n\n # checking that note has come back\n assert (\n report_object[\"data_connectors\"][\"default_runtime_data_connector_name\"][\n \"note\"\n ]\n == \"RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest\"\n )\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_golden_path_runtime_data_connector_and_inferred_data_connector_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n \"test_dir_charlie/B/B-1.csv\",\n \"test_dir_charlie/B/B-2.csv\",\n \"test_dir_charlie/B/B-3.csv\",\n \"test_dir_charlie/C/C-1.csv\",\n \"test_dir_charlie/C/C-2.csv\",\n \"test_dir_charlie/C/C-3.csv\",\n \"test_dir_charlie/D/D-1.csv\",\n \"test_dir_charlie/D/D-2.csv\",\n \"test_dir_charlie/D/D-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n default_runtime_data_connector_name:\n class_name: 
RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n default_inferred_data_connector_name:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n \"\"\"\n\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n\n assert report_object[\"execution_engine\"] == {\n \"caching\": True,\n \"module_name\": \"great_expectations.execution_engine.pandas_execution_engine\",\n \"class_name\": \"PandasExecutionEngine\",\n \"discard_subset_failing_expectations\": False,\n \"boto3_options\": {},\n \"azure_options\": {},\n \"gcs_options\": {},\n }\n assert report_object[\"data_connectors\"][\"count\"] == 2\n assert report_object[\"data_connectors\"][\n \"default_runtime_data_connector_name\"\n ] == {\n \"class_name\": \"RuntimeDataConnector\",\n \"data_asset_count\": 0,\n \"data_assets\": {},\n \"example_data_asset_names\": [],\n \"example_unmatched_data_references\": [],\n \"note\": \"RuntimeDataConnector will not have data_asset_names until they are \"\n \"passed in through RuntimeBatchRequest\",\n \"unmatched_data_reference_count\": 0,\n }\n assert report_object[\"data_connectors\"][\n \"default_inferred_data_connector_name\"\n ] == {\n \"class_name\": \"InferredAssetFilesystemDataConnector\",\n \"data_asset_count\": 4,\n \"example_data_asset_names\": [\"A\", \"B\", \"C\"],\n \"data_assets\": {\n \"A\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"A/A-1.csv\", \"A/A-2.csv\", \"A/A-3.csv\"],\n },\n \"B\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"B/B-1.csv\", \"B/B-2.csv\", \"B/B-3.csv\"],\n },\n \"C\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"C/C-1.csv\", \"C/C-2.csv\", \"C/C-3.csv\"],\n },\n },\n \"unmatched_data_reference_count\": 0,\n \"example_unmatched_data_references\": [],\n }\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_golden_path_configured_asset_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_foxtrot/A/A-1.csv\",\n \"test_dir_foxtrot/A/A-2.csv\",\n \"test_dir_foxtrot/A/A-3.csv\",\n \"test_dir_foxtrot/B/B-1.txt\",\n \"test_dir_foxtrot/B/B-2.txt\",\n \"test_dir_foxtrot/B/B-3.txt\",\n \"test_dir_foxtrot/C/C-2017.csv\",\n \"test_dir_foxtrot/C/C-2018.csv\",\n \"test_dir_foxtrot/C/C-2019.csv\",\n \"test_dir_foxtrot/D/D-aaa.csv\",\n \"test_dir_foxtrot/D/D-bbb.csv\",\n \"test_dir_foxtrot/D/D-ccc.csv\",\n \"test_dir_foxtrot/D/D-ddd.csv\",\n \"test_dir_foxtrot/D/D-eee.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n 
execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_filesystem_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {base_directory}\n # glob_directive: \"*\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - alphanumeric\n\n assets:\n A:\n base_directory: {base_directory}/test_dir_foxtrot/A\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n B:\n base_directory: {base_directory}/test_dir_foxtrot/B\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n C:\n base_directory: {base_directory}/test_dir_foxtrot/C\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - year\n D:\n base_directory: {base_directory}/test_dir_foxtrot/D\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - checksum\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"ConfiguredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n my_batch.head()\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n 
datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"C\",\n data_connector_query={\"batch_filter_parameters\": {\"year\": \"2019\"}},\n create_expectation_suite_with_name=\"my_expectations\",\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_evr = my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert my_evr.success\n assert mock_emit.call_count == 4\n\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_datasets_in_custom_path(tmpdir_factory):\n\n tmpdir1 = tmpdir_factory.mktemp('datasets1')\n tmpdir2 = tmpdir_factory.mktemp('datasets2')\n os.environ['CTAPIPE_SVC_PATH'] = \":\".join([str(tmpdir1),str(tmpdir2)])\n\n # create a dummy dataset to search for:\n\n dataset_name = \"test_dataset_1.txt\"\n dataset_path = str(tmpdir1.join(dataset_name))\n\n with open(dataset_path, \"w\") as fp:\n fp.write(\"test test test\")\n\n # try to find dummy dataset\n path = datasets.get_dataset(dataset_name)\n assert path == dataset_path\n\n with pytest.raises(FileNotFoundError):\n badpath = datasets.get_dataset(\"does_not_exist\")\n\n\n # try using find_all_matching_datasets:\n\n ds = datasets.find_all_matching_datasets(\"test.*\",\n searchpath=os.environ['CTAPIPE_SVC_PATH'])\n assert dataset_name in ds", "def test_find_builder_config_code(mock_fs: testing.MockFs):\n\n class MyDataset(testing.DummyMnist): # pylint: disable=unused-variable\n \"\"\"Dummy dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n dataset_builder.BuilderConfig( # pylint: disable=g-complex-comprehension\n name=name, version='2.0.0', description=f'{name} description'\n )\n for name in ('default_config', 'other_config')\n ]\n\n # Old version from before there were configs.\n mock_fs.add_file('path/to/my_dataset/0.0.1/features.json')\n mock_fs.add_file('path/to/my_dataset/0.1.0/features.json')\n mock_fs.add_file('path/to/my_dataset/default_config/0.1.0/features.json')\n mock_fs.add_file('path/to/my_dataset/default_config/1.0.0/features.json')\n mock_fs.add_file('path/to/my_dataset/other_config/1.0.0/features.json')\n mock_fs.add_file('path/to/my_dataset/old_config/0.8.0/features.json')\n mock_fs.add_file('path/to/my_dataset/old_config/1.0.0/features.json')\n mock_fs.add_file('path/to/my_dataset/broken_config/features.json')\n\n # If code can be reached, use it to load the default config name.\n # Note that the existing version is loaded, even if the code is at a\n # more recent version.\n assert (\n _find_builder_dir('my_dataset')\n == 'path/to/my_dataset/default_config/1.0.0'\n )\n # Old version from previous configs.\n assert _find_builder_dir('my_dataset:0.0.1') == 'path/to/my_dataset/0.0.1'\n # Explicitly given version with no config, use folder without config.\n assert _find_builder_dir('my_dataset:0.1.0') == 'path/to/my_dataset/0.1.0'\n # Explicitly given version and config, use folder with config.\n assert (\n _find_builder_dir('my_dataset/default_config:0.1.0')\n == 
'path/to/my_dataset/default_config/0.1.0'\n )\n # When config is explicitly given, load the last detected version.\n assert (\n _find_builder_dir('my_dataset/other_config')\n == 'path/to/my_dataset/other_config/1.0.0'\n )\n assert (\n _find_builder_dir('my_dataset/old_config')\n == 'path/to/my_dataset/old_config/1.0.0'\n )\n assert (\n _find_builder_dir('my_dataset/old_config:0.8.0')\n == 'path/to/my_dataset/old_config/0.8.0'\n )\n # When no config found, return None.\n assert _find_builder_dir('my_dataset/broken_config') is None\n assert _find_builder_dir('my_dataset/unknown_config') is None", "def test_fsspec_filesystem(ray_start_regular_shared, tmp_path):\n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n table = pa.Table.from_pandas(df1)\n path1 = os.path.join(str(tmp_path), \"test1.parquet\")\n pq.write_table(table, path1)\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n table = pa.Table.from_pandas(df2)\n path2 = os.path.join(str(tmp_path), \"test2.parquet\")\n pq.write_table(table, path2)\n\n fs = LocalFileSystem()\n\n ds = ray.data.read_parquet([path1, path2], filesystem=fs)\n\n # Test metadata-only parquet ops.\n assert ds._plan.execute()._num_computed() == 1\n assert ds.count() == 6\n\n out_path = os.path.join(tmp_path, \"out\")\n os.mkdir(out_path)\n\n ds._set_uuid(\"data\")\n ds.write_parquet(out_path)\n\n ds_df1 = pd.read_parquet(os.path.join(out_path, \"data_000000.parquet\"))\n ds_df2 = pd.read_parquet(os.path.join(out_path, \"data_000001.parquet\"))\n ds_df = pd.concat([ds_df1, ds_df2])\n df = pd.concat([df1, df2])\n assert ds_df.equals(df)", "def test_golden_path_sql_datasource_configuration(\n mock_emit,\n caplog,\n empty_data_context_stats_enabled,\n sa,\n test_connectable_postgresql_db,\n):\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n # Everything below this line (except for asserts) is what we expect users to run as part of the golden path.\n import great_expectations as gx\n\n context = gx.get_context()\n\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n yaml_config = f\"\"\"\n class_name: SimpleSqlalchemyDatasource\n credentials:\n drivername: postgresql\n username: postgres\n password: \"\"\n host: {db_hostname}\n port: 5432\n database: test_ci\n\n introspection:\n whole_table_with_limits:\n sampling_method: _sample_using_limit\n sampling_kwargs:\n n: 10\n \"\"\"\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n assert mock_emit.call_count == 2\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[1][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_data_connector_name = mock_emit.call_args_list[1][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\"event_payload\": {}, \"event\": \"data_context.__init__\", \"success\": True}\n ),\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"SimpleSqlalchemyDatasource\",\n \"anonymized_execution_engine\": {\n \"parent_class\": \"SqlAlchemyExecutionEngine\"\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": 
\"InferredAssetSqlDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n print(json.dumps(report_object, indent=2))\n print(context.datasources)\n\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"test_df\",\n )\n # assert len(my_batch.data.fetchall()) == 10\n\n with pytest.raises(KeyError):\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"DOES_NOT_EXIST\",\n )\n\n my_validator = context.get_validator(\n datasource_name=\"my_datasource\",\n data_connector_name=\"whole_table_with_limits\",\n data_asset_name=\"test_df\",\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n )\n my_evr = my_validator.expect_table_columns_to_match_set(column_set=[])\n print(my_evr)\n\n # my_evr = my_validator.expect_column_values_to_be_between(\n # column=\"x\",\n # min_value=0,\n # max_value=4,\n # )\n # assert my_evr.success\n\n # TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"a\", \"b\", \"c\"])\n # assert my_evr.success\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass", "def test_load(tmp_path, data_name, params, expect_paths):\n\n folder_path = tmp_path\n dsets = pennylane.data.data_manager.load(\n data_name=data_name,\n folder_path=folder_path,\n **params,\n )\n\n assert {Path(dset.bind.filename) for dset in dsets} == {\n Path(tmp_path, path) for path in expect_paths\n }", "def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def test_history_import_relpath_in_metadata():\n with HistoryArchive() as history_archive:\n history_archive.write_metafiles(dataset_file_name='../outside.txt')\n history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')\n history_archive.write_outside()\n _run_jihaw_cleanup_check_secure(history_archive, 'Relative parent path in datasets_attrs.txt allowed')", "def test_data_path(self):\n path = self._api.GetDatapath()\n self._api.End()\n self.assertRaises(\n RuntimeError, self._api.Init, path=(self._test_dir + os.path.sep)\n ) # no tessdata\n if _TESSERACT_VERSION >= 0x3999800:\n new_path = path\n else:\n new_path = os.path.abspath(os.path.join(path, os.path.pardir)) + os.path.sep\n self._api.End()\n self._api.Init(new_path)\n self.assertEqual(self._api.GetDatapath(), path)", "def test_find_builder_dir_legacy_ds(mock_fs: testing.MockFs):\n mock_fs.add_file('path/to/ds0/1.0.0/temp.txt')\n assert _find_builder_dir('ds0') is None\n\n mock_fs.add_file('path/to/ds0/1.0.0/features.json')\n assert _find_builder_dir('ds0') == 'path/to/ds0/1.0.0'", "def pytest_configure():\n exec(open(\"script/generate_sql\").read())", "def test_uris(self, monkeypatch):\n import yaml\n\n def test_read_file_string(*args, **kwargs):\n config_string = open('./tests/test_data/basic.yaml', 'r').read()\n return config_string\n\n def test_read_file_uri(*args, **kwargs):\n config_string = open('./tests/test_data/basic_uri.yaml', 'r').read()\n return config_string\n\n def test_read_file_network(*args, **kwargs):\n config_string = open('./tests/test_data/basic_with_network.yaml', 'r').read()\n return 
config_string\n\n def test_clustername(*args, **kwargs):\n return 'test-clustername'\n\n fake_creds = AnonymousCredentials()\n mock_dataproc_client = mock.create_autospec(ClusterControllerClient(credentials=fake_creds))\n mock_gcs_client = mock.create_autospec(storage.Client(credentials=fake_creds, project='project'))\n # Mock the Compute Engine API client\n mock_compute_client = mock.create_autospec(discovery.build('compute', 'v1',\n credentials=fake_creds, cache_discovery=False))\n spawner = DataprocSpawner(hub=Hub(), dataproc=mock_dataproc_client, gcs=mock_gcs_client,\n user=MockUser(), _mock=True, gcs_notebooks=self.gcs_notebooks,\n compute=mock_compute_client, project='test-project')\n # Prevents a call to GCS. We return the local file instead.\n monkeypatch.setattr(spawner, \"read_gcs_file\", test_read_file_string)\n monkeypatch.setattr(spawner, \"clustername\", test_clustername)\n\n spawner.region = \"us-east1\"\n spawner.zone = \"us-east1-d\"\n spawner.env_str = \"test-env-str\"\n spawner.args_str = \"test-args-str\"\n spawner.user_options = {\n 'cluster_type': 'basic.yaml',\n 'cluster_zone': 'test-form1-a'\n }\n\n config_built = spawner._build_cluster_config()\n\n assert config_built['config']['gce_cluster_config']['subnetwork_uri'] == \"default\"\n\n # Prevents a call to GCS. We return the local file instead.\n monkeypatch.setattr(spawner, \"read_gcs_file\", test_read_file_uri)\n monkeypatch.setattr(spawner, \"clustername\", test_clustername)\n\n spawner.region = \"us-east1\"\n spawner.zone = \"us-east1-d\"\n spawner.env_str = \"test-env-str\"\n spawner.args_str = \"test-args-str\"\n spawner.user_options = {\n 'cluster_type': 'basic.yaml',\n 'cluster_zone': 'test-form1-a'\n }\n\n config_built = spawner._build_cluster_config()\n\n assert config_built['config']['gce_cluster_config']['subnetwork_uri'] == \"projects/test-project/regions/us-east1/subnetworks/default\"\n # Prevents a call to GCS. 
We return the local file instead.\n monkeypatch.setattr(spawner, \"read_gcs_file\", test_read_file_network)\n monkeypatch.setattr(spawner, \"clustername\", test_clustername)\n\n spawner.region = \"us-east1\"\n spawner.zone = \"us-east1-d\"\n spawner.env_str = \"test-env-str\"\n spawner.args_str = \"test-args-str\"\n spawner.user_options = {\n 'cluster_type': 'basic.yaml',\n 'cluster_zone': 'test-form1-a'\n }\n\n config_built = spawner._build_cluster_config()\n assert 'subnetwork_uri' not in config_built['config']['gce_cluster_config']", "def get_test_data_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"data\") + os.path.sep)", "def test_missing_data_sources(self):", "def test_datadir(self):\n self.chck_triple('datadir')", "def test_known_file_locations(dataset: linux.LinuxSourcesDataset):\n assert (dataset.src_tree_root / \"kernel\" / \"kexec.c\").is_file()\n assert (dataset.src_tree_root / \"kernel\" / \"smpboot.h\").is_file()", "def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)", "def test_export_datasources_original(app_context, fs):\n # pylint: disable=reimported, redefined-outer-name\n import superset.cli.importexport # noqa: F811\n\n # reload to define export_dashboards correctly based on the\n # feature flags\n importlib.reload(superset.cli.importexport)\n\n runner = app.test_cli_runner()\n response = runner.invoke(\n superset.cli.importexport.export_datasources, (\"-f\", \"datasources.yaml\")\n )\n\n assert response.exit_code == 0\n\n assert Path(\"datasources.yaml\").exists()\n\n # check that file is valid JSON\n with open(\"datasources.yaml\") as fp:\n contents = fp.read()\n yaml.safe_load(contents)", "def test_trailing_slash(setup_teardown_file):\n f = setup_teardown_file[3]\n\n f[\"dataset\"] = 42\n assert \"dataset/\" in f", "def test_paths_properties():\n template_script = get_template_script(output_dir='output1')\n template_script['options']['setup_dir'] = 'setup1'\n exp_builder = ExperimentBuilder(template_script)\n\n # The database path is configured correctly.\n assert exp_builder._db.setup_dir == os.path.join('output1', 'setup1')\n\n # Updating paths also updates the database main directory.\n exp_builder.output_dir = 'output2'\n exp_builder.setup_dir = 'setup2'\n assert exp_builder._db.setup_dir == os.path.join('output2', 'setup2')", "def test_image_folder_datasource(\n ray_start_regular_shared, enable_automatic_tensor_extension_cast\n):\n root = \"example://image-folders/simple\"\n ds = ray.data.read_datasource(ImageFolderDatasource(), root=root)\n\n _, types = ds.schema()\n image_type, label_type = types\n if enable_automatic_tensor_extension_cast:\n assert isinstance(image_type, TensorDtype)\n else:\n assert image_type == np.dtype(\"O\")\n assert label_type == np.dtype(\"O\")\n\n df = ds.to_pandas()\n assert sorted(df[\"label\"]) == [\"cat\", \"cat\", \"dog\"]\n\n tensors = df[\"image\"]\n assert all(tensor.shape == (32, 32, 3) for tensor in tensors)", "def path_finder(cls, *args):\n safe_test_data = os.path.join(\n os.path.dirname(__file__),\n '../tasks/tests/data')\n safe_test_data = os.path.abspath(safe_test_data)\n return os.path.join(safe_test_data, *args)", "def test_uris(self, monkeypatch):\n import yaml\n\n def test_read_file_string(*args, **kwargs):\n config_string = open('./tests/test_data/basic.yaml', 'r').read()\n return config_string\n \n def test_read_file_uri(*args, **kwargs):\n config_string = 
open('./tests/test_data/basic_uri.yaml', 'r').read()\n return config_string\n \n def test_clustername(*args, **kwargs):\n return 'test-clustername'\n\n mock_dataproc_client = mock.create_autospec(dataproc_v1beta2.ClusterControllerClient())\n mock_gcs_client = mock.create_autospec(storage.Client())\n spawner = DataprocSpawner(hub=Hub(), dataproc=mock_dataproc_client, gcs=mock_gcs_client, user=MockUser(), _mock=True, gcs_notebooks=self.gcs_notebooks)\n \n # Prevents a call to GCS. We return the local file instead.\n monkeypatch.setattr(spawner, \"read_gcs_file\", test_read_file_string)\n monkeypatch.setattr(spawner, \"clustername\", test_clustername)\n\n spawner.project = \"test-project\"\n spawner.region = \"us-east1\"\n spawner.zone = \"us-east1-d\"\n spawner.env_str = \"test-env-str\"\n spawner.args_str = \"test-args-str\"\n spawner.user_options = {\n 'cluster_type': 'basic.yaml',\n 'cluster_zone': 'test-form1-a'\n }\n\n config_built = spawner._build_cluster_config()\n\n assert config_built['config']['gce_cluster_config']['subnetwork_uri'] == \"default\"\n\n # Prevents a call to GCS. We return the local file instead.\n monkeypatch.setattr(spawner, \"read_gcs_file\", test_read_file_uri)\n monkeypatch.setattr(spawner, \"clustername\", test_clustername)\n\n spawner.project = \"test-project\"\n spawner.region = \"us-east1\"\n spawner.zone = \"us-east1-d\"\n spawner.env_str = \"test-env-str\"\n spawner.args_str = \"test-args-str\"\n spawner.user_options = {\n 'cluster_type': 'basic.yaml',\n 'cluster_zone': 'test-form1-a'\n }\n\n config_built = spawner._build_cluster_config()\n\n assert config_built['config']['gce_cluster_config']['subnetwork_uri'] == \"projects/test-project/regions/us-east1/subnetworks/default\"", "def data_dir():\n return os.path.join(os.path.dirname(__file__), 'test', 'data')", "def test_basic(sdc_builder, sdc_executor, gcp, file_format):\n\n if Version(sdc_builder.version) < Version('5.5.0') and file_format == 'JSON':\n pytest.skip('JSON staging introduced in 5.5.0')\n\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n data = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data=data,\n stop_after_first_batch=True)\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n staging_file_format=file_format,\n enable_data_drift=False,\n create_table=False,\n purge_stage_file_after_ingesting=True)\n\n dev_raw_data_source >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n logger.info('Creating dataset %s and table %s using Google BigQuery client ...', dataset_name, table_name)\n bigquery_client.create_dataset(dataset_ref)\n table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=SCHEMA))\n\n sdc_executor.add_pipeline(pipeline)\n 
sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n expected_data = [tuple(v for v in d.values()) for d in ROWS_IN_DATABASE]\n\n assert len(data_from_bigquery) == len(expected_data)\n assert data_from_bigquery == expected_data\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())", "def testRunConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n taskHolder = loader.taskHolders()[0]\n\n taskHolder.addVar(\n \"prefix\",\n self.__exampleTargetPrefixDirectory,\n True\n )\n\n # loading input data for the execution\n crawlerGroups = Crawler.group(\n FsCrawler.createFromPath(\n os.path.join(self.__exampleDirectory, 'textures')\n ).globFromParent()\n )\n\n resultCrawlers = []\n for group in crawlerGroups:\n if isinstance(group[0], Crawler.registeredType('texture')):\n resultCrawlers += taskHolder.run(group)\n\n targetFilePaths = list(sorted(filter(lambda x: len(x), map(lambda x: x.strip(), self.__generatedData.split('\\n')))))\n createdFilePaths = list(sorted(map(lambda x: 
x.var('fullPath')[len(self.__exampleTargetPrefixDirectory) + 1:].replace('\\\\', '/'), resultCrawlers)))\n\n self.assertListEqual(targetFilePaths, createdFilePaths)" ]
[ "0.7546321", "0.7450935", "0.73727494", "0.6266979", "0.6182107", "0.60948265", "0.6030717", "0.5940892", "0.5830706", "0.5804082", "0.5795107", "0.5780373", "0.5757029", "0.57562345", "0.57373863", "0.5730367", "0.5712738", "0.5712368", "0.56968", "0.5679923", "0.56575793", "0.5655973", "0.56180596", "0.56144184", "0.55873877", "0.5561925", "0.5560777", "0.55566293", "0.55428344", "0.55221707" ]
0.75466603
0
Tests output of test_yaml_config() for a DataContext configured with a Datasource with RuntimeDataConnector. Even though the test directory contains multiple files that can be read in by GX, the RuntimeDataConnector will output 0 data_assets, and return a "note" to the user. This is because the RuntimeDataConnector is not aware of data_assets until they are passed in through the RuntimeBatchRequest. The test asserts that the proper number of data_asset_names is returned and that the note is returned to the user.
def test_golden_path_runtime_data_connector_pandas_datasource_configuration(
    mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
    base_directory = str(
        tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
    )

    create_files_in_directory(
        directory=base_directory,
        file_name_list=[
            "test_dir_charlie/A/A-1.csv",
            "test_dir_charlie/A/A-2.csv",
            "test_dir_charlie/A/A-3.csv",
        ],
        file_content_fn=lambda: test_df.to_csv(header=True, index=False),
    )

    context: DataContext = empty_data_context_stats_enabled

    with set_directory(context.root_directory):
        import great_expectations as gx

        context = gx.get_context()
        mock_emit.reset_mock()  # Remove data_context.__init__ call

        yaml_config = """
        class_name: Datasource

        execution_engine:
            class_name: PandasExecutionEngine

        data_connectors:
            default_runtime_data_connector_name:
                class_name: RuntimeDataConnector
                batch_identifiers:
                    - default_identifier_name
        """

        # noinspection PyUnusedLocal
        report_object = context.test_yaml_config(
            name="my_directory_datasource",
            yaml_config=yaml_config,
            return_mode="report_object",
        )

        assert report_object["execution_engine"] == {
            "caching": True,
            "module_name": "great_expectations.execution_engine.pandas_execution_engine",
            "class_name": "PandasExecutionEngine",
            "discard_subset_failing_expectations": False,
            "boto3_options": {},
            "azure_options": {},
            "gcs_options": {},
        }
        assert report_object["data_connectors"]["count"] == 1

        # checking the correct number of data_assets have come back
        assert (
            report_object["data_connectors"]["default_runtime_data_connector_name"][
                "data_asset_count"
            ]
            == 0
        )

        # checking that note has come back
        assert (
            report_object["data_connectors"]["default_runtime_data_connector_name"][
                "note"
            ]
            == "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest"
        )

        # Confirm that logs do not contain any exceptions or invalid messages
        assert not usage_stats_exceptions_exist(messages=caplog.messages)
        assert not usage_stats_invalid_messages_exist(messages=caplog.messages)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_golden_path_runtime_data_connector_and_inferred_data_connector_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n \"test_dir_charlie/B/B-1.csv\",\n \"test_dir_charlie/B/B-2.csv\",\n \"test_dir_charlie/B/B-3.csv\",\n \"test_dir_charlie/C/C-1.csv\",\n \"test_dir_charlie/C/C-2.csv\",\n \"test_dir_charlie/C/C-3.csv\",\n \"test_dir_charlie/D/D-1.csv\",\n \"test_dir_charlie/D/D-2.csv\",\n \"test_dir_charlie/D/D-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n default_inferred_data_connector_name:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n \"\"\"\n\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n\n assert report_object[\"execution_engine\"] == {\n \"caching\": True,\n \"module_name\": \"great_expectations.execution_engine.pandas_execution_engine\",\n \"class_name\": \"PandasExecutionEngine\",\n \"discard_subset_failing_expectations\": False,\n \"boto3_options\": {},\n \"azure_options\": {},\n \"gcs_options\": {},\n }\n assert report_object[\"data_connectors\"][\"count\"] == 2\n assert report_object[\"data_connectors\"][\n \"default_runtime_data_connector_name\"\n ] == {\n \"class_name\": \"RuntimeDataConnector\",\n \"data_asset_count\": 0,\n \"data_assets\": {},\n \"example_data_asset_names\": [],\n \"example_unmatched_data_references\": [],\n \"note\": \"RuntimeDataConnector will not have data_asset_names until they are \"\n \"passed in through RuntimeBatchRequest\",\n \"unmatched_data_reference_count\": 0,\n }\n assert report_object[\"data_connectors\"][\n \"default_inferred_data_connector_name\"\n ] == {\n \"class_name\": \"InferredAssetFilesystemDataConnector\",\n \"data_asset_count\": 4,\n \"example_data_asset_names\": [\"A\", \"B\", \"C\"],\n \"data_assets\": {\n \"A\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"A/A-1.csv\", \"A/A-2.csv\", \"A/A-3.csv\"],\n },\n \"B\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"B/B-1.csv\", \"B/B-2.csv\", \"B/B-3.csv\"],\n },\n \"C\": {\n \"batch_definition_count\": 3,\n \"example_data_references\": [\"C/C-1.csv\", \"C/C-2.csv\", \"C/C-3.csv\"],\n },\n },\n \"unmatched_data_reference_count\": 0,\n \"example_unmatched_data_references\": [],\n }\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not 
usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_golden_path_configured_asset_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_foxtrot/A/A-1.csv\",\n \"test_dir_foxtrot/A/A-2.csv\",\n \"test_dir_foxtrot/A/A-3.csv\",\n \"test_dir_foxtrot/B/B-1.txt\",\n \"test_dir_foxtrot/B/B-2.txt\",\n \"test_dir_foxtrot/B/B-3.txt\",\n \"test_dir_foxtrot/C/C-2017.csv\",\n \"test_dir_foxtrot/C/C-2018.csv\",\n \"test_dir_foxtrot/C/C-2019.csv\",\n \"test_dir_foxtrot/D/D-aaa.csv\",\n \"test_dir_foxtrot/D/D-bbb.csv\",\n \"test_dir_foxtrot/D/D-ccc.csv\",\n \"test_dir_foxtrot/D/D-ddd.csv\",\n \"test_dir_foxtrot/D/D-eee.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_filesystem_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {base_directory}\n # glob_directive: \"*\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - alphanumeric\n\n assets:\n A:\n base_directory: {base_directory}/test_dir_foxtrot/A\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n B:\n base_directory: {base_directory}/test_dir_foxtrot/B\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n C:\n base_directory: {base_directory}/test_dir_foxtrot/C\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - year\n D:\n base_directory: {base_directory}/test_dir_foxtrot/D\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - checksum\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"ConfiguredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n 
datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n my_batch.head()\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"C\",\n data_connector_query={\"batch_filter_parameters\": {\"year\": \"2019\"}},\n create_expectation_suite_with_name=\"my_expectations\",\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_evr = my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert my_evr.success\n assert mock_emit.call_count == 4\n\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_golden_path_inferred_asset_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n \"test_dir_charlie/B/B-1.csv\",\n \"test_dir_charlie/B/B-2.csv\",\n \"test_dir_charlie/B/B-3.csv\",\n \"test_dir_charlie/C/C-1.csv\",\n \"test_dir_charlie/C/C-2.csv\",\n \"test_dir_charlie/C/C-3.csv\",\n \"test_dir_charlie/D/D-1.csv\",\n \"test_dir_charlie/D/D-2.csv\",\n \"test_dir_charlie/D/D-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n 
my_filesystem_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"InferredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"D\",\n data_connector_query={\"batch_filter_parameters\": {\"number\": \"3\"}},\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n assert mock_emit.call_count == 4\n\n my_evr = 
my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert my_evr.success\n\n # TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n # assert mock_emit.call_count == 1\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def testRunConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n taskHolder = loader.taskHolders()[0]\n\n taskHolder.addVar(\n \"prefix\",\n self.__exampleTargetPrefixDirectory,\n True\n )\n\n # loading input data for the execution\n crawlerGroups = Crawler.group(\n FsCrawler.createFromPath(\n os.path.join(self.__exampleDirectory, 'textures')\n ).globFromParent()\n )\n\n resultCrawlers = []\n for group in crawlerGroups:\n if isinstance(group[0], Crawler.registeredType('texture')):\n resultCrawlers += taskHolder.run(group)\n\n targetFilePaths = list(sorted(filter(lambda x: len(x), map(lambda x: x.strip(), self.__generatedData.split('\\n')))))\n createdFilePaths = list(sorted(map(lambda x: x.var('fullPath')[len(self.__exampleTargetPrefixDirectory) + 1:].replace('\\\\', '/'), resultCrawlers)))\n\n self.assertListEqual(targetFilePaths, createdFilePaths)", "def Get_datasets(**kwargs):\n from .utils import option_printer, get_conn, get_param_dict, get_logger_instance\n from .cohort_tables import make_target_comp_tables\n from .table2rawseq import table_to_rawseq\n from .rawseq2multihot import rawseq_to_multihot\n from .multihot2datasets import multihot_to_datasets\n import os, logging\n from importlib import reload\n \n ## get params\n param_dict = get_param_dict(kwargs['DS_PARAMS_FILE_NAME'], kwargs['CONFIG_FOLDER_PATH'])\n param_dict.update(kwargs)\n if not os.path.exists(param_dict['DATA_FOLDER_PATH']): os.makedirs(param_dict['DATA_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['CDM_DB']\n \n param_dict['DUMPING_PATH'] = os.path.join(param_dict['RESULT_FOLDER_PATH'], \n param_dict['PROJECT_NAME'], \n param_dict['CDM_DB_NAME'])\n if not os.path.exists(param_dict['DUMPING_PATH']): \n os.makedirs(param_dict['DUMPING_PATH'])\n \n if param_dict['PIPELINE_START_LEVEL']<3:\n param_dict['DB_CONN'], CDM_DB_NAME, RESULT_DB_NAME = get_conn(param_dict['DB_CONN_FILENAME'], \n param_dict['CONFIG_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = CDM_DB_NAME\n param_dict['RESULT_DB_NAME'] = RESULT_DB_NAME\n else:\n param_dict['RESULT_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['RESULT_DB']\n \n ## logger\n logging.shutdown()\n reload(logging)\n main_logger = get_logger_instance(logger_name='ds_pipeline', \n DUMPING_PATH=param_dict['DUMPING_PATH'], \n parent_name=False,\n stream=True)\n \n ## print params\n main_logger.info(\"\\n (params) \\n\")\n try: option_printer(main_logger, param_dict['DB_CONN'], **param_dict)\n except: pass\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [1] Make_target_comp_tables\n if param_dict['PIPELINE_START_LEVEL']<=1:\n main_logger.info(\"\\n[Level 1] Make_TARGET_COMP_tables\\n\")\n make_target_comp_tables(**param_dict)\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [2] Table to 
rawSeq\n if param_dict['PIPELINE_START_LEVEL']<=2:\n main_logger.info(\"\\n[Level 2] Table to rawSeq\\n\")\n table_to_rawseq(param_dict['DUMPING_PATH'], \n param_dict['DB_CONN'], param_dict['CDM_DB_NAME'], \n param_dict['DATA_FOLDER_PATH'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [3] rawSeq to multihot\n if param_dict['PIPELINE_START_LEVEL']<=3:\n main_logger.info(\"\\n[Level 3] Convert to multihot\\n\")\n rawseq_to_multihot(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['MAX_TIME_STEP'], \n param_dict['DX_ONLY'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [4] Multihot to Dataset\n if param_dict['PIPELINE_START_LEVEL']<=4:\n main_logger.info(\"\\n[Level 4] Multihot to Dataset\\n\")\n datasets = multihot_to_datasets(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['TR_RATIO'])\n \n #add info\n if param_dict['PIPELINE_START_LEVEL']<3: \n datasets.info['DB_CONN'] = param_dict['DB_CONN']\n datasets.info['CONFIG_FOLDER_PATH'] = param_dict['CONFIG_FOLDER_PATH']\n datasets.info['DATA_FOLDER_PATH'] = param_dict['DATA_FOLDER_PATH']\n datasets.info['RESULT_FOLDER_PATH'] = param_dict['RESULT_FOLDER_PATH']\n datasets.info['DB_CONN_FILENAME'] = param_dict['DB_CONN_FILENAME']\n datasets.info['DS_PARAMS_FILE_NAME'] = param_dict['DS_PARAMS_FILE_NAME']\n datasets.info['CDM_DB_NAME'] = param_dict['CDM_DB_NAME']\n datasets.info['RESULT_DB_NAME'] = param_dict['RESULT_DB_NAME']\n \n main_logger.info(\"\\n[Datasets Info.]\\n\")\n main_logger.info(\"{0:>26} {1:}\".format('[OPTION]', '[VALUE]'))\n for k in sorted(datasets.info.keys()):\n main_logger.info(\" {0:>23}: {1:}\".format(k, datasets.info[k]))\n \n #print(\"\\nALL DONE!!\")\n main_logger.info(\"\\n[ALL DONE!!]\\n\\n\")\n for h in list(main_logger.handlers):\n main_logger.removeHandler(h)\n h.flush()\n h.close()\n return datasets", "def test_export_datasources_original(app_context, fs):\n # pylint: disable=reimported, redefined-outer-name\n import superset.cli.importexport # noqa: F811\n\n # reload to define export_dashboards correctly based on the\n # feature flags\n importlib.reload(superset.cli.importexport)\n\n runner = app.test_cli_runner()\n response = runner.invoke(\n superset.cli.importexport.export_datasources, (\"-f\", \"datasources.yaml\")\n )\n\n assert response.exit_code == 0\n\n assert Path(\"datasources.yaml\").exists()\n\n # check that file is valid JSON\n with open(\"datasources.yaml\") as fp:\n contents = fp.read()\n yaml.safe_load(contents)", "def test_batches_are_accessible(\n monkeypatch,\n multibatch_generic_csv_generator,\n multibatch_generic_csv_generator_context,\n):\n\n context: DataContext = multibatch_generic_csv_generator_context\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n\n datasource = context.datasources[datasource_name]\n\n data_connector = datasource.data_connectors[data_connector_name]\n\n total_batches: int = 20\n file_list = multibatch_generic_csv_generator(\n data_path=data_path, num_event_batches=total_batches\n )\n\n assert (\n data_connector._get_data_reference_list_from_cache_by_data_asset_name(\n data_asset_name=asset_name\n )\n == file_list\n )\n\n batch_request_1 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -1,\n },\n 
)\n # Should give most recent batch\n validator_1 = context.get_validator(\n batch_request=batch_request_1,\n create_expectation_suite_with_name=\"my_expectation_suite_name_1\",\n )\n metric_max = validator_1.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches\n metric_value_set = validator_1.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n batch_request_2 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -2,\n },\n )\n validator_2 = context.get_validator(\n batch_request=batch_request_2,\n create_expectation_suite_with_name=\"my_expectation_suite_name_2\",\n )\n metric_max = validator_2.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches - 1\n metric_value_set = validator_2.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n for batch_num in range(1, total_batches + 1):\n batch_request = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -batch_num,\n },\n )\n validator = context.get_validator(\n batch_request=batch_request,\n create_expectation_suite_with_name=f\"my_expectation_suite_name__{batch_num}\",\n )\n metric_max = validator.get_metric(\n MetricConfiguration(\n \"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"}\n )\n )\n assert metric_max == (total_batches + 1) - batch_num\n metric_value_set = validator.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}", "def test_yaml(self):\n\n # Check yml file can be loaded correctly\n with open(\"{}/app_spec.yml\".format(self.APP_PATH), mode='r',\n encoding=\"utf-8\", errors='ignore') as stream:\n # Load yaml file\n try:\n yaml_obj = yaml.load(stream) or {}\n except Exception as e:\n self.fail(msg=\"app_spec.yml cannot be loaded\")\n ll = ['input', 'output']\n check_list = ['value_type']\n\n for l in ll:\n l_obj = yaml_obj.get(l, None)\n # Check [input] and [output] section\n with self.subTest(name=f\"[{l}] section\"):\n self.assertIsNotNone(\n l_obj,\n msg=f\"[{l}] section missing in app_spec.yml\")\n\n for k, v in l_obj.items():\n for cl in check_list:\n with self.subTest(name=f\"[{l}:{k}]\"):\n value = v.get(cl)\n self.assertIsNotNone(\n value,\n msg=f\"[{k}/{cl}] missing in app_spec.yml\")\n if l == 'input' and 'value_range' in v and v['value_range']:\n with self.subTest(\n name=f\"[input:{k}] section\"):\n self.assertTrue(\n type(v['value_range']) is list,\n msg=f\"value_range [input:{k}] not a list\")", "def testLoadConfigs_loadMultipleLab(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(\n os.path.dirname(config_path), lab_config.IsYaml))\n with self.assertRaisesRegex(\n lab_config.ConfigError, r'There are multiple config 
files.'):\n pool.LoadConfigs()", "def TestDataFile(*args):\n return sdk_test_base.SdkBase.Resource('tests', 'unit', 'surface', 'container',\n 'hub', 'testdata', *args)", "def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())", "def test_golden_path_sql_datasource_configuration(\n mock_emit,\n caplog,\n empty_data_context_stats_enabled,\n sa,\n test_connectable_postgresql_db,\n):\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n # Everything below this line (except for asserts) is what we expect users to run as part of the golden path.\n import great_expectations as gx\n\n context = gx.get_context()\n\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n yaml_config = f\"\"\"\n class_name: SimpleSqlalchemyDatasource\n credentials:\n drivername: postgresql\n username: postgres\n password: \"\"\n host: {db_hostname}\n port: 5432\n database: test_ci\n\n introspection:\n whole_table_with_limits:\n sampling_method: _sample_using_limit\n sampling_kwargs:\n n: 10\n \"\"\"\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n assert mock_emit.call_count == 2\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = 
mock_emit.call_args_list[1][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_data_connector_name = mock_emit.call_args_list[1][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\"event_payload\": {}, \"event\": \"data_context.__init__\", \"success\": True}\n ),\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"SimpleSqlalchemyDatasource\",\n \"anonymized_execution_engine\": {\n \"parent_class\": \"SqlAlchemyExecutionEngine\"\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"InferredAssetSqlDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n print(json.dumps(report_object, indent=2))\n print(context.datasources)\n\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"test_df\",\n )\n # assert len(my_batch.data.fetchall()) == 10\n\n with pytest.raises(KeyError):\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"DOES_NOT_EXIST\",\n )\n\n my_validator = context.get_validator(\n datasource_name=\"my_datasource\",\n data_connector_name=\"whole_table_with_limits\",\n data_asset_name=\"test_df\",\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n )\n my_evr = my_validator.expect_table_columns_to_match_set(column_set=[])\n print(my_evr)\n\n # my_evr = my_validator.expect_column_values_to_be_between(\n # column=\"x\",\n # min_value=0,\n # max_value=4,\n # )\n # assert my_evr.success\n\n # TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"a\", \"b\", \"c\"])\n # assert my_evr.success\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_load_dataloader_config(self) -> None:\n result = load_dataloader_config()\n self.assertIs(type(result), list)\n self.assertIsNot(result, [])", "def test_get_deployment_resource_data(self):\n pass", "def test_get_data_loader():\n\n # single paired data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, PairedDataLoader)\n\n config = load_yaml(\"config/test/paired_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, PairedDataLoader)\n\n # single unpaired data loader\n config = load_yaml(\"config/test/unpaired_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n config = load_yaml(\"config/test/unpaired_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n # single grouped data loader\n config = load_yaml(\"config/test/grouped_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, GroupedDataLoader)\n\n config = load_yaml(\"config/test/grouped_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, GroupedDataLoader)\n\n # empty 
data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = \"\"\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert got is None\n\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = None\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert got is None\n\n # unpaired data loader with multiple dirs\n config = load_yaml(\"config/test/unpaired_nifti_multi_dirs.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n # check not a directory error\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] += \".h5\"\n with pytest.raises(ValueError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert \"is not a directory or does not exist\" in str(err_info.value)\n\n # check directory not existed error\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = \"/this_should_not_existed\"\n with pytest.raises(ValueError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert \"is not a directory or does not exist\" in str(err_info.value)\n\n # check mode\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n with pytest.raises(AssertionError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"example\")\n assert \"mode must be one of train/valid/test\" in str(err_info.value)", "def get_data_config(args):\n diff_data(args, \".\")", "def load_data(data_config):\n return tfds.load(data_config.path, with_info=data_config.load_with_info)", "def load_data(data_config):\n return tfds.load(data_config.path, with_info=data_config.load_with_info)", "def test_outputs(self, monkeypatch, script_runner):\n monkeypatch.setattr(\"builtins.input\", lambda _: \"n\")\n _ = script_runner.run(\n \"spectrafit\",\n \"spectrafit/test/test_data.txt\",\n \"-i\",\n \"spectrafit/test/test_input_2.json\",\n )\n assert len(list(Path(\".\").glob(\"*.json\"))) == 1\n assert len(list(Path(\".\").glob(\"*.csv\"))) == 3", "def find_output_dataset(stage, rconfig, data_type=None):\n\n # Use the stage-to-data mapping to find the output names\n if data_type is None:\n data_type = DOCUMENT_PROCESSING_IO[stage]['out']\n #for output_name in data_types:\n # Get all data sets D for input name\n dirname = os.path.join(rconfig.target_path, 'data', data_type)\n datasets1 = [ds for ds in os.listdir(dirname) if ds.isdigit()]\n datasets2 = [DataSet(stage, data_type, rconfig, ds) for ds in datasets1]\n # Filer the datasets making sure that d.trace + d.head matches\n # rconfig.pipeline(txt).trace\n datasets3 = [ds for ds in datasets2 if ds.output_matches_global_config()]\n # If there is one result, return it, if there are more than one, write a\n # warning and exit, otherwise, initialize a dataset and return it\n if len(datasets3) == 1:\n return datasets3[0]\n elif len(datasets3) > 1:\n print \"WARNING, more than one approriate training set found:\"\n for ds in datasets3:\n print ' ', ds\n sys.exit(\"Exiting...\")\n elif len(datasets3) == 0:\n highest_id = max([0] + [int(ds) for ds in datasets1])\n new_id = \"%02d\" % (highest_id + 1)\n dataset = DataSet(stage, data_type, rconfig, new_id)\n if not dataset.exists():\n dataset.initialize_on_disk()\n dataset.load_from_disk()\n print \"[%s] created %s\" % (stage, 
dataset)\n return dataset", "def test_get_yaml_spec(self):\n pass", "def test_main_extra_template_data_config(\n capsys: CaptureFixture, output_model, expected_output\n) -> None:\n\n input_filename = OPEN_API_DATA_PATH / 'api.yaml'\n extra_template_data = OPEN_API_DATA_PATH / 'extra_data.json'\n\n with freeze_time(TIMESTAMP):\n main(\n [\n '--input',\n str(input_filename),\n '--extra-template-data',\n str(extra_template_data),\n '--output-model',\n output_model,\n ]\n )\n\n captured = capsys.readouterr()\n assert (\n captured.out == (EXPECTED_MAIN_PATH / expected_output / 'output.py').read_text()\n )\n assert captured.err == inferred_message.format('openapi') + '\\n'", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def create_test_data(measurement_file_name, parameter_file_name, yaml_config,\n yaml_file_name_test, model_output_dir, model_name,\n hdf5_file_name):\n\n test_measurement_file_name = \\\n \"-testset\".join(os.path.splitext(measurement_file_name))\n test_parameter_file_name = \\\n \"-testset\".join(os.path.splitext(parameter_file_name))\n\n # measurements\n df = petab.get_measurement_df(measurement_file_name)\n df.loc[df.observableParameters == 'scaling_x1_common', 'measurement'] = \\\n df.loc[df.observableParameters == 'scaling_x1_common', 'measurement'] \\\n * 2.0\n df.loc[~df.observableParameters.isnull(), 'observableParameters'] = \\\n df.loc[~df.observableParameters.isnull(), 'observableParameters'] \\\n + \"_test\"\n petab.write_parameter_df(df, test_measurement_file_name)\n\n # parameters\n df = petab.get_parameter_df(parameter_file_name)\n df.rename(index={'scaling_x1_common' : 'scaling_x1_common_test',\n 'offset_x2_batch_0': 'offset_x2_batch_0_test',\n 'offset_x2_batch_1': 'offset_x2_batch_1_test'},\n inplace=True)\n petab.write_parameter_df(df, test_parameter_file_name)\n\n # yaml\n yaml_config[ptc.PARAMETER_FILE] = test_parameter_file_name\n yaml_config[ptc.PROBLEMS][0][ptc.MEASUREMENT_FILES][0] = \\\n test_measurement_file_name\n with open(yaml_file_name_test, 'w') as outfile:\n yaml.dump(yaml_config, outfile, default_flow_style=False)\n\n generate_hdf5_file(\n yaml_file=yaml_file_name_test,\n model_output_dir=model_output_dir,\n hdf5_file_name=\"-testset\".join(os.path.splitext(hdf5_file_name)),\n model_name=model_name\n )", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n 
sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data", "def _generate_and_load_batch(self, tempdir, org_config, options) -> dict:\n options = {\n **options,\n \"working_directory\": tempdir,\n \"set_recently_viewed\": False,\n \"ignore_row_errors\": self.ignore_row_errors,\n \"drop_missing_schema\": self.drop_missing_schema,\n }\n subtask_config = TaskConfig({\"options\": options})\n subtask = GenerateAndLoadDataFromYaml(\n project_config=self.project_config,\n task_config=subtask_config,\n org_config=org_config,\n flow=self.flow,\n name=self.name,\n stepnum=self.stepnum,\n )\n subtask()\n return subtask.return_values[\"load_results\"][0]", "def test_load(tmp_path, data_name, params, expect_paths):\n\n folder_path = tmp_path\n dsets = pennylane.data.data_manager.load(\n data_name=data_name,\n folder_path=folder_path,\n **params,\n )\n\n assert {Path(dset.bind.filename) for dset in dsets} == {\n Path(tmp_path, path) for path in expect_paths\n }", "def test_vs_batch_start(\n self,\n request,\n kube_apis,\n ingress_controller_prerequisites,\n crd_ingress_controller,\n virtual_server_setup,\n test_namespace,\n ):\n resp = requests.get(virtual_server_setup.backend_1_url, headers={\"host\": virtual_server_setup.vs_host})\n assert resp.status_code == 200\n total_vs = int(request.config.getoption(\"--batch-resources\"))\n manifest = f\"{TEST_DATA}/virtual-server/standard/virtual-server.yaml\"\n count_before = get_reload_count(virtual_server_setup.metrics_url)\n with open(manifest) as f:\n doc = yaml.safe_load(f)\n with tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".yml\", delete=False) as temp:\n for i in range(1, total_vs + 1):\n doc[\"metadata\"][\"name\"] = f\"virtual-server-{i}\"\n doc[\"spec\"][\"host\"] = f\"virtual-server-{i}.example.com\"\n temp.write(yaml.safe_dump(doc) + \"---\\n\")\n create_custom_items_from_yaml(kube_apis.custom_objects, temp.name, test_namespace)\n os.remove(temp.name)\n print(f\"Total resources deployed is {total_vs}\")\n wait_before_test(5)\n count_after = get_reload_count(virtual_server_setup.metrics_url)\n new_reloads = count_after - count_before\n assert get_last_reload_status(virtual_server_setup.metrics_url, \"nginx\") == \"1\" and new_reloads <= int(\n request.config.getoption(\"--batch-reload-number\")\n )\n reload_ms = get_last_reload_time(virtual_server_setup.metrics_url, \"nginx\")\n print(f\"last reload duration: {reload_ms} ms; total new reloads: {new_reloads}\")\n\n for i in range(1, total_vs + 1):\n delete_virtual_server(kube_apis.custom_objects, f\"virtual-server-{i}\", test_namespace)", "def test_data_source(self, response: dict):\n\n data_source_id = str(response['data']['testDataSource']['dataSource']['id'])\n container_name = f'mobydq-test-data-source-{data_source_id}'\n client = docker.from_env()\n client.containers.run(\n name=container_name,\n image='mobydq-scripts',\n network='mobydq-network',\n 
command=['python', 'run.py', 'test_data_source', data_source_id],\n stream=True,\n remove=True\n )\n\n # Get connectivity test result\n query = f'query{{dataSourceById(id:{data_source_id}){{id,connectivityStatus}}}}'\n data = utils.execute_graphql_request(query)\n return data" ]
[ "0.6210851", "0.60284483", "0.6026436", "0.5509853", "0.5421516", "0.5384713", "0.5326231", "0.5324207", "0.5301481", "0.5286421", "0.5233528", "0.52224165", "0.51863563", "0.5100882", "0.5086977", "0.507586", "0.5057573", "0.5057573", "0.50360197", "0.5035163", "0.5032041", "0.50255644", "0.5022473", "0.5019989", "0.50079113", "0.50072014", "0.49924502", "0.4980037", "0.49766603", "0.49675006" ]
0.655488
0
Tests output of test_yaml_config() for a DataContext configured with a Datasource with InferredAssetDataConnector and RuntimeDataConnector. 1. The InferredAssetDataConnector will output 4 data_assets, which correspond to the files in the test_dir_charlie folder. 2. The RuntimeDataConnector will output 0 data_assets, and return a "note" to the user. This is because the RuntimeDataConnector is not aware of data_assets until they are passed in through the RuntimeBatchRequest. The test asserts that the proper number of data_asset_names is returned for both DataConnectors, and in the case of the RuntimeDataConnector, the proper note is returned to the user.
def test_golden_path_runtime_data_connector_and_inferred_data_connector_pandas_datasource_configuration(
    mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
    base_directory = str(
        tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
    )

    create_files_in_directory(
        directory=base_directory,
        file_name_list=[
            "test_dir_charlie/A/A-1.csv",
            "test_dir_charlie/A/A-2.csv",
            "test_dir_charlie/A/A-3.csv",
            "test_dir_charlie/B/B-1.csv",
            "test_dir_charlie/B/B-2.csv",
            "test_dir_charlie/B/B-3.csv",
            "test_dir_charlie/C/C-1.csv",
            "test_dir_charlie/C/C-2.csv",
            "test_dir_charlie/C/C-3.csv",
            "test_dir_charlie/D/D-1.csv",
            "test_dir_charlie/D/D-2.csv",
            "test_dir_charlie/D/D-3.csv",
        ],
        file_content_fn=lambda: test_df.to_csv(header=True, index=False),
    )

    context: DataContext = empty_data_context_stats_enabled

    with set_directory(context.root_directory):
        import great_expectations as gx

        context = gx.get_context()
        mock_emit.reset_mock()  # Remove data_context.__init__ call

        yaml_config = f"""
        class_name: Datasource

        execution_engine:
            class_name: PandasExecutionEngine

        data_connectors:
            default_runtime_data_connector_name:
                class_name: RuntimeDataConnector
                batch_identifiers:
                    - default_identifier_name
            default_inferred_data_connector_name:
                class_name: InferredAssetFilesystemDataConnector
                base_directory: {base_directory}/test_dir_charlie
                glob_directive: "*/*.csv"

                default_regex:
                    pattern: (.+)/(.+)-(\\d+)\\.csv
                    group_names:
                        - subdirectory
                        - data_asset_name
                        - number
        """

        # noinspection PyUnusedLocal
        report_object = context.test_yaml_config(
            name="my_directory_datasource",
            yaml_config=yaml_config,
            return_mode="report_object",
        )

        assert report_object["execution_engine"] == {
            "caching": True,
            "module_name": "great_expectations.execution_engine.pandas_execution_engine",
            "class_name": "PandasExecutionEngine",
            "discard_subset_failing_expectations": False,
            "boto3_options": {},
            "azure_options": {},
            "gcs_options": {},
        }
        assert report_object["data_connectors"]["count"] == 2
        assert report_object["data_connectors"][
            "default_runtime_data_connector_name"
        ] == {
            "class_name": "RuntimeDataConnector",
            "data_asset_count": 0,
            "data_assets": {},
            "example_data_asset_names": [],
            "example_unmatched_data_references": [],
            "note": "RuntimeDataConnector will not have data_asset_names until they are "
            "passed in through RuntimeBatchRequest",
            "unmatched_data_reference_count": 0,
        }
        assert report_object["data_connectors"][
            "default_inferred_data_connector_name"
        ] == {
            "class_name": "InferredAssetFilesystemDataConnector",
            "data_asset_count": 4,
            "example_data_asset_names": ["A", "B", "C"],
            "data_assets": {
                "A": {
                    "batch_definition_count": 3,
                    "example_data_references": ["A/A-1.csv", "A/A-2.csv", "A/A-3.csv"],
                },
                "B": {
                    "batch_definition_count": 3,
                    "example_data_references": ["B/B-1.csv", "B/B-2.csv", "B/B-3.csv"],
                },
                "C": {
                    "batch_definition_count": 3,
                    "example_data_references": ["C/C-1.csv", "C/C-2.csv", "C/C-3.csv"],
                },
            },
            "unmatched_data_reference_count": 0,
            "example_unmatched_data_references": [],
        }

        # Confirm that logs do not contain any exceptions or invalid messages
        assert not usage_stats_exceptions_exist(messages=caplog.messages)
        assert not usage_stats_invalid_messages_exist(messages=caplog.messages)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_golden_path_runtime_data_connector_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = \"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\"\n\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n\n assert report_object[\"execution_engine\"] == {\n \"caching\": True,\n \"module_name\": \"great_expectations.execution_engine.pandas_execution_engine\",\n \"class_name\": \"PandasExecutionEngine\",\n \"discard_subset_failing_expectations\": False,\n \"boto3_options\": {},\n \"azure_options\": {},\n \"gcs_options\": {},\n }\n assert report_object[\"data_connectors\"][\"count\"] == 1\n\n # checking the correct number of data_assets have come back\n assert (\n report_object[\"data_connectors\"][\"default_runtime_data_connector_name\"][\n \"data_asset_count\"\n ]\n == 0\n )\n\n # checking that note has come back\n assert (\n report_object[\"data_connectors\"][\"default_runtime_data_connector_name\"][\n \"note\"\n ]\n == \"RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest\"\n )\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_golden_path_inferred_asset_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_charlie/A/A-1.csv\",\n \"test_dir_charlie/A/A-2.csv\",\n \"test_dir_charlie/A/A-3.csv\",\n \"test_dir_charlie/B/B-1.csv\",\n \"test_dir_charlie/B/B-2.csv\",\n \"test_dir_charlie/B/B-3.csv\",\n \"test_dir_charlie/C/C-1.csv\",\n \"test_dir_charlie/C/C-2.csv\",\n \"test_dir_charlie/C/C-3.csv\",\n \"test_dir_charlie/D/D-1.csv\",\n \"test_dir_charlie/D/D-2.csv\",\n \"test_dir_charlie/D/D-3.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_filesystem_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: 
{base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"InferredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"D\",\n data_connector_query={\"batch_filter_parameters\": {\"number\": \"3\"}},\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n assert mock_emit.call_count == 4\n\n my_evr = my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert my_evr.success\n\n # 
TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n # assert mock_emit.call_count == 1\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_golden_path_configured_asset_pandas_datasource_configuration(\n mock_emit, caplog, empty_data_context_stats_enabled, test_df, tmp_path_factory\n):\n base_directory = str(\n tmp_path_factory.mktemp(\"test_golden_path_pandas_datasource_configuration\")\n )\n\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test_dir_foxtrot/A/A-1.csv\",\n \"test_dir_foxtrot/A/A-2.csv\",\n \"test_dir_foxtrot/A/A-3.csv\",\n \"test_dir_foxtrot/B/B-1.txt\",\n \"test_dir_foxtrot/B/B-2.txt\",\n \"test_dir_foxtrot/B/B-3.txt\",\n \"test_dir_foxtrot/C/C-2017.csv\",\n \"test_dir_foxtrot/C/C-2018.csv\",\n \"test_dir_foxtrot/C/C-2019.csv\",\n \"test_dir_foxtrot/D/D-aaa.csv\",\n \"test_dir_foxtrot/D/D-bbb.csv\",\n \"test_dir_foxtrot/D/D-ccc.csv\",\n \"test_dir_foxtrot/D/D-ddd.csv\",\n \"test_dir_foxtrot/D/D-eee.csv\",\n ],\n file_content_fn=lambda: test_df.to_csv(header=True, index=False),\n )\n\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n import great_expectations as gx\n\n context = gx.get_context()\n mock_emit.reset_mock() # Remove data_context.__init__ call\n\n yaml_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_filesystem_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {base_directory}\n # glob_directive: \"*\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - alphanumeric\n\n assets:\n A:\n base_directory: {base_directory}/test_dir_foxtrot/A\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n B:\n base_directory: {base_directory}/test_dir_foxtrot/B\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n C:\n base_directory: {base_directory}/test_dir_foxtrot/C\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - year\n D:\n base_directory: {base_directory}/test_dir_foxtrot/D\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - checksum\n \"\"\"\n\n # noinspection PyUnusedLocal\n context.test_yaml_config(\n name=\"my_directory_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n # print(json.dumps(report_object, indent=2))\n # print(context.datasources)\n assert mock_emit.call_count == 1\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_execution_engine\"][\"anonymized_name\"]\n anonymized_data_connector_name = mock_emit.call_args_list[0][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"Datasource\",\n \"anonymized_execution_engine\": {\n \"anonymized_name\": 
anonymized_execution_engine_name,\n \"parent_class\": \"PandasExecutionEngine\",\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"ConfiguredAssetFilesystemDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n my_batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"A\",\n batch_identifiers={\n \"number\": \"2\",\n },\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_batch = my_batch_list[0]\n assert my_batch.batch_definition[\"data_asset_name\"] == \"A\"\n assert mock_emit.call_count == 2\n\n my_batch.head()\n\n df_data = my_batch.data.dataframe\n assert df_data.shape == (10, 10)\n df_data[\"date\"] = df_data.apply(\n lambda row: datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\").date(),\n axis=1,\n )\n assert (\n test_df[\n (test_df[\"date\"] == datetime.date(2020, 1, 15))\n | (test_df[\"date\"] == datetime.date(2020, 1, 29))\n ]\n .drop(\"timestamp\", axis=1)\n .equals(df_data.drop(\"timestamp\", axis=1))\n )\n\n # Empty batch list won't error but still will emit usage stats\n batch_list = context.get_batch_list(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"DOES_NOT_EXIST\",\n )\n assert len(batch_list) == 0\n assert mock_emit.call_count == 3\n\n my_validator = context.get_validator(\n datasource_name=\"my_directory_datasource\",\n data_connector_name=\"my_filesystem_data_connector\",\n data_asset_name=\"C\",\n data_connector_query={\"batch_filter_parameters\": {\"year\": \"2019\"}},\n create_expectation_suite_with_name=\"my_expectations\",\n batch_spec_passthrough={\n \"sampling_method\": \"_sample_using_hash\",\n \"sampling_kwargs\": {\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n \"hash_value\": \"f\",\n },\n },\n )\n my_evr = my_validator.expect_column_values_to_be_between(\n column=\"d\", min_value=1, max_value=31\n )\n assert my_evr.success\n assert mock_emit.call_count == 4\n\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])\n # assert my_evr.success\n\n # No other usage stats calls detected\n assert mock_emit.call_count == 4\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_batches_are_accessible(\n monkeypatch,\n multibatch_generic_csv_generator,\n multibatch_generic_csv_generator_context,\n):\n\n context: DataContext = multibatch_generic_csv_generator_context\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n\n datasource = context.datasources[datasource_name]\n\n data_connector = datasource.data_connectors[data_connector_name]\n\n total_batches: int = 20\n file_list = multibatch_generic_csv_generator(\n data_path=data_path, num_event_batches=total_batches\n )\n\n assert (\n data_connector._get_data_reference_list_from_cache_by_data_asset_name(\n 
data_asset_name=asset_name\n )\n == file_list\n )\n\n batch_request_1 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -1,\n },\n )\n # Should give most recent batch\n validator_1 = context.get_validator(\n batch_request=batch_request_1,\n create_expectation_suite_with_name=\"my_expectation_suite_name_1\",\n )\n metric_max = validator_1.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches\n metric_value_set = validator_1.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n batch_request_2 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -2,\n },\n )\n validator_2 = context.get_validator(\n batch_request=batch_request_2,\n create_expectation_suite_with_name=\"my_expectation_suite_name_2\",\n )\n metric_max = validator_2.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches - 1\n metric_value_set = validator_2.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n for batch_num in range(1, total_batches + 1):\n batch_request = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -batch_num,\n },\n )\n validator = context.get_validator(\n batch_request=batch_request,\n create_expectation_suite_with_name=f\"my_expectation_suite_name__{batch_num}\",\n )\n metric_max = validator.get_metric(\n MetricConfiguration(\n \"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"}\n )\n )\n assert metric_max == (total_batches + 1) - batch_num\n metric_value_set = validator.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}", "def testRunConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n taskHolder = loader.taskHolders()[0]\n\n taskHolder.addVar(\n \"prefix\",\n self.__exampleTargetPrefixDirectory,\n True\n )\n\n # loading input data for the execution\n crawlerGroups = Crawler.group(\n FsCrawler.createFromPath(\n os.path.join(self.__exampleDirectory, 'textures')\n ).globFromParent()\n )\n\n resultCrawlers = []\n for group in crawlerGroups:\n if isinstance(group[0], Crawler.registeredType('texture')):\n resultCrawlers += taskHolder.run(group)\n\n targetFilePaths = list(sorted(filter(lambda x: len(x), map(lambda x: x.strip(), self.__generatedData.split('\\n')))))\n createdFilePaths = list(sorted(map(lambda x: x.var('fullPath')[len(self.__exampleTargetPrefixDirectory) + 1:].replace('\\\\', '/'), resultCrawlers)))\n\n self.assertListEqual(targetFilePaths, createdFilePaths)", "def Get_datasets(**kwargs):\n from .utils import option_printer, get_conn, 
get_param_dict, get_logger_instance\n from .cohort_tables import make_target_comp_tables\n from .table2rawseq import table_to_rawseq\n from .rawseq2multihot import rawseq_to_multihot\n from .multihot2datasets import multihot_to_datasets\n import os, logging\n from importlib import reload\n \n ## get params\n param_dict = get_param_dict(kwargs['DS_PARAMS_FILE_NAME'], kwargs['CONFIG_FOLDER_PATH'])\n param_dict.update(kwargs)\n if not os.path.exists(param_dict['DATA_FOLDER_PATH']): os.makedirs(param_dict['DATA_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['CDM_DB']\n \n param_dict['DUMPING_PATH'] = os.path.join(param_dict['RESULT_FOLDER_PATH'], \n param_dict['PROJECT_NAME'], \n param_dict['CDM_DB_NAME'])\n if not os.path.exists(param_dict['DUMPING_PATH']): \n os.makedirs(param_dict['DUMPING_PATH'])\n \n if param_dict['PIPELINE_START_LEVEL']<3:\n param_dict['DB_CONN'], CDM_DB_NAME, RESULT_DB_NAME = get_conn(param_dict['DB_CONN_FILENAME'], \n param_dict['CONFIG_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = CDM_DB_NAME\n param_dict['RESULT_DB_NAME'] = RESULT_DB_NAME\n else:\n param_dict['RESULT_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['RESULT_DB']\n \n ## logger\n logging.shutdown()\n reload(logging)\n main_logger = get_logger_instance(logger_name='ds_pipeline', \n DUMPING_PATH=param_dict['DUMPING_PATH'], \n parent_name=False,\n stream=True)\n \n ## print params\n main_logger.info(\"\\n (params) \\n\")\n try: option_printer(main_logger, param_dict['DB_CONN'], **param_dict)\n except: pass\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [1] Make_target_comp_tables\n if param_dict['PIPELINE_START_LEVEL']<=1:\n main_logger.info(\"\\n[Level 1] Make_TARGET_COMP_tables\\n\")\n make_target_comp_tables(**param_dict)\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [2] Table to rawSeq\n if param_dict['PIPELINE_START_LEVEL']<=2:\n main_logger.info(\"\\n[Level 2] Table to rawSeq\\n\")\n table_to_rawseq(param_dict['DUMPING_PATH'], \n param_dict['DB_CONN'], param_dict['CDM_DB_NAME'], \n param_dict['DATA_FOLDER_PATH'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [3] rawSeq to multihot\n if param_dict['PIPELINE_START_LEVEL']<=3:\n main_logger.info(\"\\n[Level 3] Convert to multihot\\n\")\n rawseq_to_multihot(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['MAX_TIME_STEP'], \n param_dict['DX_ONLY'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [4] Multihot to Dataset\n if param_dict['PIPELINE_START_LEVEL']<=4:\n main_logger.info(\"\\n[Level 4] Multihot to Dataset\\n\")\n datasets = multihot_to_datasets(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['TR_RATIO'])\n \n #add info\n if param_dict['PIPELINE_START_LEVEL']<3: \n datasets.info['DB_CONN'] = param_dict['DB_CONN']\n datasets.info['CONFIG_FOLDER_PATH'] = param_dict['CONFIG_FOLDER_PATH']\n datasets.info['DATA_FOLDER_PATH'] = param_dict['DATA_FOLDER_PATH']\n datasets.info['RESULT_FOLDER_PATH'] = param_dict['RESULT_FOLDER_PATH']\n datasets.info['DB_CONN_FILENAME'] = param_dict['DB_CONN_FILENAME']\n datasets.info['DS_PARAMS_FILE_NAME'] = param_dict['DS_PARAMS_FILE_NAME']\n datasets.info['CDM_DB_NAME'] = param_dict['CDM_DB_NAME']\n datasets.info['RESULT_DB_NAME'] = param_dict['RESULT_DB_NAME']\n \n main_logger.info(\"\\n[Datasets Info.]\\n\")\n main_logger.info(\"{0:>26} {1:}\".format('[OPTION]', '[VALUE]'))\n for k in sorted(datasets.info.keys()):\n main_logger.info(\" {0:>23}: 
{1:}\".format(k, datasets.info[k]))\n \n #print(\"\\nALL DONE!!\")\n main_logger.info(\"\\n[ALL DONE!!]\\n\\n\")\n for h in list(main_logger.handlers):\n main_logger.removeHandler(h)\n h.flush()\n h.close()\n return datasets", "def test_golden_path_sql_datasource_configuration(\n mock_emit,\n caplog,\n empty_data_context_stats_enabled,\n sa,\n test_connectable_postgresql_db,\n):\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n # Everything below this line (except for asserts) is what we expect users to run as part of the golden path.\n import great_expectations as gx\n\n context = gx.get_context()\n\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n yaml_config = f\"\"\"\n class_name: SimpleSqlalchemyDatasource\n credentials:\n drivername: postgresql\n username: postgres\n password: \"\"\n host: {db_hostname}\n port: 5432\n database: test_ci\n\n introspection:\n whole_table_with_limits:\n sampling_method: _sample_using_limit\n sampling_kwargs:\n n: 10\n \"\"\"\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n assert mock_emit.call_count == 2\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[1][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_data_connector_name = mock_emit.call_args_list[1][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\"event_payload\": {}, \"event\": \"data_context.__init__\", \"success\": True}\n ),\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"SimpleSqlalchemyDatasource\",\n \"anonymized_execution_engine\": {\n \"parent_class\": \"SqlAlchemyExecutionEngine\"\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"InferredAssetSqlDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n print(json.dumps(report_object, indent=2))\n print(context.datasources)\n\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"test_df\",\n )\n # assert len(my_batch.data.fetchall()) == 10\n\n with pytest.raises(KeyError):\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"DOES_NOT_EXIST\",\n )\n\n my_validator = context.get_validator(\n datasource_name=\"my_datasource\",\n data_connector_name=\"whole_table_with_limits\",\n data_asset_name=\"test_df\",\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n )\n my_evr = my_validator.expect_table_columns_to_match_set(column_set=[])\n print(my_evr)\n\n # my_evr = my_validator.expect_column_values_to_be_between(\n # column=\"x\",\n # min_value=0,\n # max_value=4,\n # )\n # assert my_evr.success\n\n # TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"a\", \"b\", \"c\"])\n # assert my_evr.success\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_outputs(self, monkeypatch, script_runner):\n 
monkeypatch.setattr(\"builtins.input\", lambda _: \"n\")\n _ = script_runner.run(\n \"spectrafit\",\n \"spectrafit/test/test_data.txt\",\n \"-i\",\n \"spectrafit/test/test_input_2.json\",\n )\n assert len(list(Path(\".\").glob(\"*.json\"))) == 1\n assert len(list(Path(\".\").glob(\"*.csv\"))) == 3", "def get_data_config(args):\n diff_data(args, \".\")", "def test_conll_dataloader(shared_datadir, dummy_vocabs):\n token_vocab, char_vocab, label_vocab = dummy_vocabs\n # Check on the <pad> and <unk> index\n assert char_vocab.stoi[\"<pad>\"] == 1\n assert char_vocab.stoi[\"<unk>\"] == 0\n dataset = ConllDataset(os.path.join(shared_datadir, \"conll_sample.txt\"),\n token_vocab, char_vocab, label_vocab)\n dataset = DataLoader(dataset, batch_size=3,\n collate_fn=generate_sentence_batch)\n\n # Check the one batch\n for batch in dataset:\n tokens, token_chars, lengths, labels = batch\n assert lengths.tolist() == [5, 3, 1]\n assert tokens.shape == (3, 5)\n assert token_chars.shape == (15, 7) # max char len is 7\n assert labels.shape == (3, 5)\n \n assert (tokens == torch.tensor([\n [5, 6, 7, 2, 0], [6, 4, 3, 1, 1], [0, 1, 1, 1, 1]\n ])).all()\n assert (labels == torch.tensor([\n [3, 5, 6, 6, 2], [3, 6, 6, 1, 1], [3, 1, 1, 1, 1]\n ])).all()\n \n # Check padding of characters\n assert token_chars[0].tolist() == [17, 32, 47, 32, 45, 1, 1]\n assert token_chars[3].tolist() == [28, 47, 1, 1, 1, 1, 1]\n assert token_chars[7].tolist() == [35, 28, 43, 43, 52, 1, 1]\n assert (token_chars[1] == token_chars[5]).all() # \"Such\"\n assert token_chars[8].tolist() == token_chars[9].tolist() == [1] * 7\n assert token_chars[10].tolist() == [3, 32, 52, 42, 41, 30, 0]", "def test_get_data_loader():\n\n # single paired data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, PairedDataLoader)\n\n config = load_yaml(\"config/test/paired_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, PairedDataLoader)\n\n # single unpaired data loader\n config = load_yaml(\"config/test/unpaired_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n config = load_yaml(\"config/test/unpaired_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n # single grouped data loader\n config = load_yaml(\"config/test/grouped_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, GroupedDataLoader)\n\n config = load_yaml(\"config/test/grouped_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, GroupedDataLoader)\n\n # empty data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = \"\"\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert got is None\n\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = None\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert got is None\n\n # unpaired data loader with multiple dirs\n config = load_yaml(\"config/test/unpaired_nifti_multi_dirs.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n # check not 
a directory error\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] += \".h5\"\n with pytest.raises(ValueError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert \"is not a directory or does not exist\" in str(err_info.value)\n\n # check directory not existed error\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = \"/this_should_not_existed\"\n with pytest.raises(ValueError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert \"is not a directory or does not exist\" in str(err_info.value)\n\n # check mode\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n with pytest.raises(AssertionError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"example\")\n assert \"mode must be one of train/valid/test\" in str(err_info.value)", "def test_find_builder_config_code(mock_fs: testing.MockFs):\n\n class MyDataset(testing.DummyMnist): # pylint: disable=unused-variable\n \"\"\"Dummy dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n dataset_builder.BuilderConfig( # pylint: disable=g-complex-comprehension\n name=name, version='2.0.0', description=f'{name} description'\n )\n for name in ('default_config', 'other_config')\n ]\n\n # Old version from before there were configs.\n mock_fs.add_file('path/to/my_dataset/0.0.1/features.json')\n mock_fs.add_file('path/to/my_dataset/0.1.0/features.json')\n mock_fs.add_file('path/to/my_dataset/default_config/0.1.0/features.json')\n mock_fs.add_file('path/to/my_dataset/default_config/1.0.0/features.json')\n mock_fs.add_file('path/to/my_dataset/other_config/1.0.0/features.json')\n mock_fs.add_file('path/to/my_dataset/old_config/0.8.0/features.json')\n mock_fs.add_file('path/to/my_dataset/old_config/1.0.0/features.json')\n mock_fs.add_file('path/to/my_dataset/broken_config/features.json')\n\n # If code can be reached, use it to load the default config name.\n # Note that the existing version is loaded, even if the code is at a\n # more recent version.\n assert (\n _find_builder_dir('my_dataset')\n == 'path/to/my_dataset/default_config/1.0.0'\n )\n # Old version from previous configs.\n assert _find_builder_dir('my_dataset:0.0.1') == 'path/to/my_dataset/0.0.1'\n # Explicitly given version with no config, use folder without config.\n assert _find_builder_dir('my_dataset:0.1.0') == 'path/to/my_dataset/0.1.0'\n # Explicitly given version and config, use folder with config.\n assert (\n _find_builder_dir('my_dataset/default_config:0.1.0')\n == 'path/to/my_dataset/default_config/0.1.0'\n )\n # When config is explicitly given, load the last detected version.\n assert (\n _find_builder_dir('my_dataset/other_config')\n == 'path/to/my_dataset/other_config/1.0.0'\n )\n assert (\n _find_builder_dir('my_dataset/old_config')\n == 'path/to/my_dataset/old_config/1.0.0'\n )\n assert (\n _find_builder_dir('my_dataset/old_config:0.8.0')\n == 'path/to/my_dataset/old_config/0.8.0'\n )\n # When no config found, return None.\n assert _find_builder_dir('my_dataset/broken_config') is None\n assert _find_builder_dir('my_dataset/unknown_config') is None", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def find_output_dataset(stage, rconfig, data_type=None):\n\n # Use the stage-to-data mapping to find the output names\n if data_type is None:\n data_type = 
DOCUMENT_PROCESSING_IO[stage]['out']\n #for output_name in data_types:\n # Get all data sets D for input name\n dirname = os.path.join(rconfig.target_path, 'data', data_type)\n datasets1 = [ds for ds in os.listdir(dirname) if ds.isdigit()]\n datasets2 = [DataSet(stage, data_type, rconfig, ds) for ds in datasets1]\n # Filer the datasets making sure that d.trace + d.head matches\n # rconfig.pipeline(txt).trace\n datasets3 = [ds for ds in datasets2 if ds.output_matches_global_config()]\n # If there is one result, return it, if there are more than one, write a\n # warning and exit, otherwise, initialize a dataset and return it\n if len(datasets3) == 1:\n return datasets3[0]\n elif len(datasets3) > 1:\n print \"WARNING, more than one approriate training set found:\"\n for ds in datasets3:\n print ' ', ds\n sys.exit(\"Exiting...\")\n elif len(datasets3) == 0:\n highest_id = max([0] + [int(ds) for ds in datasets1])\n new_id = \"%02d\" % (highest_id + 1)\n dataset = DataSet(stage, data_type, rconfig, new_id)\n if not dataset.exists():\n dataset.initialize_on_disk()\n dataset.load_from_disk()\n print \"[%s] created %s\" % (stage, dataset)\n return dataset", "def test_main_extra_template_data_config(\n capsys: CaptureFixture, output_model, expected_output\n) -> None:\n\n input_filename = OPEN_API_DATA_PATH / 'api.yaml'\n extra_template_data = OPEN_API_DATA_PATH / 'extra_data.json'\n\n with freeze_time(TIMESTAMP):\n main(\n [\n '--input',\n str(input_filename),\n '--extra-template-data',\n str(extra_template_data),\n '--output-model',\n output_model,\n ]\n )\n\n captured = capsys.readouterr()\n assert (\n captured.out == (EXPECTED_MAIN_PATH / expected_output / 'output.py').read_text()\n )\n assert captured.err == inferred_message.format('openapi') + '\\n'", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def test_export_datasources_original(app_context, fs):\n # pylint: disable=reimported, redefined-outer-name\n import superset.cli.importexport # noqa: F811\n\n # reload to define export_dashboards correctly based on the\n # feature flags\n importlib.reload(superset.cli.importexport)\n\n runner = app.test_cli_runner()\n response = runner.invoke(\n superset.cli.importexport.export_datasources, (\"-f\", \"datasources.yaml\")\n )\n\n assert response.exit_code == 0\n\n assert Path(\"datasources.yaml\").exists()\n\n # check that file is valid JSON\n with open(\"datasources.yaml\") as fp:\n contents = fp.read()\n yaml.safe_load(contents)", "def test_execute_job_with_array_input(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\", \"script.py\"],\n \"inputs\":\n {\n \"test_int_array\": {\"type\": {\"type\": \"array\", \"items\": \"int\"}, \"inputBinding\": {\"position\": 1}},\n \"test_float_array\": {\"type\": {\"type\": \"array\", \"items\": \"float\"}},\n \"test_string_array\": {\"type\": {\"type\": \"array\", \"items\": \"string\"}},\n \"test_reference_array\": {\"type\": {\"type\": \"array\", \"items\": \"File\"}},\n \"test_int_value\": \"int\",\n \"test_float_value\": \"float\",\n \"test_string_value\": \"string\",\n \"test_reference_http_value\": \"File\",\n \"test_reference_file_value\": \"File\",\n \"test_reference_s3_value\": \"File\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": 
\"python:3.7-alpine\"\n },\n CWL_REQUIREMENT_INIT_WORKDIR: {\n \"listing\": [\n {\n \"entryname\": \"script.py\",\n \"entry\": cleandoc(\"\"\"\n import json\n import os\n input = $(inputs)\n for key, value in input.items():\n if isinstance(value, list):\n if all(isinstance(val, int) for val in value):\n value = map(lambda v: v+1, value)\n elif all(isinstance(val, float) for val in value):\n value = map(lambda v: v+0.5, value)\n elif all(isinstance(val, bool) for val in value):\n value = map(lambda v: not v, value)\n elif all(isinstance(val, str) for val in value):\n value = map(lambda v: v.upper(), value)\n elif all(isinstance(val, dict) for val in value):\n def tmp(value):\n path_ = value.get('path')\n if path_ and os.path.exists(path_):\n with open (path_, 'r') as file_:\n file_data = file_.read()\n return file_data.upper()\n value = map(tmp, value)\n input[key] = \";\".join(map(str, value))\n elif isinstance(value, dict):\n path_ = value.get('path')\n if path_ and os.path.exists(path_):\n with open (path_, 'r') as file_:\n file_data = file_.read()\n input[key] = file_data.upper()\n elif isinstance(value, str):\n input[key] = value.upper()\n elif isinstance(value, bool):\n input[key] = not value\n elif isinstance(value, int):\n input[key] = value+1\n elif isinstance(value, float):\n input[key] = value+0.5\n json.dump(input, open(\"./tmp.txt\",\"w\"))\n \"\"\")\n }\n ]\n }\n },\n \"outputs\": [{\"id\": \"output_test\", \"type\": \"File\", \"outputBinding\": {\"glob\": \"tmp.txt\"}}],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n try:\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n except colander.Invalid:\n self.fail(\"Test\")\n\n assert desc is not None\n\n test_bucket_ref = mocked_aws_s3_bucket_test_file(\n \"wps-process-test-bucket\",\n \"input_file_s3.txt\",\n \"This is a generated file for s3 test\"\n )\n\n test_http_ref = mocked_reference_test_file(\n \"input_file_http.txt\",\n \"http\",\n \"This is a generated file for http test\"\n )\n\n test_file_ref = mocked_reference_test_file(\n \"input_file_ref.txt\",\n \"file\",\n \"This is a generated file for file test\"\n )\n\n exec_body = {\n \"mode\": EXECUTE_MODE_ASYNC,\n \"response\": EXECUTE_RESPONSE_DOCUMENT,\n \"inputs\":\n [\n {\"id\": \"test_int_array\", \"value\": [10, 20, 30, 40, 50]},\n {\"id\": \"test_float_array\", \"value\": [10.03, 20.03, 30.03, 40.03, 50.03]},\n {\"id\": \"test_string_array\", \"value\": [\"this\", \"is\", \"a\", \"test\"]},\n {\"id\": \"test_reference_array\",\n \"value\": [\n {\"href\": test_file_ref},\n {\"href\": test_http_ref},\n {\"href\": test_bucket_ref}\n ]\n },\n {\"id\": \"test_int_value\", \"value\": 2923},\n {\"id\": \"test_float_value\", \"value\": 389.73},\n {\"id\": \"test_string_value\", \"value\": \"string_test\"},\n {\"id\": \"test_reference_http_value\", \"href\": test_http_ref},\n {\"id\": \"test_reference_file_value\", \"href\": test_file_ref},\n {\"id\": \"test_reference_s3_value\", \"href\": test_bucket_ref}\n ],\n \"outputs\": [\n {\"id\": \"output_test\", \"type\": \"File\"},\n ]\n }\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n proc_url = \"/processes/{}/jobs\".format(self._testMethodName)\n resp = mocked_sub_requests(self.app, 
\"post_json\", proc_url, timeout=5,\n data=exec_body, headers=self.json_headers, only_local=True)\n assert resp.status_code in [200, 201], \"Failed with: [{}]\\nReason:\\n{}\".format(resp.status_code, resp.json)\n status_url = resp.json.get(\"location\")\n\n results = self.monitor_job(status_url)\n\n job_output_path = results.get(\"output_test\")[\"href\"].split(self.settings[\"weaver.wps_output_path\"])[-1]\n tmp_file = \"{}/{}\".format(self.settings[\"weaver.wps_output_dir\"], job_output_path)\n\n try:\n processed_values = json.load(open(tmp_file, \"r\"))\n except FileNotFoundError:\n self.fail(\"Output file [{}] was not found where it was expected to resume test\".format(tmp_file))\n except Exception as exception:\n self.fail(\"An error occurred during the reading of the file: {}\".format(exception))\n assert processed_values[\"test_int_array\"] == \"11;21;31;41;51\"\n assert processed_values[\"test_float_array\"] == \"10.53;20.53;30.53;40.53;50.53\"\n assert processed_values[\"test_string_array\"] == \"THIS;IS;A;TEST\"\n assert processed_values[\"test_reference_array\"] == (\"THIS IS A GENERATED FILE FOR FILE TEST;\"\n \"THIS IS A GENERATED FILE FOR HTTP TEST;\"\n \"THIS IS A GENERATED FILE FOR S3 TEST\")\n assert processed_values[\"test_int_value\"] == 2924\n assert processed_values[\"test_float_value\"] == 390.23\n assert processed_values[\"test_string_value\"] == \"STRING_TEST\"\n assert processed_values[\"test_reference_s3_value\"] == \"THIS IS A GENERATED FILE FOR S3 TEST\"\n assert processed_values[\"test_reference_http_value\"] == \"THIS IS A GENERATED FILE FOR HTTP TEST\"\n assert processed_values[\"test_reference_file_value\"] == \"THIS IS A GENERATED FILE FOR FILE TEST\"", "def testDataStreams(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 1)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, [''])\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 0)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, [])\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 2)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, ['', 'rsrc'])", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = 
list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())", "def test_load(tmp_path, data_name, params, expect_paths):\n\n folder_path = tmp_path\n dsets = pennylane.data.data_manager.load(\n data_name=data_name,\n folder_path=folder_path,\n **params,\n )\n\n assert {Path(dset.bind.filename) for dset in dsets} == {\n Path(tmp_path, path) for path in expect_paths\n }", "def testLoadConfigs_loadMultipleLab(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(\n os.path.dirname(config_path), lab_config.IsYaml))\n with self.assertRaisesRegex(\n lab_config.ConfigError, r'There are multiple config files.'):\n pool.LoadConfigs()", "def compare_resources(data_packages_path):\n logger.info(f'Comparing resources at {data_packages_path}')\n\n def f(rows):\n\n # Calculate minimum statistics\n total = 0\n\n errors = []\n deleted = 0\n found_update = 0\n found_not_update = 0\n\n for row in rows:\n total += 1\n ckan_id = row['id']\n \n valid, error = compare_resources_validate(row)\n if not valid:\n errors.append(error)\n row['comparison_results'] = 
{'action': 'error', 'ckan_id': ckan_id, 'new_data': None, 'reason': error}\n yield row\n continue\n \n extras = row.get('extras', False)\n identifier = [extra['value'] for extra in extras if extra['key'] == 'identifier'][0]\n\n file_exists, expected_path = compare_resources_resource_exists(data_packages_path, identifier)\n if not file_exists:\n deleted += 1\n row['comparison_results'] = {\n 'action': 'delete', \n 'ckan_id': ckan_id, \n 'new_data': None, \n 'reason': 'It no longer exists in the data.json source'\n }\n logger.info(f'Mark for delete: ID {ckan_id}')\n yield row\n continue\n\n require_update, data_json_data = compare_resource_require_update(expected_path, row)\n if require_update:\n row['comparison_results'] = {\n 'action': 'update',\n 'ckan_id': ckan_id,\n 'new_data': data_json_data,\n 'reason': f'The resource is older'\n }\n logger.info(f'Mark for update: ID {ckan_id}')\n found_update += 1\n else:\n row['comparison_results'] = {\n 'action': 'ignore',\n 'ckan_id': ckan_id,\n 'new_data': None, # don't need this\n 'reason': 'The resource is updated'\n }\n found_not_update += 1\n logger.info(f'Mark for ignore: ID {ckan_id}')\n yield row\n \n # detect new datasets\n news = 0\n for data_json_data in compare_resource_get_new_datasets(data_packages_path):\n total += 1\n news += 1 \n row = {\n 'comparison_results': {\n 'action': 'create',\n 'ckan_id': None,\n 'new_data': data_json_data,\n 'reason': 'Not found in the CKAN results'}\n }\n\n yield row\n\n found = found_not_update + found_update\n\n total_errors = len(errors)\n stats = f\"\"\"Compare total processed: {total}.\n {total_errors} errors.\n {deleted} deleted.\n {found} datasets found\n ({found_update} needs update, {found_not_update} are the same).\n {news} new datasets.\"\"\"\n\n logger.info(stats)\n\n return f", "def test_yaml(self):\n\n # Check yml file can be loaded correctly\n with open(\"{}/app_spec.yml\".format(self.APP_PATH), mode='r',\n encoding=\"utf-8\", errors='ignore') as stream:\n # Load yaml file\n try:\n yaml_obj = yaml.load(stream) or {}\n except Exception as e:\n self.fail(msg=\"app_spec.yml cannot be loaded\")\n ll = ['input', 'output']\n check_list = ['value_type']\n\n for l in ll:\n l_obj = yaml_obj.get(l, None)\n # Check [input] and [output] section\n with self.subTest(name=f\"[{l}] section\"):\n self.assertIsNotNone(\n l_obj,\n msg=f\"[{l}] section missing in app_spec.yml\")\n\n for k, v in l_obj.items():\n for cl in check_list:\n with self.subTest(name=f\"[{l}:{k}]\"):\n value = v.get(cl)\n self.assertIsNotNone(\n value,\n msg=f\"[{k}/{cl}] missing in app_spec.yml\")\n if l == 'input' and 'value_range' in v and v['value_range']:\n with self.subTest(\n name=f\"[input:{k}] section\"):\n self.assertTrue(\n type(v['value_range']) is list,\n msg=f\"value_range [input:{k}] not a list\")", "def test_data_infos__default_db_directories(self):\n test_dataset_root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007')\n custom_ds = self.dataset_class(\n data_root=test_dataset_root,\n ann_file=osp.join(test_dataset_root, 'ImageSets', 'Main',\n 'trainval.txt'),\n pipeline=[],\n classes=('person', 'dog'),\n test_mode=True)\n\n self.assertListEqual([{\n 'id': '000001',\n 'filename': osp.join('JPEGImages', '000001.jpg'),\n 'width': 353,\n 'height': 500\n }], custom_ds.data_infos)", "def compare_resource_get_new_datasets(data_packages_path):\n \n for name in glob.glob(f'{data_packages_path}/data-json-*.json'):\n package = Package(name)\n data_json = package.get_resource('inline')\n data_json_data = data_json.source\n\n yield 
data_json_data\n\n # Delete the data.json file\n os.remove(name)", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def main(argv):\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--bundle-uuid', '-b',\n default=TestTDRHCAPlugin.bundle_fqid.uuid,\n help='The UUID of the existing DCP/1 canned bundle.')\n parser.add_argument('--source-id', '-s',\n default=TestTDRHCAPlugin.source.id,\n help='The UUID of the snapshot/dataset to contain the canned DCP/2 bundle.')\n parser.add_argument('--version', '-v',\n default=TestTDRHCAPlugin.bundle_fqid.version,\n help='The version for any mock entities synthesized by the script.')\n parser.add_argument('--input-dir', '-I',\n default=os.path.join(config.project_root, 'test', 'indexer', 'data'),\n help='The path to the input directory.')\n parser.add_argument('--mock-supplementary-files', '-S',\n type=int,\n default=0,\n help='The number of mock supplementary files to add to the output.')\n args = parser.parse_args(argv)\n\n paths = file_paths(args.input_dir, args.bundle_uuid)\n\n log.debug('Reading canned bundle %r from %r', args.bundle_uuid, paths['dss'])\n with open(paths['dss']['manifest']) as f:\n manifest = json.load(f)\n with open(paths['dss']['metadata']) as f:\n metadata = json.load(f)\n\n dss_source = DSSSourceRef(id='',\n spec=SimpleSourceSpec(prefix=Prefix.of_everything,\n name=config.dss_endpoint))\n dss_bundle = DSSBundle(fqid=SourcedBundleFQID(source=dss_source,\n uuid=args.bundle_uuid,\n version=''),\n manifest=manifest,\n metadata_files=metadata)\n\n tdr_source = TDRSourceRef(id=args.source_id,\n spec=TDRSourceSpec(prefix=Prefix.of_everything,\n project='test_project',\n name='test_name',\n is_snapshot=True))\n tdr_bundle = dss_bundle_to_tdr(dss_bundle, tdr_source)\n\n add_supp_files(tdr_bundle,\n num_files=args.mock_supplementary_files,\n version=args.version)\n\n log.debug('Writing converted bundle %r to %r', args.bundle_uuid, paths['tdr'])\n with write_file_atomically(paths['tdr']['result']) as f:\n json.dump({\n 'manifest': tdr_bundle.manifest,\n 'metadata': tdr_bundle.metadata_files\n }, f, indent=4)\n\n with write_file_atomically(paths['tdr']['tables']) as f:\n 
json.dump(dump_tables(tdr_bundle), f, indent=4)", "def _get_setup(self, dataset_name):\n for potential_setup in self.setup:\n for dataset in potential_setup[\"datasets\"]:\n if dataset_name in dataset:\n test_setup = potential_setup\n self.io_args.color = os.path.join(self.io_args.input_root, dataset)\n return test_setup" ]
[ "0.6569703", "0.63541687", "0.6163136", "0.5435593", "0.52773416", "0.5255355", "0.5180936", "0.510481", "0.509443", "0.5093699", "0.50851494", "0.50355923", "0.50091726", "0.50006497", "0.499144", "0.49418464", "0.49240813", "0.4921885", "0.4911996", "0.49098986", "0.48707098", "0.48628414", "0.48610318", "0.4833452", "0.48309004", "0.4826116", "0.4825737", "0.48250195", "0.4809915", "0.47886524" ]
0.65214986
1
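The report_object in the row above notes that a RuntimeDataConnector has no data_asset_names until a RuntimeBatchRequest supplies them. For context, here is a minimal sketch of such a request, reusing the datasource and connector names from the test's YAML config; the DataFrame, asset name, and identifier value are hypothetical and not part of the dataset row.

# Sketch: passing an in-memory batch to the RuntimeDataConnector at request time.
import pandas as pd
from great_expectations.core.batch import RuntimeBatchRequest

df = pd.DataFrame({"a": [1, 2, 3]})  # hypothetical in-memory data

batch_request = RuntimeBatchRequest(
    datasource_name="my_directory_datasource",
    data_connector_name="default_runtime_data_connector_name",
    data_asset_name="my_runtime_asset",  # the asset name exists only because it is passed here
    runtime_parameters={"batch_data": df},
    batch_identifiers={"default_identifier_name": "run_2023_01_01"},
)

# A validator obtained with this request (e.g. context.get_validator(batch_request=batch_request, ...))
# would then see "my_runtime_asset" even though test_yaml_config reported zero data_assets.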
Used by Flask to initialize a user.
def init_user( app ):
    login_manager.init_app(app)
    app.register_blueprint(user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init():\n create_user(app)\n get_all_user()", "def user_init(self):\n pass", "def setup_user():\n if 'auth_user' in flask.session:\n user = models.User.query.get(flask.session['auth_user'])\n if user is None:\n # old bad cookie, no good\n del flask.session['auth_user']\n # save the user in `flask.g`, which is a set of globals for this request\n flask.g.user = user", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def initialize(self, *a, **kw):\n\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.get_uid_from_cookie()\n self.user = uid and User.get_by_id(int(uid))", "def init_default_users():\n from flask import current_app as app\n with app.app_context():\n notion_uname = app.config.get(\"NOTION_CRONJOB_USERNAME\")\n notion_passwd = app.config.get(\"NOTION_CRONJOB_PASSWORD\")\n\n if notion_uname and notion_passwd:\n try:\n User.createOne(\n username=notion_uname,\n password=notion_passwd\n )\n except NotUniqueError:\n app.logger.info(\"Notion Job User already exists!\")\n except Exception as err:\n app.logger.error(\"Notion Job User was not created!\", err)\n else:\n app.logger.info(\"Created Notion Job User Successfully!\")", "def new_user():\n pass", "def default_user(self):\n self.user = self.create_user(create_token=True)\n return", "def initialize(self):\n self.login()", "def init_user(device, user_id=None):\n logging.debug(\"Initializing user: device={0} user_id={1}\".format(device, user_id))\n if user_id:\n data = {\n \"device\": {\n \"id\": device,\n \"platform\": \"other\"\n },\n \"userId\": user_id\n }\n else:\n data = {\n \"device\": {\n \"id\": device,\n \"platform\": \"other\"\n }\n }\n return ask('init', data, 'post')", "def rpc_client_initialize(self):\n\t\tusername = self.basic_auth_user\n\t\tif not username:\n\t\t\treturn True\n\t\tsession = db_manager.Session()\n\t\tif not db_manager.get_row_by_id(session, db_models.User, username):\n\t\t\tuser = db_models.User(id=username)\n\t\t\tsession.add(user)\n\t\t\tsession.commit()\n\t\tsession.close()\n\t\treturn True", "def _load_user():\n user = session.get('user')\n\n if user is None:\n g.user = None\n else:\n g.user = user", "def create_user(self):\n # TODO-ROB: This is used ONLY when the user registers in flask\n # TODO-ROB: Create the cookiecutter.json file\n # extra_context overrides user and default configs\n cookiecutter(self.user_cookie, no_input=True, extra_context={\"user_name\": self.user}, output_dir=self.users)", "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "async def prepare(self):\n\n # Read the secure cookie which exists if we are in an authenticated\n # context (though not if the caimira webservice is running standalone).\n session = json.loads(self.get_secure_cookie('session') or 'null')\n\n if session:\n self.current_user = AuthenticatedUser(\n username=session['username'],\n email=session['email'],\n fullname=session['fullname'],\n )\n else:\n self.current_user = AnonymousUser()", "def user():", "def init_login(app_):\n\n login_manager = LoginManager()\n 
login_manager.init_app(app_)\n\n @login_manager.user_loader\n def load_user(user_id):\n return models.User.query.get(user_id)", "def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n Role.insert_roles()\n if self.role is None:\n if self.email == current_app.config[\"FLASKY_ADMIN\"]:\n self.role = Role.query.filter_by(permissions=0xff).first()\n if self.role is None:\n self.role = Role.query.filter_by(default=True).first()\n # self.image_url = photos.url(\"user/default.png\")\n self.image_url = self.avatar(128)", "def __init__(self):\n self.user = \"\"\n self.password = \"\"", "def on_start(self):\n admin_user = os.environ['ADMIN_USER']\n admin_password = os.environ['ADMIN_PASSWORD']\n admin_domain_name = os.environ['ADMIN_DOMAIN_NAME']\n admin_project_id = os.environ['ADMIN_PROJECT_ID']\n HEADERS['X-Auth-Token'] = self._get_token(admin_user,\n admin_password,\n admin_domain_name,\n project_id=admin_project_id)\n # Create test user\n self.username = 'test_user'\n self.password = 'Password1'\n self.user_domain_id = 'default'\n self.user_domain_name = 'Default'\n self.project_id = self._create_project()['project']['id']\n self._create_user(self.username, self.password, self.user_domain_id,\n self.project_id)", "def setUp(self):\n User.users = {}\n self.app = User('[email protected]', 'admin', 'admin')\n # Set some default user data\n self.user_data = {\n 1: {\n 'email': '[email protected]',\n 'username': 'admin',\n 'password': 'admin' \n }\n \n }", "def create_and_login(self):\n with self.context():\n user = self.factory(meido.factories.UserFactory)\n self.client.post('/management/login', data={\n 'username': 'admin', 'password': 'pretender'\n })", "def on_user_create(self, user):", "def initDb():\n createDb()\n admin = User(\n name=\"faby\",\n lastname=\"star\",\n username=\"faby\",\n email=\"[email protected]\",\n isAdmin=True,\n cellphone=\"0983856136\",\n )\n admin.onSetPassord(\"faby123\")\n db.session.add(admin)\n db.session.commit()", "def auth(self, user):", "def make_user_global():\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n else:\n g.user = None", "def populate_user_data():\n try:\n db = mongo_client.MongoClient(config.MONGO_URI).twitter\n db.user.insert_one(\n {\n 'username': 'admin',\n 'password': 'admin',\n }\n )\n print(\"Created an admin account\")\n except Exception as e:\n print(e)", "def register_user():\n pass", "def before_request():\n g.user = None\n if 'user_id' in session:\n g.user = User.query.get(session['user_id'])" ]
[ "0.829152", "0.758554", "0.7491351", "0.73094636", "0.72450036", "0.7144329", "0.7079882", "0.6995411", "0.69382924", "0.69301695", "0.68964845", "0.68418664", "0.68159044", "0.6811671", "0.6796923", "0.6795561", "0.67746", "0.67692834", "0.6762055", "0.6730134", "0.67115355", "0.66205126", "0.6615118", "0.6614266", "0.6605725", "0.6592196", "0.6589177", "0.65877247", "0.6573804", "0.6570999" ]
0.77850324
1
Splits X in time if it is chronologically sorted.
def time_split(X, test_size=0.1):
    split_index = int(len(X)*(1 - test_size))
    return X[:split_index], X[split_index:]
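A quick illustration of how the time_split above behaves; the sample series below is hypothetical, and any chronologically sorted sequence (list, array, or DataFrame slice) would split the same way:

# Hypothetical sorted series of 10 daily timestamps
X = [f"2024-01-{day:02d}" for day in range(1, 11)]

train, test = time_split(X, test_size=0.1)  # split_index = int(10 * 0.9) = 9

print(train)  # the first 9 timestamps, in order
print(test)   # ['2024-01-10'], the most recent 10% held out for testing

Because the split is purely positional, it only yields a valid past/future separation when X really is sorted by time, as the query states.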
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_by_timegap(X, timename='time', hours=1):\n \n time = X[timename].values\n dt = np.diff(time)\n\n # print(len(time))\n \n i = [int(ii) for ii in np.where(dt>np.timedelta64(hours, 'h'))[0]] + [len(time)-1]\n i = [-1] + list(set(i))\n i.sort()\n \n # print('Split index')\n # print(i)\n \n Xs = [X.isel({timename: np.arange(i[j]+1, i[j+1])}) for j in np.arange(0, len(i)-1)]\n \n return Xs", "def _splitTime(self, time): \n if (time):\n x = re.split(\"[-\\/\\s:]\", time)\n else:\n x = []\n # Pad the list to four elements (year,month,day,hour)\n while (len(x) < 4):\n x.append(None)\n return x", "def _chunk_time(x, samp_buffer=0):\n if samp_buffer < 0:\n raise ValueError(\n 'Buffer between signal peaks must be a positive number')\n if samp_buffer != int(samp_buffer):\n raise ValueError('Number of samples must be an integer')\n\n if type(x[0]) == np.bool_:\n Xs = np.arange(len(x))\n x = Xs[x]\n X = len(x)\n\n cur_start = x[0]\n cur_samp = x[0]\n Nchunk = 0\n chunks = []\n for i in range(1, X):\n if x[i] > (cur_samp + samp_buffer + 1):\n if Nchunk == 0:\n chunks = [cur_start, cur_samp]\n else:\n chunks = np.vstack([chunks, [cur_start, cur_samp]])\n\n Nchunk = Nchunk + 1\n cur_start = x[i]\n\n cur_samp = x[i]\n\n # Add final row to chunk\n if Nchunk == 0:\n chunks = [[cur_start, cur_samp]]\n else:\n chunks = np.vstack([chunks, [cur_start, cur_samp]])\n\n return chunks", "def split_by(self):\r\n return 'time'", "def split_before_after(time, columns): \n if time is None or np.isnan(time):\n return [], [], []\n \n currentday = int(time)//24\n before = columns[:currentday]\n current = columns[currentday]\n after = columns[currentday+1:]\n return (before, current, after)", "def splitCandles(data):\n\n premarket = data[(data['Time'] < datetime.time(9,30))]\n regularMarket = data[(data['Time'] > datetime.time(9, 30)) & (data['Time'] < datetime.time(16,0))]\n afterhour = data[(data['Time'] > datetime.time(16,0))]\n\n return premarket, regularMarket, afterhour", "def split(self, X):", "def split(self, X, y=None, group=None):\n\n # Initiate loop variables\n trainset = []\n testset = []\n train_index = 0\n test_index = 0\n tsplit = self.startdate + self.traindur\n\n # Adjust start index to correspond to start date\n while self.dates[train_index] < self.startdate:\n train_index += 1\n\n n_pos = 0\n while tsplit + self.gap + self.testdur < self.enddate:\n # Set test index to correspond to appropriate date\n test_index = train_index\n while self.dates[test_index] < tsplit + self.gap:\n test_index += 1\n\n # Build training set\n while self.dates[train_index] < tsplit:\n trainset.append(train_index)\n train_index += 1\n\n # Build test set\n testset = []\n while self.dates[test_index] < tsplit + self.gap + self.testdur:\n testset.append(test_index)\n test_index += 1\n if y[test_index] == 1:\n n_pos += 1\n\n if self.debug:\n print(str(len(trainset)) + ' ' + str(len(testset)) + ' ' \\\n + str(n_pos) + ' ' + str(self.dates[test_index]))\n n_pos = 0\n\n # Loop update\n tsplit += self.update\n\n yield trainset, testset", "def split_by_time(datafile, portion):\n datafile = datafile.sort_values(['user_id', 'date'], ascending=[True, True])\n datafile = datafile.reset_index(drop=True)\n test = datafile[datafile['user_id']=='-1']\n cnt = 0\n for i in range(1, len(datafile)):\n cnt += 1\n if datafile['user_id'][i] != datafile['user_id'][i-1] or i == (len(datafile) - 1):\n j = round(cnt*portion)\n test = test.append(datafile.iloc[(i - int(j)):i])\n cnt = 0\n datafile = datafile.drop(datafile.index[test.index])\n 
datafile = datafile.reset_index(drop=True)\n test = test.reset_index(drop=True)\n return test, datafile", "def split(self, time: float) -> Tuple['Trajectory','Trajectory']:\n if time <= self.times[0]:\n #split before start of trajectory\n return self.constructor()([time],[self.milestones[0]]),self.constructor()([time]+self.times,[self.milestones[0]]+self.milestones)\n elif time >= self.times[-1]:\n #split after end of trajectory\n return self.constructor()(self.times+[time],self.milestones+[self.milestones[-1]]),self.constructor()([time],[self.milestones[-1]])\n i,u = self.getSegment(time)\n assert i >= 0,\"getSegment returned -1? something must be wrong with the times\"\n #split in middle of trajectory\n splitpt = self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])\n front = self.constructor()(self.times[:i+1],self.milestones[:i+1])\n back = self.constructor()(self.times[i+1:],self.milestones[i+1:])\n if u > 0:\n front.times.append(time)\n front.milestones.append(splitpt)\n if u < 1:\n back.times = [time] + back.times\n back.milestones = [splitpt] + back.milestones\n return (front,back)", "def get_time_breaks(userid, split_by, c):\n result = pd.read_sql_query(\"SELECT time FROM user_insights WHERE userid = %s;\" % userid, c) \n if split_by == \"insight\":\n timestamps = pd.to_datetime(pd.Series(np.insert(result.values,0,0)))\n timestamps.sort()\n return timestamps\n elif split_by == \"hour\":\n timestamps = pd.to_datetime(pd.Series(result.values[:,0]))\n timestamps = pd.date_range(timestamps.min(), timestamps.max(), freq='H')\n timestamps = np.insert(timestamps.values, 0, 0)\n return pd.Series(pd.to_datetime(timestamps))", "def _splitPoints(self, points, split):\n # validate split\n if not split:\n return [points]\n\n # complete split with adding start and end frames\n if split[0] != 0:\n split.insert(0, 0)\n\n if split[-1] != len(points):\n split.append(len(points))\n\n # make sure split is sorted and doesn't contain any duplicates\n split = list(set(split))\n split.sort()\n\n # split range for looping\n splitA = split[:-1]\n splitB = split[1:]\n\n # get lists\n return [points[a:b + 1] for a, b in zip(splitA, splitB)]", "def test_split_ranges(self):\n start = datetime.utcnow() - pd.Timedelta(\"5H\")\n end = datetime.utcnow() + pd.Timedelta(\"5min\")\n delta = pd.Timedelta(\"1H\")\n\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)\n\n st_times = [start_tm[0] for start_tm in ranges]\n for end_time in (end_tm[1] for end_tm in ranges):\n self.assertNotIn(end_time, st_times)\n\n end = end + pd.Timedelta(\"20min\")\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)", "def tim_sort(li: Sequence) -> List:\n minrun = find_minrun(len(li))\n \n for start in range(0, len(li), minrun):\n # Note that insertion_sort sorts [left, right)\n end = min(start + minrun, len(li))\n insertion_sort(li, start, end)\n \n size = minrun\n while size < len(li):\n for left in range(0, len(li), 2 * size):\n # Since [left : left+size] and [left+size : left+2*size] have been sorted \n # (when size=minrun, these two have been sorted by insertion_sort; when \n # size is doubled, they are sorted by the previous loop), we can use merge.\n mid = min(left + size, len(li))\n right = min(left + 2 * size, len(li))\n merge(li, left, mid, 
right)\n size *= 2", "def _split_date_range(start, end, intv):\n previous = start\n diff = (end - start) / intv\n for i in range(1, intv):\n current = start + diff * i\n yield (previous, current)\n previous = current\n yield (previous, end)", "def split_half(slice):\n slice = set_date(slice)\n slice['Date'] = pd.to_datetime(slice['Date'])\n col_names = ['Date', 'Label', 'Text Extract', 'Processed']\n slice = slice[col_names]\n max_date = slice['Date'].max()\n min_date = slice['Date'].min()\n split_day = min_date + (max_date-min_date) // 2\n slice_b = slice[slice['Date'] > split_day]\n slice = slice[slice['Date'] <= split_day]\n return slice, slice_b", "def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods", "def sort_by_time(pairs):\n pairs = sorted(pairs, key=lambda line: line[2], reverse=False)\n order = 0\n out = []\n for i in range(len(pairs)):\n if i != 0 and pairs[i][2] == pairs[i - 1][2]:\n out += [(pairs[i][0], pairs[i][1], order)]\n else:\n order += 1\n out += [(pairs[i][0], pairs[i][1], order)]\n return out", "def intervals(data, x, dt_col, mode='first', freq=60):\n\n if type(freq) == type('s'):\n if freq == 'quarter':\n freq = 240\n elif freq == 'half':\n freq = 720\n elif freq == 'full':\n freq = 1440\n elif freq == 'week':\n freq = 10080\n elif freq == 'month':\n freq = 32400\n elif freq == 'year':\n freq = 525600\n\n freq = str(freq) + \"Min\"\n\n time_temp = data[[x, dt_col]].set_index(dt_col)\n time_temp = time_temp.groupby(pd.Grouper(freq=freq, label='right'))\n\n return groupby_func(data=time_temp, func=mode)", "def split(self, x):\r\n if x >= self.n2.x or x <= self.n1.x: return [self]\r\n n_intermediate = Node.MiddleNode(x=x)\r\n bar1 = BeamElement(nodes=[self.n1, n_intermediate], section=self.section, material=self.material)\r\n bar2 = BeamElement(nodes=[n_intermediate, self.n2], section=self.section, material=self.material)\r\n return [bar1, bar2]", "def _sort_time(self):\n time = np.copy(self.data[\"time\"][:])\n ind_sorted = np.argsort(time)\n ind_valid: list[int] = []\n for ind in ind_sorted:\n if time[ind] not in time[ind_valid]:\n ind_valid.append(ind)\n n_time = len(time)\n for key, array in self.data.items():\n if not hasattr(array, \"shape\"):\n continue\n if array.ndim == 1 and array.shape[0] == n_time:\n self.data[key] = self.data[key][ind_valid]\n if array.ndim == 2 and array.shape[0] == n_time:\n self.data[key] = self.data[key][ind_valid, :]", "def prepare_data(x_data):\n x_data = sorted(x_data)\n y_data = np.array(range(len(x_data)))/(len(x_data)-1)\n for idx in range(len(x_data)):\n if (timeout != None) and (x_data[idx] >= timeout):\n x_data[idx] = timeout\n y_data[idx] = y_data[idx-1]\n return (x_data, y_data)", "def divide_feature_containing_multiple_timexes(self, featurelist, timexList):\n \n if not timexList:\n return featurelist\n if not featurelist:\n return []\n\n #sentences = util.sentence_tokenize(text)\n text = self.text.lower()\n \n extraFeatureList = []\n for index, feature in enumerate(featurelist):\n start_char_feat = feature.getStartPos()\n end_char_feat = feature.getEndPos()\n #timexes = [t for t in timexList if t.getRole()!='IGNORE' and t.getType()=='DATE' and 
t.getStartPos()>=feature.getStartPos() and t.getStartPos()<=end_char_feat]\n timexes = [t for t in timexList if t.getRole()!='IGNORE' and (t.getType()=='DATE' or t.getType()=='REL')\n and t.getStartPos()>=feature.getStartPos() and t.getStartPos()<=end_char_feat]\n \n if timexes:\n strFeat = feature.getString()\n words_feat = nltk.word_tokenize(strFeat)\n ptFeat = 0\n cur = start_char_feat\n lastPos = end_char_feat\n ##: add a fake timex so that the feature scan can go to the end \n timexes.append(timexan.Timex3(lastPos, lastPos, None, None, ''))\n newFeatureStringList = []\n for t in timexes:\n tpos = t.getStartPos() \n txt = self.clean_text(text[cur:tpos])\n words_sent = nltk.word_tokenize(txt)\n newFeats = []\n for w in words_sent:\n if w in words_feat: # and not w in newFeats:\n newFeats.append(w)\n words_feat.remove(w)\n newFeatStr = ' '.join(newFeats)\n \n \n ##: although 'cur' is not necessarily the exact position of this feature segment, \n ##: it doesn't change its relative position with other features and timexes\n newFeatStr = newFeatStr.strip(', ')\n newFeatStr = newFeatStr.replace(' , ', ', ')\n if newFeatStr.startswith('and '):\n newFeatStr = newFeatStr[4:]\n if newFeatStr!='':\n newFeatureStringList.append(newFeatStr)\n \n cur = tpos + len(t.getString())\n \n if len(newFeatureStringList) > 1:\n featObjList = []\n for featStr in newFeatureStringList:\n (start_new_feat, end_new_feat) = util.find_sub_text_range(text[start_char_feat:end_char_feat], featStr)\n start_new_feat += start_char_feat\n end_new_feat += start_char_feat\n tks = re.split(', | ',featStr)\n tks = nltk.word_tokenize(featStr)\n tags = [(w, t) for (w,t) in feature.getTags() for tg in tks if w==tg]\n featObjList.append(Feature((feature.getType(), featStr, feature.getSentNum(), tags, start_new_feat, end_new_feat)))\n \n featurelist[index] = featObjList[0]\n for i in range(1, len(featObjList)):\n extraFeatureList.append(featObjList[i])\n \n for f in extraFeatureList:\n featurelist.append(f)\n \n featurelist.sort(key= lambda f:f.getStartPos())\n \n return featurelist", "def split(self, x):\r\n new_beams = np.array([])\r\n for bar in self.bar_elements:\r\n new_beams = np.concatenate((new_beams, bar.split(x)))\r\n return BeamElements(new_beams)", "def tim_sort(lst):\n length = len(lst)\n runs, sorted_runs = [], []\n new_run = [lst[0]]\n sorted_array = []\n i = 1\n while i < length:\n if lst[i] < lst[i - 1]:\n runs.append(new_run)\n new_run = [lst[i]]\n else:\n new_run.append(lst[i])\n i += 1\n runs.append(new_run)\n\n for run in runs:\n sorted_runs.append(insertion_sort(run))\n for run in sorted_runs:\n sorted_array = merge(sorted_array, run)\n\n return sorted_array", "def task_get_time_slices(\n self, timestamp: datetime = None\n ) -> List[Tuple[datetime, datetime]]:\n total_streams: int = self._config[\"graph_streams\"]\n\n t_now: datetime = (\n timestamp.replace(microsecond=0)\n if timestamp is not None\n else datetime.utcnow().replace(microsecond=0)\n )\n\n t_lag: timedelta = timedelta(seconds=self._config[\"graph_timelag\"])\n t_sec: timedelta = timedelta(seconds=1)\n t_delta: timedelta = timedelta(seconds=self._config[\"graph_stream_frame\"])\n\n frame_end: datetime = t_now - t_lag - t_sec\n frame_start: datetime = frame_end + t_sec - t_delta * total_streams\n\n self._logger.info(\n \"Split [%s - %s] into %s slices\",\n frame_start.isoformat(),\n frame_end.isoformat(),\n total_streams,\n )\n\n result: List[Tuple[datetime, datetime]] = []\n\n for i in range(total_streams):\n slice_start: datetime = frame_end + 
t_sec - t_delta * (i + 1)\n slice_end: datetime = frame_end - t_delta * i\n\n result.append((slice_start, slice_end))\n\n return result", "def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))", "def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))", "def split_batches(filenames):\n by_time = {}\n for path_name in filenames:\n file_name = path.basename(path_name)\n parsed_fn = parse_agdc_fn(file_name)\n dt = parsed_fn['datetime']\n by_time.setdefault(dt, []).append((path_name, parsed_fn))\n\n rv = list(by_time.values())\n\n for group in rv:\n # Will raise exception if group is non-homogeneous\n check_sane(parsed for _, parsed in group)\n\n return rv", "def interval_split(a,b,split_ps):\n ps = [a] + [s for s in sorted(split_ps) if a < s < b] + [b]\n return [(p1,p2) for p1,p2 in zip(ps,ps[1:])]" ]
[ "0.69730914", "0.6158432", "0.6031342", "0.5992386", "0.5956518", "0.5774354", "0.5740947", "0.5668877", "0.5631412", "0.5549076", "0.55123365", "0.5393284", "0.5385934", "0.53507435", "0.530311", "0.5302109", "0.52849704", "0.5250058", "0.5211332", "0.51921046", "0.51773995", "0.5152388", "0.51190054", "0.5107278", "0.51056916", "0.50947964", "0.5043088", "0.5043088", "0.5033579", "0.5031859" ]
0.6935556
1
_Implement this method_ Write a function that takes as input an input file and an output file name. Read the input file and write to the output file the courses listed with the semester that they occur in. For example 'cs 121' in the input file would be 'cs 121 fall' in the output file.
def addSemester(inputFile, outputFile):
    fall_classes = ['cs 121', 'cs 223', 'cs 260', 'cs 215']
    spring_classes = ['cs 122', 'cs 166', 'cs 224', 'cs 251', 'cs 261']
    with open(inputFile, 'r') as f:
        newlist = f.readlines()
    for i in range(len(newlist)):
        newlist[i] = newlist[i].strip()
    print(newlist)
    for i in newlist:
        print(i)
    print("-----------------------")
    # this is all taking each line in the text and converting it into a list while also getting rid of any new lines to make it easier to concatanate fall or spring
    with open(outputFile, 'w') as f:
        index = 0  # this is a counter that is incrementing once every loop and is being used to index into the list
        for i in newlist:
            if i in fall_classes:
                newlist[index] += " fall\n"
                index += 1
            elif i in spring_classes:
                newlist[index] += " spring\n"
                index += 1
        original = ''.join(newlist)  # this brings together the finalized list into the original format just with the corresponding fall or spring
        print(original)
        f.write(original)
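As a usage sketch (the file names and course lines below are made up), writing a small input file and running addSemester on it would produce the annotated copy the query describes:

# Hypothetical input: one known course code per line
with open("courses.txt", "w") as f:
    f.write("cs 121\ncs 122\ncs 260\n")

addSemester("courses.txt", "courses_with_semester.txt")

with open("courses_with_semester.txt") as f:
    print(f.read())
# Expected contents, given the hard-coded fall/spring lists:
# cs 121 fall
# cs 122 spring
# cs 260 fall

Note that index only advances when a line matches one of the two hard-coded lists, so this implementation assumes every input line is a known course code.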
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_to_latex(codelist, unwanted_courses):\n # TODO: investigate a way to add large amounts of text outside of the\n # function\n abstract01 = \"I created this document to practice parsing html and using\\\n tools like Beautiful Soup which I've previously had little experience\\\n in. As a result, it's not perfect.\\\\newline\\\n It is also a slightly condensed all-in-one-place look at a selection\\\n of courses that are available for fourth year computer science\\\n students at the University of Glasgow. For the purposes of clarity I\\\n have removed several courses from this selection. The following\\\n courses have been omitted:\"\n abstract02 = \"For more insight into the project, to report issues or to\\\n inspect the code, have a look at the GitHub:\\\n \\\\url{https://github.com/IS0metric/course-ripper}\"\n unincluded = create_not_included_list(unwanted_courses)\n with open('courses.tex', 'w') as f:\n # TODO Try and move all this to a separate function?\n # TODO: Check if it's more efficient to write a single, massive string\n # to file\n f.write('\\\\documentclass{hitec}\\n')\n f.write('\\\\usepackage[document]{ragged2e}\\n')\n f.write('\\\\usepackage{url}\\n')\n f.write('\\\\usepackage{hyperref}\\n')\n f.write('\\\\setcounter{tocdepth}{4}\\n')\n f.write('\\\\begin{document}\\n')\n f.write('\\\\title{Fourth Year (2016-17) Courses}\\n')\n f.write('\\\\author{Jack Parkinson}\\n')\n f.write('\\\\date{August 2016}\\n')\n f.write('\\\\maketitle\\n')\n f.write('\\\\abstract{' + abstract01 + unincluded + abstract02 + '}\\n')\n f.write('\\\\newpage\\n\\n')\n f.write('\\\\tableofcontents\\n')\n f.write('\\\\newpage\\n\\n')\n # TODO: Look into alternatives to the three lists\n all_courses = []\n sem1_courses = []\n sem2_courses = []\n for code in codelist:\n course = bsoup(get_coursepage(code))\n if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':\n all_courses.append(course)\n elif \"1\" in course['offered']['value']:\n sem1_courses.append(course)\n elif \"2\" in course['offered']['value']:\n sem2_courses.append(course)\n f.write('\\\\section{Semester 1 and 2 Courses}\\n\\n')\n for course in all_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 1 Only Courses}\\n\\n')\n for course in sem1_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 2 Only Courses}\\n\\n')\n for course in sem2_courses:\n f.write(latex_course(course))\n f.write('\\\\end{document}')\n return None", "def export_courses(courses, output):\n courses = sorted(courses)\n writer = csv.writer(output)\n writer.writerow([\n 'College', 'Department', 'Code', 'Name', 'Credits', 'Tags',\n 'Prerequisites'\n ])\n\n for course in courses:\n writer.writerow([\n course.college, course.department, course.code, course.name,\n course.credits, ','.join(course.tags), ','.join(course.prerequisites)\n ])", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_file(country, season, final, var):\n if 
var=='label':\n path='../results/kmeans/'\n elif var=='cluster':\n path='../results/sequence_analysis/'\n country_ = country.lower()\n season_ = season.replace('-','_')\n file_name=country_+\"_\"+season_\n newpath=path+file_name+'/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n f = open(newpath+file_name+\".txt\",\"w\") \n f.write(final)\n f.close()", "def do_2004(in_dir, out_dir):\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{full_path} -> {out_dir}/{idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)", "def write_crf_input(out_file, sentences, poss, lemmas, concepts):\n\n print '\\n\\tWrite out data in crf compliant format'\n f = open(out_file, 'w+')\n for position_i in range(len(sentences)):\n for position_j in range(len(sentences[position_i])):\n f.write(\n sentences[ position_i ][ position_j ] + '\\t' +\n poss[ position_i ][ position_j ] + '\\t' +\n lemmas[ position_i ][ position_j ] + '\\t' +\n concepts[ position_i ][ position_j ]\n + '\\n'\n )\n f.write('\\n')\n f.close()\n print '\\t--done'", "def input_write_filename(self, register: str, filename: str):\n dir_path = (_root_dir / filename).parent\n if not dir_path.is_dir():\n print(f\"Creating directory: {dir_path}\")\n dir_path.mkdir(parents=True, exist_ok=True)\n self._write_files.add(filename)\n self._input_filename(register, filename)", "def writeCC(self, fileName, allSCC):\n f = open(fileName,'w')\n\n for compNumber in range(0,len(allSCC)):\n f.write(\"Component number %s: \" % (compNumber))\n f.write(\"%s\\n\" % (str(allSCC[compNumber])))\n f.close()", "def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()", "def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = 
os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def write(read_file):\n princesses = filter_by_status(read(read_file))\n princesses = sort_by_status(princesses)\n princesses = sort_by_place(princesses)\n\n file = open(\"princesses_to_save.txt\", \"w\")\n for word in header:\n for something in word:\n file.write(\"{:20}\".format(something))\n file.write(\"\\n\")\n for i in range(len(princesses)):\n str1 = princesses[i]\n for word in str1:\n file.write(\"{:20}\".format(word))\n if i != len(princesses) - 1:\n file.write(\"\\n\")", "def main():\n csvFile = open('college.csv', 'w', newline='')\n csvWriter = create_csvWriter(csvFile)\n input_files_names = os.listdir(DATADIR)\n print(\"starting\")\n for file_name in input_files_names:\n if file_name.endswith(\".html\"):\n print(\".\", end=\"\", flush=True)\n soup = create_soup(file_name)\n name = college_names(soup)\n if name is None:\n continue\n city, state, zipcode = college_location(soup)\n selectivity = competitiveness(soup)\n\n csvWriter.writerow([name, city, state, zipcode, selectivity, file_name])\n print(\"Finished\")\n csvFile.close()", "def output_schedule(self) -> None:\n with open(\"Output.txt\", \"w\") as out_file:\n for sem in self.plan:\n out_file.write(sem.title.center(15 + 20 + 50 + 5) + \"\\n\\n\")\n for course in sem.required_courses:\n if course.special:\n out_file.write(\"*\" * 10 + \" \" * 5 + f\"{course.special_type}\\n\")\n elif course.grade != \"\":\n out_file.write(\n course.sem_taken.ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + course.grade.ljust(5)\n + \"\\n\"\n )\n else:\n out_file.write(\n \"AP/UNK\".ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + \"AP/UNK\".ljust(5)\n + \"\\n\"\n )\n out_file.write(\"\\n\\n\")", "def open_courses(self, filename):\n\n with open(filename) as courses:\n course_reader = csv.DictReader(courses)\n course_list = []\n for row in course_reader:\n name = row['\\ufeffCourses Period 4']\n lec = row['#lec']\n tut = row['#tut']\n prac = row['#pr']\n tut_tot = row['#tuttot']\n prac_tot = row['#prtot']\n max_tut = row['#max stud tut']\n max_prac = row['#max stud pr']\n exp_stud = row['E(students)']\n dif_total = int(row['#lec']) + int(row['#tut']) + int(row['#pr'])\n act_tot = int(row['#lec']) + int(row['#tuttot']) + int(row['#prtot'])\n course = Courses(name, lec, tut, prac, tut_tot, prac_tot, max_tut, max_prac, exp_stud, act_tot, dif_total)\n course_list.append(course)\n\n course_list_simulated = []\n\n for i in range(len(course_list)):\n lecs = int(course_list[i].lec)\n course_list_simulated.append(course_list[i].name + '_lec')\n if lecs > 0:\n for j in range(lecs):\n activity = course_list[i].name\n activity = activity + '_lec'\n course_list[i].add(activity)\n tuts = int(course_list[i].tut_tot)\n if tuts > 0:\n course_list_simulated.append(course_list[i].name + '_tut')\n for k in range(tuts):\n activity = course_list[i].name\n activity = activity + '_tut'\n course_list[i].add(activity)\n pracs = int(course_list[i].prac_tot)\n if pracs > 0:\n course_list_simulated.append(course_list[i].name + '_prac')\n for l in range(pracs):\n activity = course_list[i].name\n activity = activity + '_prac'\n course_list[i].add(activity)\n\n\n return 
course_list", "def parse_files(usage: str, full_time: str, part_time: str, semester: str, total_courses: int) -> dict:\n one = filter_for_semester(open(usage, 'r').readlines(), semester)\n two = get_rows_with_usage(one)\n usage_file = remove_duplicate_crn(two)\n no_dup_r = remove_duplicate_royal(two)\n full_time_file = open(full_time, 'r').readlines()\n full_r = list()\n part_r = list()\n for x in full_time_file:\n y = x.split(DELIMITER)\n full_r.append(y[FAC_ROYAL])\n part_time_file = open(part_time, 'r').readlines()\n for x in part_time_file:\n y = x.split(DELIMITER)\n part_r.append(y[FAC_ROYAL])\n full = list()\n part = list()\n staff = list()\n for x in range(len(part_r)):\n part_r[x] = part_r[x].strip(\"\\\"\")\n for x in range(len(full_r)):\n full_r[x] = full_r[x].strip(\"\\\"\")\n for x in no_dup_r:\n y = x.split(DELIMITER)\n if y[USAGE_ROYAL] in full_r:\n full.append(y)\n elif y[USAGE_ROYAL] in part_r:\n part.append(y)\n else:\n staff.append(y)\n return {'semester_no_dup_crn': usage_file,\n 'semester_no_dup_r': no_dup_r,\n 'semester': two,\n 'full_time': full,\n 'len_full': len(full_time_file),\n 'part_time': part,\n 'len_part': len(part_time_file),\n 'staff': staff,\n 'total_courses': total_courses}", "def write(afile, seqs): \n for s in seqs :\n writeseq(afile, s)", "def display_algn_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile1')\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n \n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile2')\n j = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile1')\n with open(j, 'r') as fil:\n d = {'CYS':'C','ASP':'D','SER':'S','GLN':'Q','LYS':'K','ILE':'I','PRO':'P','THR':'T','PHE':'F','ASN':'N',\n 'GLY':'G','HIS':'H','LEU':'L','ARG':'R','TRP':'W','TER':'*','ALA':'A','VAL':'V','GLU':'E','TYR':'Y',\n 'MET':'M','XAA':'X'}\n with open(filepath2, 'w') as outf:\n for line in fil:\n if len(line) %3 == 0:\n upper_seq = line.upper()\n single_seq = ''\n for i in range(int(len(upper_seq)/3)):\n single_seq += d[upper_seq[3*i:3*i+3]]\n outf.write(single_seq) \n return single_seq\n else:\n print(\"ERROR: Line was not a factor of 3 in length!\")", "def write_copy(file_source_path, original_file_name, **kwargs):\r\n\r\n group_dir, subject_dir, year_session_dir = None, None, None\r\n\r\n matched_groups = kwargs.get(\"matched_groups\")\r\n if matched_groups is not None:\r\n \"\"\"\r\n matched_groups[0] = year\r\n matched_groups[1] = session\r\n matched_groups[2] = subject group number with hyphen (if applicable)\r\n matched_groups[3] = subject group number (if applicable)\r\n matched_groups[4] = subject group name\r\n matched_groups[5] = subject name\r\n matched_groups[8] = paper number\r\n matched_groups[9] = further info\r\n \"\"\"\r\n\r\n # Handling computer science's change of group\r\n if \"Computer_science\" in matched_groups[5] and \"Mathematics\" in matched_groups[4]:\r\n group_dir = \"Group 4 - Sciences\"\r\n # Continuing regular group handing\r\n elif matched_groups[3] is None:\r\n group_dir = format_group_name(matched_groups[4])\r\n else:\r\n group_dir = matched_groups[2] + matched_groups[4]\r\n\r\n # 
Handling difficulty. Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n # This is where we handle deprecated/changed subject names\r\n subject = matched_groups[5]\r\n if \"Business_and_management\" in subject:\r\n subject = subject.replace(\"Business_and_management\", \"Business_management\")\r\n elif \"Belarussian\" in subject:\r\n subject = subject.replace(\"Belarussian\", \"Belarusian\")\r\n elif \"Biology_HL\" in subject:\r\n subject = subject.replace(\"Biology_HL\", \"Biology\")\r\n elif \"Biology_SL\" in subject:\r\n subject = subject.replace(\"Biology_SL\", \"Biology\")\r\n elif \"Ecosystems_and_societies_SL\" in subject:\r\n subject = subject.replace(\"Ecosystems_and_societies_SL\", \"Ecosystems_and_societies\")\r\n elif \"Environmental_systems_SL\" in subject:\r\n subject = subject.replace(\"Environmental_systems_SL\", \"Environmental_systems\")\r\n elif \"History_route_1\" in subject:\r\n subject = subject.replace(\"History_route_1\", \"History\")\r\n elif \"History_route_2\" in subject:\r\n subject = subject.replace(\"History_route_2\", \"History\")\r\n elif \"History_of_the_Islamic_World\" in subject:\r\n subject = subject.replace(\"History_of_the_Islamic_World\", \"Islamic_history\")\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{matched_groups[0]} {matched_groups[1]} Examination Session\"\r\n\r\n music_groups = kwargs.get(\"music_groups\")\r\n if music_groups is not None:\r\n \"\"\"\r\n music_groups[0] = year\r\n music_groups[1] = session\r\n music_groups[2] = subject group number with hyphen (if applicable)\r\n music_groups[3] = subject group number (if applicable)\r\n music_groups[4] = subject group name\r\n music_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n subject = \"Music\"\r\n\r\n # Handling difficulty. 
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{music_groups[0]} {music_groups[1]} Examination Session\"\r\n\r\n audio_groups = kwargs.get(\"audio_groups\")\r\n if audio_groups is not None:\r\n \"\"\"\r\n audio_groups[0] = year\r\n audio_groups[1] = session\r\n audio_groups[2] = subject group number with hyphen (if applicable)\r\n audio_groups[3] = subject group number (if applicable)\r\n audio_groups[4] = subject group name (contains '\\\\audio' in some instances)\r\n audio_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n else:\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"Music_{difficulty}\"\r\n year_session_dir = f\"{audio_groups[0]} {audio_groups[1]} Examination Session\"\r\n year_session_dir = os.path.join(year_session_dir, \"audio\")\r\n\r\n if None not in [group_dir, subject_dir, year_session_dir]:\r\n new_filepath = os.path.join(abs_destination_directory, group_dir, subject_dir, year_session_dir,\r\n original_file_name)\r\n os.makedirs(os.path.dirname(new_filepath), exist_ok=True)\r\n shutil.copy(file_source_path, new_filepath)\r\n else:\r\n print(f\"CRITICAL ERROR: File had 'None' path attributes: {file_source_path}\")", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def writeItineraryOutput(filename, itins):\n if filename[-4:] != \".csv\": # Make sure the filename is a .csv\n filename += \".csv\"\n try:\n with open(os.path.join(\"output\", filename), \"w\", newline='') as f:\n writer = csv.writer(f, delimiter=\",\")\n firstline = [\"Name\", \"Cost\", \"Home\", \"Dest 1\", \"Dest 2\", \"Dest 3\", \"Dest 4\", \"Dest 5\", \"Dest 6\"]\n writer.writerow(firstline)\n for itinerary in itins:\n line = []\n line.append(itinerary.name)\n line.append(itinerary.cheapest_cost)\n line = line + itinerary.cheapest_route.getCodeList()\n writer.writerow(line)\n except (FileNotFoundError, OSError):\n return False\n else: \n return True", "def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()", "def combine_modeloutputs(outputname='xxRENAMExx_Zcombined.txt',\n data='sfr',\n verbose=True):\n if data == 'sfr':\n filepath = 
'/Users/kschmidt/work/catalogs/NEOGALlines/nebular_emission/'\n modelfilestr = filepath+'nebular_emission_Z0*.txt'\n splitstr = 'emission_Z'\n elif data == 'agn':\n filepath = '/Users/kschmidt/work/catalogs/NEOGALlines/AGN_NLR_nebular_feltre16/'\n modelfilestr = filepath+'nlr_nebular_Z0*.txt'\n splitstr = 'nebular_Z'\n else:\n sys.exit('Inavlid value of data=\"'+data+'\"')\n\n output = filepath+outputname\n if verbose: print(' - Setting up output for:\\n '+output)\n modelfiles = glob.glob(modelfilestr)\n header = open(modelfiles[0]).readline().rstrip()\n if data == 'sfr':\n header = header.replace('##','# Zgas ')\n elif data == 'agn':\n header = header.replace('#','# Zgas ')\n header = header+'\\n'\n\n fout = open(output, 'w')\n fout.write(header)\n if verbose: print(' - Writing the following files to ouput:')\n for mf in modelfiles:\n if verbose: print(' '+mf)\n Zgasstring = mf.split('/')[-1].split(splitstr)[-1].split('.txt')[0]\n\n with open(mf, 'r') as f:\n linesall = f.readlines()\n\n for linestring in linesall:\n if linestring.startswith('#'):\n pass\n elif linestring == ' \\n':\n fout.write(linestring)\n else:\n fout.write('0.'+Zgasstring+' '+linestring)\n\n fout.close()", "def write_candidates_file(self, min_count, stops, tags, filename):\n filename = os.path.join(filename)\n candidates = self.candidates(min_count, stops, tags)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n for wordi, wordj in candidates:\n file.write(\"{} {}\\n\".format(wordi, wordj))\n print(\"Success: Candidates written to '{}'\".format(filename))", "def write_star_files(self, star_input, outpath):\n \n with open(star_input, 'r') as f:\n table = parse_star(f)\n\n cluster_star = {}\n\n for cluster, nodes in clusters.items():\n if nodes:\n #convert to str to match df\n #add 1 to match RELION indexing\n avgs = [str(node+1) for node in nodes]\n subset = table[table['ClassNumber'].isin(avgs)]\n cluster_star[cluster] = subset\n\n for cluster, table in cluster_star.items():\n with open(outpath+'/slicem_cluster_{0}.star'.format(cluster), 'w') as f:\n #write the star file\n print('data_', file=f)\n print('loop_', file=f)\n for i, name in enumerate(table.columns):\n print('_rln' + name + ' #' + str(i+1), file=f)\n table.to_csv(f, sep='\\t', index=False, header=False)\n\n with open(outpath+'/slicem_clusters.txt', 'w') as f:\n for cluster, averages in clusters.items():\n f.write(str(cluster) + '\\t' + str(averages) + '\\n')\n \n print('star files written!')", "def write_input(self):\n # load template, substitute parameters and write input file\n self.homog_core()\n self.write_mat_string()\n input_tmpl = open('base_input.txt')\n templ = Template(input_tmpl.read())\n file_string = templ.substitute(cool_frac = self.vfrac_cermet,\n r_core = self.r,\n core_z = self.z,\n r_refl = self.r + self.refl_t,\n refl_min = -self.refl_t,\n refl_max = self.z + self.refl_t,\n fuel_string = self.fuel_string,\n fuel_rho = self.rho,\n fuel_vol = self.core_vol,\n refl_vol = self.core_vol,\n thermal_power = self.Q_therm)\n # write the file\n filename = 'r_{0}_{1}.i'.format(round(self.vfrac_cermet, 3), \n round(self.r, 3))\n ifile = open(filename, 'w')\n ifile.write(file_string)\n ifile.close()\n\n return filename", "def combine_files(output_filename, *passes):\n all_columns = {}\n for x in passes:\n sp = pyvyu.load_opf(x)\n column_list = sp.get_column_list()\n for c in column_list:\n all_columns[c] = sp.get_column(c)\n sp = pyvyu.Spreadsheet()\n sp.name = output_filename\n sp.columns = all_columns\n pyvyu.save_opf(sp, output_filename, 
True, *all_columns.keys())\n return output_filename", "def write(self, filename, overwrite=False, format='pickle',\n file_per_mode=False):\n def write_dat(fname):\n with open(fname, 'w') as fh:\n for val in cf:\n fh.write(\"{}\\n\".format(val))\n return\n\n try:\n # Get filename\n if not overwrite:\n i = 0\n while True:\n msg = ''\n path = os.path.dirname(filename)\n f = os.path.basename(filename)\n f, ext = os.path.splitext(f)\n\n if os.path.exists(filename):\n msg += '\\033[93mSplittingfunction-file exist,'\n msg += 'not overwriting\\033[0m'\n if i == 0:\n f = \"%s_%s\" % (f, str(i))\n i += 1\n a = \"_%s\" % str(i-1)\n b = \"_%s\" % str(i)\n f = f.replace(a, b)\n filename = os.path.join(path, \"%s%s\" % (f, ext))\n else:\n print(msg)\n break\n\n if format == 'pickle':\n write_pickle(self, filename)\n\n elif format == 'json':\n data = {'SF': {}}\n for m, cst in self.cst.items():\n data['SF'][m] = {}\n for deg, c in cst.items():\n data['SF'][m][deg] = c.tolist()\n with open(filename, 'w') as fh:\n json.dump(data, fh)\n\n elif format == 'dat':\n if file_per_mode is False:\n cf = []\n for mode, c in self.cst.items():\n degs = sorted(list(self.cst[mode].keys()))\n for deg in degs:\n coeff = self.cst[mode][deg]\n if deg == '0':\n cf += [coeff[0]]\n cf += [self.dst[mode]['0'][0]]\n else:\n cf += coeff.tolist()\n write_dat(filename)\n\n else:\n for mode, c in self.cst.items():\n cf = []\n degs = sorted(list(self.cst[mode].keys()))\n for deg in degs:\n coeff = self.cst[mode][deg]\n if deg == '0':\n cf += [coeff[0]]\n cf += [self.dst[mode]['0'][0]]\n else:\n cf += coeff.tolist()\n print(mode)\n write_dat(\"{}.dat\".format(mode))\n\n else:\n raise IOError('Only support for pickle and json files.')\n\n except IOError:\n msg = \"\\033[91mCan't save file\\n\"\n msg += \"Error message: %s\\033[0m\" % sys.exc_info()[1]\n print(msg)\n return", "def write_to_files(section, csv_path, srt_path):\n write_to_csv(section, csv_path)\n write_to_srt(section, srt_path)", "def do_2003(in_dir, out_dir):\n\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{item} -> {idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)" ]
[ "0.5756077", "0.56120145", "0.55398047", "0.53964037", "0.53702533", "0.537013", "0.53571147", "0.5315433", "0.5303923", "0.5292199", "0.5276437", "0.52718467", "0.5271242", "0.52504164", "0.52265584", "0.519093", "0.51793826", "0.51728696", "0.5169168", "0.51610035", "0.51603466", "0.51591676", "0.5136202", "0.51278085", "0.5115107", "0.5114406", "0.511093", "0.51051533", "0.50937617", "0.50876737" ]
0.70653456
0
_Implement this method_ Write a function that takes as input an input file and stores the contents in a matrix. Each row in the matrix should be a line in the input file. Each element in a row should be an element from the line, delimited by a tab.
def storeTabDelimitedFile(inputFile):
    list0 = []
    with open(inputFile, 'r') as f:
        newlist = f.readlines()
        #print(newlist)
        for i in range(len(newlist)):
            #newlist[i] = newlist[i].strip('\t')
            newlist[i] = newlist[i].strip('\n')  # this makes the matrix easier to read as i will not be converting back to original format
            x = newlist[i].split('\t')  # everytime a tab appears in a lines string, the string is split and is storing data in a list
            list0.append(x)
    print(list0)  # list0 is the matrix as it contains sublists and elements that can be individually accessed
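A small, self-contained example of calling storeTabDelimitedFile; the file name and rows are invented for illustration:

# Hypothetical tab-delimited input: name<TAB>score1<TAB>score2
with open("grades.txt", "w") as f:
    f.write("alice\t90\t85\n")
    f.write("bob\t72\t88\n")

storeTabDelimitedFile("grades.txt")
# Prints the matrix, one sub-list per input line:
# [['alice', '90', '85'], ['bob', '72', '88']]

Each inner list keeps the tab-separated fields as strings, so list0[1][2] would hold bob's second score, '88'.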
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MatrixToFile(self):\n # open text file\n file = open(\"intersection_matrix.txt\", 'w')\n # write opening square bracket for matrix\n file.write(\"[\")\n # use for loop to write in the matrix\n for i in range(self.rows):\n # square brackets to append in elements of a row of the matrix\n mat = []\n if i != 0:\n # separate each row with a comma\n file.write(\",\")\n for j in range(self.cols):\n # append elements of the row\n mat.append(self.matrix[i][j])\n # avoid having space as the first row in the text file\n if i != 0:\n file.write(\"\\n\")\n # write in the row\n file.write(str(mat))\n # write closing bracket for the matrix\n file.write(\"]\")\n # close file\n file.close()\n return", "def _write_matrix(self) -> None:\n\n # Define function to filter in any line of stdout that exclusively contains numbers, tabs, and decimal signs.\n def is_part_of_matrix(string, contains_wrong_characters=re.compile(r'[^\\t0-9. ]').search):\n return not bool(contains_wrong_characters(string))\n\n # Apply filter to each line in stdout.\n matrix_lines = [line + \"\\n\" for line in self.stdout_and_stderr.splitlines() if is_part_of_matrix(line)]\n\n # Write our beautiful (and hopefully not broken!) matrix to disk.\n with open(self.write_matrix_lines_to, \"w\") as file:\n file.writelines(matrix_lines)", "def _file_to_matrix(self, input_field: str, depth: int) -> np.char.array:\n maze_rows = input_field.splitlines()\n maze_lists = [list(row) for row in maze_rows]\n maze = np.array(maze_lists)\n maze = np.pad(maze, pad_width=1, constant_values=EMPTY)\n\n multidim_maze = np.char.array([np.char.array(maze, itemsize=2)\n for _ in range(depth)])\n return multidim_maze", "def write_out(matrix, filename):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile)\n for r in matrix:\n writer.writerow(r)\n print(filename + ' writen!')", "def upload_matrix(row_names, col_names, matrix):", "def writeMatrix(self):\n\t\tpass", "def loadMatrixFromFile(filename, rows=0,cols=0,sep=','):\n datafile=open(filename,'r')\n if rows!=0 and cols!=0: \n matrix=np.asarray(np.zeros((rows,cols)))\n for i in range(rows):\n aux=datafile.readline()\n aux=aux.split(sep)\n matrix[i]=np.asarray(aux,dtype=np.float64)\n #print 'Reading line '+str(i)+' of file '+filename\n return matrix\n else:\n aux=datafile.readline()\n aux=aux.split(sep)\n matrix=np.asarray([aux],dtype=np.float64)\n aux=datafile.readline()\n while aux!=\"\":\n aux=aux.split(sep)\n #print aux\n matrix=np.append(matrix,np.asarray([aux],dtype=np.float64),0)\n aux=datafile.readline()\n return matrix", "def ReadMatrix(file,\n separator=\"\\t\",\n numeric_type=numpy.float,\n take=\"all\",\n headers=False\n ):\n\n lines = filter(lambda x: x[0] != \"#\", file.readlines())\n\n row_headers, col_headers = [], []\n\n if headers:\n col_headers = lines[0][:-1].split(\"\\t\")[1:]\n del lines[0]\n\n num_rows = len(lines)\n\n if take != \"all\":\n num_cols = len(take)\n else:\n l = len(string.split(lines[0][:-1], \"\\t\"))\n if headers:\n take = range(1, l)\n else:\n take = range(0, l)\n\n num_cols = len(take)\n\n matrix = numpy.zeros((num_rows, num_cols), numeric_type)\n\n nrow = 0\n for l in lines:\n data = l[:-1].split(\"\\t\")\n if headers:\n row_headers.append(data[0])\n\n try:\n data = map(lambda x: float(data[x]), take)\n except ValueError:\n print \"error parsing data\", data\n raise\n\n matrix[nrow] = data\n nrow += 1\n\n return matrix, row_headers, col_headers", "def import_matrix(fileMatrix):\n with open(fileMatrix) as fMat:\n matrix = np.zeros((3,4))\n for ligne in 
fMat:\n if ligne.startswith(' 1') or ligne.startswith(' 2') or ligne.startswith(' 3'):\n matrix[int(ligne.split()[0])-1,:] = float(ligne.split()[1]),float(ligne.split()[2]),float(ligne.split()[3]),float(ligne.split()[4])\n return deepcopy(matrix)", "def readMatrix(infile,\n format=\"full\",\n separator=\"\\t\",\n numeric_type=numpy.float,\n take=\"all\",\n headers=True,\n missing=None,\n ):\n\n row_headers, col_headers = [], []\n\n lines = filter(lambda x: x[0] != \"#\", infile.readlines())\n\n if len(lines) == 0:\n raise IOError(\"no input\")\n\n if format == \"full\":\n\n if headers:\n col_headers = lines[0][:-1].split(\"\\t\")[1:]\n del lines[0]\n\n num_rows = len(lines)\n\n if take != \"all\":\n num_cols = len(take)\n else:\n l = len(string.split(lines[0][:-1], \"\\t\"))\n if headers:\n take = range(1, l)\n else:\n take = range(0, l)\n\n num_cols = len(take)\n\n matrix = numpy.zeros((num_rows, num_cols), numeric_type)\n\n nrow = 0\n for l in lines:\n data = l[:-1].split(\"\\t\")\n if headers:\n row_headers.append(data[0])\n\n if missing is None:\n try:\n data = map(lambda x: float(data[x]), take)\n except ValueError, msg:\n raise ValueError(\"error %s: data=%s\" % (msg, str(data)))\n except IndexError, msg:\n raise IndexError(\"error %s: data=%s\" % (msg, str(data)))\n\n else:\n d = []\n for x in take:\n try:\n d.append(float(data[x]))\n except ValueError:\n d.append(missing)\n except IndexError, msg:\n raise IndexError(\n \"error %s: data=%s\" % (msg, str(data)))\n\n data = d\n\n matrix[nrow] = data\n\n nrow += 1\n\n elif format == \"phylip\":\n # read in symmetric phylip matrices\n # note: they can wrap around\n if take != \"all\":\n raise \"phylip matrix does not support take - only full matrices are processed.\"\n\n if not headers:\n raise \"phylip matrix always has headers.\"\n\n num_rows = int(lines[0].strip())\n\n num_cols = num_rows\n\n matrix = numpy.zeros((num_rows, num_cols), numeric_type)\n take = range(1, num_rows)\n\n nrow = 0\n ncol = 0\n for l in lines[1:]:\n\n data = re.split(\"\\s+\", l[:-1])\n\n if ncol == 0:\n row_headers.append(data[0])\n\n try:\n data = map(float, data[1:len(data)])\n except ValueError:\n raise \"parsing error in conversion to float in line %s\" % l\n\n for x in range(len(data)):\n matrix[nrow][ncol] = data[x]\n ncol += 1\n\n # deal with wrapping around\n if ncol == num_cols:\n ncol = 0\n nrow += 1\n\n col_headers = row_headers\n\n return matrix, row_headers, col_headers", "def write_mrc_matrix(self):\n\n matrix = self.matrix\n \n matrix = self.permute_matrix_to_map_axis_order(matrix)\n a = Numeric.ravel(matrix)\n \n data = a.tostring()\n\n file_write = open(self.path,'ab')\n file_write.write(data)\n file_write.close()", "def makeMaze(self,inputFileName):\n\t\tinputFile = open(inputFileName, \"r\")\n\n\t\tfor line in inputFile:\n\t\t\tline = line.strip(\"\\n\")\n\t\t\t# Add to matrix\n\t\t\tself.maze.append(list(line))", "def print_matrix_to_file(matrix, fileName):\n with open(fileName, 'w') as f:\n for row in matrix:\n print('\\t'.join(map(str, row)), file=f)", "def readMatrix(file):\n file1 = open(file, \"r\")\n rawData = file1.readlines()\n file1.close() \n \n n = round(len(rawData[0])/2) \n \n matrix2D = [[None for x in range(n)] for y in range(n)] \n \n j = 0\n for line in rawData: \n i = 0 \n for element in line:\n if element != \" \":\n if i == n:\n break\n matrix2D[j][i] = element\n i+= 1 \n j+= 1 \n \n return matrix2D", "def _corpusReader(self,input_file):\n reader = csv.reader(open(input_file,\"rb\")) \n temp_mat = []\n for i,each in 
enumerate(reader):\n tmp_list = []\n each_instance_values = each\n each_instance_values = map(lambda v:float(v), map(lambda s: s.strip(),each_instance_values))\n for j,each_index in enumerate(each_instance_values):\n tmp_list.append(each_index) \n self.data[(i+1,j+1)] = each_index \n temp_mat.append(tmp_list)\n self.N = i\n\n self.N=self.N+1\n self.data_matrix = np.matrix(temp_mat) \n self.T = self.data_matrix.shape[1]", "def create_matrix(line):\n\tlst = line.split()\n\tn = int(len(lst)**.5)\n\treturn [lst[i*n:i*n+n] for i in range(n)]", "def save_matrix(self, file):\n print 'Saving matrix as', file\n \n np.save(file, self.matrix)", "def _fileToMatrix(file_name):\r\n # TODO: np.loadtxt() might be an alternative\r\n # try:\r\n if 1 < 3:\r\n lres = []\r\n for line in open(file_name, 'r').readlines():\r\n if len(line) > 0 and line[0] not in ('%', '#'):\r\n lres.append(list(map(float, line.split())))\r\n res = lres\r\n else:\r\n fil = open(file_name, 'r')\r\n fil.readline() # rudimentary, assume one comment line\r\n lineToRow = lambda line: list(map(float, line.split()))\r\n res = list(map(lineToRow, fil.readlines()))\r\n fil.close() # close file could be omitted, reference counting should do during garbage collection, but...\r\n\r\n while res != [] and res[0] == []: # remove further leading empty lines\r\n del res[0]\r\n return res\r\n # except:\r\n print('could not read file ' + file_name)", "def export(self, key, matrix=None):\n if key == 'e':\n if not matrix:\n matrix = self.matrix\n\n with open(self.getName('txt'), 'w') as file:\n for line in matrix:\n file.write(''.join(list(map(str, line))) + '\\n')\n print('Text saved')", "def _save_matrix(self, filename):\n\n scipy.io.mmwrite(filename, self.mtx, field=\"integer\") # pylint: disable=no-member", "def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)", "def add_file_to_data_matrix(data_matrix, new_file):\n\t# Generate list of words already in data matrix\n\t# Store words in list dm_words\n\t# Store\twords with 0 count in dictionary word_dict\n\twith open(data_matrix, 'r') as old_data_matrix:\n\t\treader = csv.reader(old_data_matrix)\n\t\tfor line in reader:\n\t\t\tdm_words=line[1:]\n\t\t\tbreak\n\t\n\ttrie = Trie()\n\tfor word in dm_words:\n\t\ttrie.insert(word)\n\n\t# Open input file, split at \"::\" to remove heading\n\t# Words stripped of punctuation, split on whitespace\n\t# Words counted and stored in dictionary word_dict\n\tnew_words_added = 0\n\twith open(new_file, 'r') as f:\n\t\tfor line in f:\n\t\t\tnew_words = trie.count_line(line)\n\t\t\tfor new_word in new_words:\n\t\t\t\tadd_new_word(data_matrix, new_word)\n\t\t\t\tnew_words_added += 1\n\t\t\tdm_words += new_words\n\n\n\t# Generate list of values in order of words in original data_matrix\n\tvalues = [str(trie.get_val(word)) for word in dm_words]\n\tnew_row = [new_file] + values\n\n\t# Append new row of values to data_matrix\n\twith open(data_matrix, 'a') as new_data_matrix:\n\t\twriter = csv.writer(new_data_matrix, lineterminator='\\n')\n\t\twriter.writerow(new_row)\n\n\treturn new_words_added", "def parse_matrix(inputfile, nrows, ncols, ncolsblock):\n nparray = numpy.empty(shape=(nrows, ncols))\n line = next(inputfile)\n assert len(line.split()) == min(ncolsblock, ncols)\n colcounter = 0\n while colcounter < ncols:\n # If the line is just the column header (indices)...\n if line[:5].strip() == '':\n line = next(inputfile)\n rowcounter = 0\n while rowcounter < nrows:\n row = 
list(map(float, line.split()[1:]))\n assert len(row) == min(ncolsblock, (ncols - colcounter))\n nparray[rowcounter][colcounter:colcounter + ncolsblock] = row\n line = next(inputfile)\n rowcounter += 1\n colcounter += ncolsblock\n return nparray", "def write (self, path):\n\t\ts=[]; add=s.append\n\t\tadd ('\\t'.join (self.schema))\n\t\tfor record in self.data:\n\t\t\tadd (record.asTabDelimitedRecord())\n\t\t\n\t\t# f = open (path, 'w')\n\t\tf = codecs.open(path, 'w', 'utf-8')\n\t\tf.write (self.linesep.join (s))\n\t\tf.close()\n\t\tprint (\"data written to \" + path)", "def __write_out_row__(self):\n column_pointer = spacing\n\n row_height = np.max([b.shape[0] for b in self.row_bitmaps])\n\n with open(\"active_weather.basic.exp\"+str(self.box_count)+\".box\",\"a\") as f:\n for char,b in zip(self.row_characters,self.row_bitmaps):\n assert isinstance(b, np.ndarray)\n height, width = b.shape\n\n # row first and then column\n additional_height = row_height-height\n\n self.training_page[self.row_pointer+additional_height:self.row_pointer + height+additional_height, column_pointer:column_pointer + width] = b\n a, b, c, d, e = char, column_pointer, self.height - (self.row_pointer + height + additional_height), column_pointer + width, self.height - (self.row_pointer+additional_height)\n f.write(str(a) + \" \" + str(b) + \" \" + str(c+1) + \" \" + str(d-1) + \" \" + str(e) + \" 0\\n\")\n\n column_pointer += width + spacing\n\n\n self.row_pointer += spacing + row_height\n self.column_pointer = spacing\n\n self.row_bitmaps = []\n self.row_characters = []", "def WriteFile( self ):\n with open( \"BasisVector.in\" , \"w\" ) as outfile:\n firstLine = \" \" + str( self.NQ ) + \\\n \" \" + str( self.Nbranches ) + \\\n \" \" + str( self.NatomsUC ) + \\\n \" \" + str( self.dim ) + \"\\n\"\n outfile.write( firstLine )\n for qq in range( self.NQ ): ## loop over Q vectors\n lineQ = [ \"{:15.8f}\".format( x ) for x in \n self.QVectors[ qq , : ] ]\n lineQ = \"\".join( lineQ )\n outfile.write( lineQ + \"\\n\" )\n for branch in range( self.Nbranches ): ## loop over branches\n for atom in range( self.NatomsUC ): ## loop over atoms in unit cell\n line = [ \"{:15.8f}\".format( x ) for x in \n self.EigenVectors[ qq , branch , atom , : ] ]\n line = \"\".join( line )\n outfile.write( line + \"\\n\" )\n outfile.write( \"\\n\" )\n outfile.write( \"\\n\" )", "def lire_matrice(fichier):\n matrix = [] # Matrix (2D list) of the game plan\n \n with open(fichier) as file:\n data = file.readlines() # Contains all rows as a list (1 row = 1 item)\n \n for i in data: # i will have the line\n IntList = [] # Matrix which will contain in item integer\n for j in i.split(): # j will have each digit of the line i\n IntList.append(tryCastToInt(j))\n \n matrix.append(IntList)\n \n return matrix", "def read_matrix(filename='matrix.txt'):\n matrix = []\n with open(filename) as f:\n for line in f:\n row = line.strip().split(',')\n row = [int(number_str) for number_str in row]\n matrix.append(row)\n return matrix", "def make_table_file(lines, labels, dir_path, filename):\r\n lines.sort()\r\n lines.insert(0, '\\t'.join(labels))\r\n\r\n output = open(os.path.join(dir_path, filename), 'w')\r\n output.write('\\n'.join(lines))\r\n output.close()", "def read_data(self,filename):\n self.x = [] #Input values\n self.t = [] #Target values\n\n with open(filename, \"r\") as infile:\n lines = infile.readlines()\n self.n = len(lines)\n for line in lines:\n words = line.split()\n self.x.append(float(words[0]))\n self.t.append(float(words[1]))\n\n self.x = 
np.array(self.x)\n self.t = np.array(self.t)\n self.create_design_matrix()" ]
[ "0.6438186", "0.6260845", "0.6211034", "0.61225104", "0.61192155", "0.6067222", "0.59721917", "0.5874142", "0.58524245", "0.58170664", "0.5776375", "0.5770828", "0.5763248", "0.5702263", "0.5634613", "0.5628356", "0.56093055", "0.5561605", "0.55224174", "0.5520379", "0.5517763", "0.54868114", "0.5414574", "0.5372211", "0.535461", "0.53484017", "0.5333877", "0.53313667", "0.5327707", "0.53020823" ]
0.6892157
0
Prints and flushes the things passed as arguments.
def printnflush(*args):
    if pyscheduler.verbose:
        print args
        sys.stdout.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pflush(*args, **kwargs):\n print(*args, **kwargs)\n sys.stdout.flush()", "def print_flush(msg):\n print(msg, end='')\n sys.stdout.flush()", "def output(*args):\n print(*args, end='', file=file)", "def out(*args):\r\n print(*args)", "def printwf(data):\n print data #replace for Py3\n sys.stdout.flush()\n sys.stderr.flush()", "def print_out():\n pass", "def flush_print(string):\n print(string)\n sys.stdout.flush()", "def flush(self):\n if self.stderr:\n sys.__stderr__.flush()\n else:\n sys.__stdout__.flush()", "def print(*args, **kwargs):\n sep, file = kwargs.pop(\"sep\", b\" \"), kwargs.pop(\"file\", sys.stdout)\n at_start = True\n for x in args:\n if not at_start:\n file.write(sep)\n file.write(str(x))\n at_start = False\n file.write(kwargs.pop(\"end\", b\"\\n\"))\n if kwargs.pop(\"flush\", False):\n file.flush()", "def _flush():\n libtcod.console_flush()", "def Write(*args):\n for arg in args:\n sys.stdout.write(str(arg))", "def flush():\n None", "def flush_output():\n if len(buffered) == 1:\n code.add_line(\"append_result(%s)\" % buffered[0])\n elif len(buffered) > 1:\n code.add_line(\"extend_result([%s])\" % \", \".join(buffered))\n del buffered[:]", "def flush(self) -> None:\r\n if self.file is not None:\r\n self.file.flush()\r\n\r\n self.stdout.flush()", "def write(self, *args, **keys):\n output = self.format(*args, **keys)\n self.eol_pending = not output.endswith(\"\\n\")\n sys.stderr.flush()\n sys.stdout.write(output)\n sys.stdout.flush()", "def _flush(self):", "def flush(*args):\n return _libsbml.flush(*args)", "def _print_output(*args):\n for arg in args:\n print(arg)\n print('\\n')", "def flush(self):\n pass", "def _flush(self):\n pass", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def sgr_print(*args, reset=True, end='\\n', flush=False):\n print(sgr_string(*args, reset=reset), sep='', end=end, flush=flush)", "def dump(self):\n# self.partial_in=\"\"\n# for line in sys.stdin: \n# self.partial_in+=sys.stdin.read(1)\n sys.stdout = sys.__stdout__\n os.system('cls')\n for cb in self.buffers.values():\n cb.dump(sys.stdout)\n sys.stdout = self", "def l_print(*args):\n for rank in range(0, comm.size):\n comm.Barrier()\n if rank == comm.rank:\n l_print_no_barrier(*args)\n comm.Barrier()", "def flush(self):\n self.out.flush()" ]
[ "0.7685068", "0.7031397", "0.69741064", "0.6884889", "0.67748654", "0.6692131", "0.6624699", "0.66217965", "0.6615985", "0.6506932", "0.6432913", "0.6390469", "0.6375495", "0.6357373", "0.6350542", "0.6315522", "0.628105", "0.6269504", "0.6254084", "0.62510514", "0.62450105", "0.62450105", "0.62450105", "0.62450105", "0.62450105", "0.62450105", "0.622952", "0.6228648", "0.6177813", "0.61574894" ]
0.7230101
1
Helper function to run tasks inside a process. It implements an infinite loop controlled by the messages received from 'pipe_end'.
def run_task(process_name, tasks, pipe_end):
    task_ended = False
    try:
        while not task_ended:
            # Blocks until it receives a message
            message_type, value = pipe_end.recv()

            if message_type == "EXECUTE":
                result = tasks[value].run()
                pipe_end.send(("TASK FINISHED", (value, result)))

            elif message_type == "FINISH":
                printnflush("Communication successfully closed for", process_name)
                task_ended = True
            else:
                printnflush("Unexpected message: %s" % message_type)
                task_ended = True

    except EOFError:
        printnflush("Communication closed due to remote closing of the pipe in process %s" % process_name)

    except Exception, msg:
        printnflush("Communication closed due to unexpected exception: %s" % msg)

    pipe_end.close()
    printnflush("Task reached end")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n while self.container.process(): pass", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()", "def run(self):\n while True:\n cmd, flag = self.Q['in'].get()\n if flag == 'stop':\n break\n try:\n if flag == 'process':\n sshCmd = \"ssh -q %s \\\"cd %s; %s\\\"\" % (self.host, self.cwd, cmd)\n fp = os.popen(sshCmd)\n output = fp.read()\n #output = fp.readlines()\n fp.close()\n else:\n raise ValueError, 'Unknown flag %r' % flag\n except:\n # unconditional except is right, since we report *all* errors\n self.reportError()\n else:\n if output:\n self.Q['out'].put(output)", "def run(self):\n while True:\n try:\n # probing the parent process.\n if self._target_pid is not None:\n os.kill(self._target_pid, 0)\n time.sleep(self._poll_interval)\n except OSError:\n self._on_stop_callback()\n break", "async def _run(self):\n self.sub_process.start()\n log.info('Started sub process (pid {}).'.format(self.sub_process.pid))\n\n # Wait until the process is actually started to not consider it dead when it's not even born yet\n while not self.sub_process.is_alive():\n try:\n # Wtb milliseconds async sleep omg\n await asyncio.wait_for(asyncio.sleep(1), 0.1)\n except asyncio.TimeoutError:\n pass\n\n # ERMAHGERD ! MAH FRAVRIT LERP !\n while True:\n try:\n data = self.mp_queue.get(False) # Do not block\n except QueueEmpty:\n if not self.sub_process.is_alive():\n log.warning('Sub process (pid {}) appears dead.'.format(self.sub_process.pid))\n asyncio.ensure_future(self.stop())\n\n # Arbitrary sleep time after an unsuccessful poll\n await asyncio.sleep(4)\n except Exception as e:\n # Might be triggered when the sub_process is terminated while putting data in the queue\n log.error('Queue polling error: ' + str(e))\n break\n else:\n if data is not None:\n # Process the data sent by the subprocess\n self.on_data(data)", "def net_proc(pipe):\n asyncio.run(net_server(pipe))", "def run(self):\n proc_name = self.name\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n debug('{}: Exiting'.format(proc_name))\n self.task_queue.task_done()\n break\n debug('{}: {}'.format(proc_name, next_task))\n answer = next_task()\n self.task_queue.task_done()\n self.result_queue.put(answer)\n return", "async def process_loop(self):\n self.state = 'running'\n while not self.is_exiting_:\n # If any messages have been handled recently, do not sleep\n # Else if no messages, sleep for some short time\n LOG.debug(\"jso: waiting for inbox data\")\n msg = await self.receive()\n if type(msg) != tuple or len(msg) != 2:\n LOG.debug(\"Got unhandled msg: %s\", msg)\n continue\n LOG.debug(\"got msg = %s\", msg)\n fun, args = msg\n # todo: make this if thing in meta class so that we only evaluate\n # this once\n import asyncio\n if asyncio.iscoroutinefunction(fun):\n # since fun will be returned by a function that only have access\n # to the Class rather than the instance, we have to send `self`\n # as an argument\n await fun(self, args)\n else:\n fun(self, args)\n\n LOG.debug(\"Process %s process_loop stopped\", self.pid_)\n self.state = 'exiting'", "def run(self):\n while True:\n task = self.queue.get()\n if task is None:\n break\n fslfile = os.path.join(task.directory, task.fsl_file)\n # TODO: there must be a better way to set the workdir\n workdir = '/'.join(task.directory.split('/')[-3:])\n logger.info('Docker task %s %s workdir %s',\n task.id, task.fsl_file, workdir)\n fslcmds = ['save_model(close)',\n \"chdir('{}')\".format(workdir)]\n \n with 
open(fslfile) as f:\n fslcmds += f.readlines()\n r = [json.loads(s)\n for s in self.container.send_fsl(\n '\\n'.join(fslcmds), publish_receive)[:2]]\n try:\n if r[0]['status'] == 'ok':\n task.status = 'C'\n else:\n task.status = 'X'\n except:\n task.status = 'X'\n logger.info(\"Finished %s\", r)\n if self.stoponend:\n self.container.quit()\n # lets hope that docker will always restart this container\n self.queue.task_done()", "def test_poll_processes(self):\n message='abcdefg'\n response_type = 'TEST'\n t = threading.Thread(target=self.handle_process_pipes, args=(message, response_type))\n t.start()\n\n self.dut._poll_processes(message=message,\n timeout=2,\n response_type=response_type)\n\n t.join()", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def doTask(self):\n\n def signal_cb(s, f):\n os._exit(0)\n\n for s in signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT:\n signal.signal(s, signal_cb)\n\n # write pidfile\n def atexit_cb():\n print(\"Exit fork\")\n\n atexit.register(atexit_cb)\n\n # Start the write\n i = 0\n while self.pid == 0 or not self.do_fork:\n print(self.msg % os.getpid())\n time.sleep(2)\n i += 1", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def run(self):\n while self.time <= self.deadline:\n self.setup_period()\n self.execute_process(self.select_process())\n\n self.time += 1", "def run(self):\n def target():\n # Pass these inputs to STDIN with delays\n for i in self.delayed_inputs:\n if type(i) is int or type(i) is float:\n time.sleep(i)\n elif type(i) is bytes:\n try:\n self.process.stdin.write(i) \n except IOError as e:\n lg.info(\n \"Input: {} failed to write to stdin due to\\n{}\".format(i, e)\n )\n break\n if self.disable_communicate:\n self.process.wait()\n else:\n self.stdout_res, self.stderr_res = self.process.communicate(\n input=self.inputs)\n\n try:\n self.process = Popen(self.command, stdin=self.stdin,\n stdout=self.stdout, stderr=self.stderr,\n start_new_session=True, cwd=self.cwd, env=self.env)\n except OSError:\n lg.error(\"Couldn't Popen command {}\".format(self.command))\n raise\n self.thread = Thread(target=target)\n self.thread.start()", "def run(self):\r\n\r\n while self.running:\r\n while self.has_forks is False:\r\n self.think()\r\n self.get_forks()\r\n self.eat()\r\n self.put_forks()", "def _process(self):\n\n while True:\n try:\n sockets = [self.master_fd]\n if self.sock:\n sockets.append(self.sock)\n # Don't handle user input while a side command is running.\n if len(self.filter) == 1:\n sockets.append(pty.STDIN_FILENO)\n rfds, _, _ = select.select(sockets, [], [], 0.25)\n except select.error as ex:\n if ex[0] == errno.EAGAIN: # Interrupted system call.\n continue\n raise\n\n if not rfds:\n self._timeout()\n else:\n # Handle one packet at a time to mitigate the side channel\n # breaking into user input.\n if self.master_fd in rfds:\n data = os.read(self.master_fd, 1024)\n self.master_read(data)\n elif pty.STDIN_FILENO in rfds:\n data = os.read(pty.STDIN_FILENO, 1024)\n self.stdin_read(data)\n elif self.sock in rfds:\n data, self.last_addr = self.sock.recvfrom(65536)\n if data[-1] == b'\\n':\n self.log(\"WARNING: the command ending with <nl>. 
\"\n \"The StreamProxy filter known to fail.\")\n self.log(\"Got command '%s'\" % data.decode('utf-8'))\n command = self.filter_command(data)\n self.log(\"Translated command '{}'\"\n .format(command.decode('utf-8')))\n if command:\n self.write_master(command)\n self.write_master(b'\\n')", "def run(proc, *args, queue_len=None):\n\n async def _run_task(proc, args):\n async with open_loop(queue_len=queue_len):\n return await proc(*args)\n\n return trio.run(_run_task, proc, args)", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return", "def test_mp_pipe_replacement(self):\n parent, child = create_psuedo_anonymous_duct_pair()\n\n def mp_child_target():\n parent.close()\n time.sleep(3)\n child.send(\"hello world\")\n child.close()\n\n p = multiprocessing.Process(target=mp_child_target)\n p.daemon = True\n p.start()\n child.close()\n\n p.join(10)\n assert_that(parent.recv()).is_equal_to(\"hello world\")\n parent.close()", "def run(self):\n self._config_log()\n # Ugly patch to avoid pyvisa complaining about missing filters\n warnings.simplefilter(\"ignore\")\n\n # Redirecting stdout and stderr to the logging system.\n logger = logging.getLogger()\n redir_stdout = StreamToLogRedirector(logger)\n sys.stdout = redir_stdout\n redir_stderr = StreamToLogRedirector(logger, 'stderr')\n sys.stderr = redir_stderr\n logger.info('Logger parametrised')\n\n logger.info('Process running')\n self.pipe.send('READY')\n while not self.process_stop.is_set():\n\n # Prevent us from crash if the pipe is closed at the wrong moment.\n try:\n\n # Wait for a measurement.\n while not self.pipe.poll(2):\n if self.process_stop.is_set():\n break\n\n if self.process_stop.is_set():\n break\n\n # Get the measure.\n name, config, build, runtime, mon_entries = self.pipe.recv()\n\n # Build it by using the given build dependencies.\n root = build_task_from_config(config, build, True)\n\n # Give all runtime dependencies to the root task.\n root.run_time = runtime\n\n logger.info('Task built')\n\n # There are entries in the database we are supposed to\n # monitor start a spy to do it.\n if mon_entries:\n spy = MeasureSpy(\n self.monitor_queue, mon_entries,\n root.task_database)\n\n # Set up the logger for this specific measurement.\n if self.meas_log_handler is not None:\n logger.removeHandler(self.meas_log_handler)\n self.meas_log_handler.close()\n self.meas_log_handler = None\n\n log_path = os.path.join(\n root.get_from_database('default_path'),\n name + '.log')\n if os.path.isfile(log_path):\n os.remove(log_path)\n self.meas_log_handler = RotatingFileHandler(log_path,\n mode='w',\n maxBytes=10**6,\n backupCount=10)\n aux = '%(asctime)s | %(levelname)s | %(message)s'\n formatter = logging.Formatter(aux)\n self.meas_log_handler.setFormatter(formatter)\n logger.addHandler(self.meas_log_handler)\n\n # Pass the events signaling the task it should stop or pause\n # to the task and make the database ready.\n root.should_pause = self.task_pause\n root.paused = self.task_paused\n root.should_stop = self.task_stop\n root.task_database.prepare_for_running()\n\n # Perform the checks.\n check, errors = root.check(test_instr=True)\n\n # They pass perform the measure.\n if check:\n logger.info('Check successful')\n root.perform_(root)\n result = ['', '', '']\n if self.task_stop.is_set():\n result[0] 
= 'INTERRUPTED'\n result[2] = 'Measure {} was stopped'.format(name)\n else:\n result[0] = 'COMPLETED'\n result[2] = 'Measure {} succeeded'.format(name)\n\n if self.process_stop.is_set():\n result[1] = 'STOPPING'\n else:\n result[1] = 'READY'\n\n self.pipe.send(tuple(result))\n\n # They fail, mark the measure as failed and go on.\n else:\n mes = 'Tests failed, see log for full records.'\n self.pipe.send(('FAILED', 'READY', mes))\n\n # Log the tests that failed.\n fails = errors.iteritems()\n message = '\\n'.join('{} : {}'.format(path, mes)\n for path, mes in fails)\n logger.critical(message)\n\n # If a spy was started kill it\n if mon_entries:\n spy.close()\n del spy\n\n except IOError:\n pass\n\n # Clean up before closing.\n logger.info('Process shuting down')\n if self.meas_log_handler:\n self.meas_log_handler.close()\n self.log_queue.put_nowait(None)\n self.monitor_queue.put_nowait((None, None))\n self.pipe.close()", "def run_pipelines(self):\n while True:\n # wait until a job is available or end has been signaled\n nodes_assigned = []\n no_more_pipelines = False\n with self.job_list_cv:\n while len(self.job_list) == 0:\n if not self._allow_new_pipelines:\n no_more_pipelines = True\n break\n self.job_list_cv.wait()\n\n if no_more_pipelines:\n self._join_running_pipelines()\n return\n\n # wait until nodes are available or quit has been signaled\n with self.free_cv:\n pipeline = self.job_list.pop_job(self.free_nodes)\n while pipeline is None:\n if not self._process_pipelines:\n break\n self.free_cv.wait()\n pipeline = self.job_list.pop_job(self.free_nodes)\n\n if self._process_pipelines:\n _log.debug(\"starting pipeline %s, free nodes %d -> %d\",\n pipeline.id, self.free_nodes,\n self.free_nodes - pipeline.get_nodes_used())\n self.free_nodes -= pipeline.get_nodes_used()\n\n # Get a list of node names from the allocated nodes and\n # assign it to the pipeline\n for i in range(pipeline.total_nodes):\n nodes_assigned.append(self.allocated_nodes.get())\n _log.debug(\"pipeline {0} allocated nodes {1}\".format(\n pipeline.id, nodes_assigned))\n\n if not self._process_pipelines:\n self._join_running_pipelines()\n return\n\n with self.pipelines_lock:\n pipeline.start(self, nodes_assigned, self.runner)\n self._running_pipelines.add(pipeline)\n if self._status is not None:\n self._status.set_state(pipeline.get_state())\n\n self._join_running_pipelines()", "def run(self):\n operation_manager = self._core.get_operation_manager()\n while True:\n while operation_manager.process_next():\n pass\n sleep(2)", "async def fork(pid, cin, state_info):\n do = reporter(state_info, pid)\n\n while True:\n # wait for fork grab\n phil_hand = await do('wg', cin())\n\n # wait for philosopher to pick up fork\n await do('wu', phil_hand())\n\n # wait for philosopher to put down fork\n await do('wd', phil_hand())", "def processCommands(self):\n while True:\n yield\n if self.pipe.poll():\n msg, argument = self.pipe.recv()\n if msg == \"STOP\":\n logging.debug(\"Stopping execution of robot code\")\n self.debugmode = True\n elif msg == \"END\":\n logging.debug(\"Ending simulation\")\n raise FinishSimulation()\n elif msg == \"BREAK\":\n logging.debug(\"Adding a breakpoint\")\n self.breakpoints.append(argument)\n else:\n logging.debug(\"Unknown command\")", "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def run_tweepy(self):\n # Setup the logging for the sub-process\n rlog = logging.getLogger()\n 
rlog.setLevel(logging.INFO)\n handler = logging.FileHandler(paths.TWITTER_SUBPROCESS_LOG.format(pid=os.getpid()), encoding='utf-8')\n handler.setFormatter(logging.Formatter('{asctime} {levelname} {name} {message}', style='{'))\n rlog.addHandler(handler)\n\n # Do not join the queue's bg thread on exit\n self.mp_queue.cancel_join_thread()\n\n # Create the tweepy stream\n log.info('Creating and starting tweepy stream.')\n api = TweepyAPI(self.credentials) # Re-creation, much efficient, wow\n listener = SubProcessStream.TweepyListener(self.mp_queue, api)\n stream = tweepy.Stream(api.auth, listener)\n log.info('Tweepy stream ready.')\n\n # ERMAHGERD ! MAH FRAVRIT LERP !\n while True:\n try:\n log.info('Starting Tweepy stream.')\n stream.filter(follow=self.follows)\n except Exception as e:\n log.exception('Recovering from exception : {}'.format(e))\n else:\n log.info('Exiting sub-process.')\n return", "def run(self):\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n with _threadpool_limits(limits=1):\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return", "def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)" ]
[ "0.6293807", "0.6225486", "0.62050015", "0.61386853", "0.61101425", "0.6040808", "0.6030226", "0.6017932", "0.59379154", "0.58778286", "0.5862268", "0.58200634", "0.58104557", "0.5759573", "0.5742945", "0.5714849", "0.56924564", "0.5676682", "0.56600606", "0.5646844", "0.56132877", "0.55967623", "0.5571972", "0.5569721", "0.55646116", "0.5554807", "0.5545116", "0.5538102", "0.55259573", "0.5517748" ]
0.7024238
0
Creates the process that will be in charge of executing the tasks and a pipe to communicate with the main process.
def __init__(self, process_name, target_function, tasks):
    self.pipe_start, self.pipe_end = multiprocessing.Pipe()
    printnflush("Process started: %s" % process_name)
    self.process = multiprocessing.Process(group=None,
                                           target=target_function,
                                           name=process_name,
                                           args=(process_name, tasks, self.pipe_end))
    self.busy = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )", "def spawn(self, pcls, args):\n\n childp, ownp = multiprocessing.Pipe()\n p = pcls(self._id, childp)\n p._loglevel = self._loglevel\n p.start()\n\n childp.close()\n cid = ownp.recv()\n ownp.send((\"setup\", args))\n ownp.send(\"start\")\n\n self._child_procs.append((p.pid, cid))\n\n return cid", "def new_process() -> Process:\n return multiprocessing.Process()", "def start_new_processes(self):\n # initialize cache to mutualize calls to Variable.get in DAGs\n # needs to be done before this process is forked to create the DAG parsing processes.\n SecretCache.init()\n\n while self._parallelism - len(self._processors) > 0 and self._file_path_queue:\n file_path = self._file_path_queue.popleft()\n # Stop creating duplicate processor i.e. processor with the same filepath\n if file_path in self._processors:\n continue\n\n callback_to_execute_for_file = self._callback_to_execute[file_path]\n processor = self._create_process(\n file_path,\n self._pickle_dags,\n self._dag_ids,\n self.get_dag_directory(),\n callback_to_execute_for_file,\n )\n\n del self._callback_to_execute[file_path]\n Stats.incr(\"dag_processing.processes\", tags={\"file_path\": file_path, \"action\": \"start\"})\n\n processor.start()\n self.log.debug(\"Started a process (PID: %s) to generate tasks for %s\", processor.pid, file_path)\n self._processors[file_path] = processor\n self.waitables[processor.waitable_handle] = processor\n\n Stats.gauge(\"dag_processing.file_path_queue_size\", len(self._file_path_queue))", "def create_process(self, args=[], *popenargs, **kwargs):\n try:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs.setdefault('startupinfo', startupinfo)\n except:\n pass\n kwargs.setdefault('universal_newlines', True)\n kwargs.setdefault('stdin', sys.stdin)\n return subprocess.Popen(self.build_args(args), *popenargs, **kwargs)", "def _create_process(self, process, name):\n def _run():\n try:\n while True:\n process.loop()\n except KeyboardInterrupt:\n pass\n except:\n self._logger.exception('Process %s died!', name)\n return ProcessEnvironment().create_process(_run, name)", "def spawn_process_for(self, gq):\n pipe_top, pipe_bottom = multiprocessing.Pipe()\n p = multiprocessing.Process(target=GridQueue.listen,args=(gq, pipe_bottom))\n p.start()\n self.pipes[gq.index] = pipe_top", "def spawnProcess(self, processProtocol, executable, args=(), env={},\r\n path=None, uid=None, gid=None, usePTY=0,\r\n childFDs=None):\r\n\r\n proc = DummyProcess(self, executable, args, env, path,\r\n processProtocol, uid, gid, usePTY, childFDs)\r\n processProtocol.makeConnection(proc)\r\n self.spawnedProcesses.append(proc)\r\n return proc", "def start(self):\n assert not self._running\n for i in range(self._n_parallel):\n # Set the unique seed.\n self._kwargs['seed'] = (self._seed + i)%2**32 if self._seed is not None else None\n # Create the pipe to comunicate with this process.\n conn1, conn2 = mp.Pipe()\n # Create and start the process to run an environment.\n p = mp.Process(\n target=self._runner, \n args=(conn2, self._env_id),\n kwargs=self._kwargs,\n daemon=True\n )\n p.start()\n # Store the connection and the process object.\n self._conns.append(conn1)\n self._processes.append(p)\n\n self._running = True", "def _spawn_simple_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, 
cls, config, proc_attr)\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_simple_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n return process_instance", "def spawn(self):\r\n options = self.config.options\r\n\r\n if self.pid:\r\n msg = 'process %r already running' % self.config.name\r\n options.logger.warn(msg)\r\n return\r\n\r\n self.killing = 0\r\n self.spawnerr = None\r\n self.exitstatus = None\r\n self.system_stop = 0\r\n self.administrative_stop = 0\r\n\r\n self.laststart = time.time()\r\n\r\n self._assertInState(ProcessStates.EXITED, ProcessStates.FATAL,\r\n ProcessStates.BACKOFF, ProcessStates.STOPPED)\r\n\r\n self.change_state(ProcessStates.STARTING)\r\n\r\n try:\r\n filename, argv = self.get_execv_args()\r\n except ProcessException as what:\r\n self.record_spawnerr(what.args[0])\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n self.dispatchers, self.pipes = self.config.make_dispatchers(self)\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EMFILE:\r\n # too many file descriptors open\r\n msg = 'too many open files to spawn %r' % self.config.name\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n pid = options.fork()\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EAGAIN:\r\n # process table full\r\n msg = ('Too many processes in process table to spawn %r' %\r\n self.config.name)\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n options.close_parent_pipes(self.pipes)\r\n options.close_child_pipes(self.pipes)\r\n return\r\n\r\n if pid != 0:\r\n return self._spawn_as_parent(pid)\r\n\r\n else:\r\n return self._spawn_as_child(filename, argv)", "def run(self):\n self._config_log()\n # Ugly patch to avoid pyvisa complaining about missing filters\n warnings.simplefilter(\"ignore\")\n\n # Redirecting stdout and stderr to the logging system.\n logger = logging.getLogger()\n redir_stdout = StreamToLogRedirector(logger)\n sys.stdout = redir_stdout\n redir_stderr = StreamToLogRedirector(logger, 'stderr')\n sys.stderr = redir_stderr\n logger.info('Logger parametrised')\n\n logger.info('Process running')\n self.pipe.send('READY')\n while not self.process_stop.is_set():\n\n # Prevent us from crash if the pipe is closed at the wrong moment.\n try:\n\n # Wait for a measurement.\n while not self.pipe.poll(2):\n if 
self.process_stop.is_set():\n break\n\n if self.process_stop.is_set():\n break\n\n # Get the measure.\n name, config, build, runtime, mon_entries = self.pipe.recv()\n\n # Build it by using the given build dependencies.\n root = build_task_from_config(config, build, True)\n\n # Give all runtime dependencies to the root task.\n root.run_time = runtime\n\n logger.info('Task built')\n\n # There are entries in the database we are supposed to\n # monitor start a spy to do it.\n if mon_entries:\n spy = MeasureSpy(\n self.monitor_queue, mon_entries,\n root.task_database)\n\n # Set up the logger for this specific measurement.\n if self.meas_log_handler is not None:\n logger.removeHandler(self.meas_log_handler)\n self.meas_log_handler.close()\n self.meas_log_handler = None\n\n log_path = os.path.join(\n root.get_from_database('default_path'),\n name + '.log')\n if os.path.isfile(log_path):\n os.remove(log_path)\n self.meas_log_handler = RotatingFileHandler(log_path,\n mode='w',\n maxBytes=10**6,\n backupCount=10)\n aux = '%(asctime)s | %(levelname)s | %(message)s'\n formatter = logging.Formatter(aux)\n self.meas_log_handler.setFormatter(formatter)\n logger.addHandler(self.meas_log_handler)\n\n # Pass the events signaling the task it should stop or pause\n # to the task and make the database ready.\n root.should_pause = self.task_pause\n root.paused = self.task_paused\n root.should_stop = self.task_stop\n root.task_database.prepare_for_running()\n\n # Perform the checks.\n check, errors = root.check(test_instr=True)\n\n # They pass perform the measure.\n if check:\n logger.info('Check successful')\n root.perform_(root)\n result = ['', '', '']\n if self.task_stop.is_set():\n result[0] = 'INTERRUPTED'\n result[2] = 'Measure {} was stopped'.format(name)\n else:\n result[0] = 'COMPLETED'\n result[2] = 'Measure {} succeeded'.format(name)\n\n if self.process_stop.is_set():\n result[1] = 'STOPPING'\n else:\n result[1] = 'READY'\n\n self.pipe.send(tuple(result))\n\n # They fail, mark the measure as failed and go on.\n else:\n mes = 'Tests failed, see log for full records.'\n self.pipe.send(('FAILED', 'READY', mes))\n\n # Log the tests that failed.\n fails = errors.iteritems()\n message = '\\n'.join('{} : {}'.format(path, mes)\n for path, mes in fails)\n logger.critical(message)\n\n # If a spy was started kill it\n if mon_entries:\n spy.close()\n del spy\n\n except IOError:\n pass\n\n # Clean up before closing.\n logger.info('Process shuting down')\n if self.meas_log_handler:\n self.meas_log_handler.close()\n self.log_queue.put_nowait(None)\n self.monitor_queue.put_nowait((None, None))\n self.pipe.close()", "def create_process( # type: ignore[override]\n self,\n python_setup: PythonSetup,\n subprocess_encoding_environment: SubprocessEncodingEnvironment,\n pex_build_environment: PexBuildEnvironment,\n *,\n pex_args: Iterable[str],\n description: str,\n input_files: Optional[Digest] = None,\n env: Optional[Mapping[str, str]] = None,\n **kwargs: Any,\n ) -> Process:\n\n env = dict(env) if env else {}\n env.update(**pex_build_environment.invocation_environment_dict,)\n\n return super().create_process(\n python_setup=python_setup,\n subprocess_encoding_environment=subprocess_encoding_environment,\n pex_path=self.executable,\n pex_args=pex_args,\n description=description,\n input_files=input_files or self.digest,\n env=env,\n **kwargs,\n )", "def create_task():", "def _spawn(self, protocol, args, env=None):\n return reactor.spawnProcess(protocol, self.cmd, args, env=env)", "def Spawn(proc):\n proc.start()\n return 
proc", "def main():\n parent_pipe, child_pipe = Pipe()\n\n process1 = Process(target=book_seat, args=(child_pipe,))\n process2 = Process(target=book_seat, args=(child_pipe,))\n process3 = Process(target=book_seat, args=(child_pipe,))\n\n for user_info in USER_INFO:\n parent_pipe.send(user_info)\n\n process1.start()\n process2.start()\n process3.start()\n\n parent_pipe.close()\n\n process1.join()\n process2.join()\n process3.join()", "def __init__(self, task, queue, semaphore=None, task_args=None,\n task_kwargs=None):\n multiprocessing.Process.__init__(self)\n self._task = task\n self._queue = queue\n self._semaphore = semaphore\n self._started = multiprocessing.Event()\n self._killing = multiprocessing.Event()\n self._output = None\n self._parent_pid = None\n self._task_args = task_args if task_args else ()\n self._task_kwargs = task_kwargs if task_kwargs else {}", "def _spawn_stream_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n\n listen_name = get_safe(config, \"process.listen_name\") or name\n log.debug(\"Stream Process (%s) listen_name: %s\", name, listen_name)\n process_instance._proc_listen_name = listen_name\n\n process_instance.stream_subscriber = StreamSubscriber(process=process_instance, exchange_name=listen_name, callback=process_instance.call_process)\n\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n self._cleanup_method(process_instance.id, rsvc)\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc, process_instance.stream_subscriber],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_stream_process for %s\" % process_instance._proc_name)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def spawn(self):\r\n self.before_spawn()\r\n pid = Subprocess.spawn(self)\r\n if pid is None:\r\n #Remove object reference to decrement the reference count on error\r\n self.fcgi_sock = None\r\n return pid", "def main(self):\n\n self._setup_task_manager()\n self._setup_source_and_destination()\n self.task_manager.blocking_start(waiting_func=self.waiting_func)\n self._cleanup()", "def _make_fifo(self):\n if os.path.exists(self.fifo_path):\n os.remove(self.fifo_path)\n os.mkfifo(self.fifo_path)", "def CreateProcesses(self, umpire_config, env):\n if ('services' not in umpire_config or\n 'instalog' not in umpire_config['services']):\n return None\n cli_path = os.path.join(env.server_toolkit_dir, 'py', 'instalog', 'cli.py')\n config_path = 
self.GenerateConfigFile(umpire_config, env)\n proc_config = {\n 'executable': cli_path,\n 'name': SERVICE_NAME,\n # Have to use --no-daemon when starting instalog, because Umpire will\n # supervise the process by its pid.\n 'args': ['--config', config_path, 'start', '--no-daemon'],\n 'path': '/tmp',\n 'env': os.environ}\n proc = umpire_service.ServiceProcess(self)\n proc.SetConfig(proc_config)\n return [proc]", "def run_task(process_name, tasks, pipe_end):\n task_ended = False\n try:\n while not task_ended:\n # Blocks until it receives a message\n message_type, value = pipe_end.recv()\n\n if message_type == \"EXECUTE\":\n result = tasks[value].run()\n pipe_end.send((\"TASK FINISHED\", (value, result)))\n\n elif message_type == \"FINISH\":\n printnflush( \"Communication successfully closed for\",process_name)\n task_ended = True\n else:\n printnflush(\"Unexpected message: %s\"%message_type)\n task_ended = True\n\n except EOFError:\n printnflush(\"Communication closed due to remote closing of the pipe in process %s\"%process_name)\n\n except Exception, msg:\n printnflush(\"Communication closed due to unexpected exception: %s\"%msg)\n\n pipe_end.close()\n printnflush( \"Task reached end\")", "def _start_child(self):\n parent_pipe, child_pipe = mp.Pipe()\n self._poll.register(parent_pipe.fileno(), select.POLLIN | select.POLLPRI)\n\n pid = os.fork()\n if not pid:\n ch = Worker(child_pipe, self.server_socket)\n parent_pipe.close()\n ch.run()\n else:\n self._children[parent_pipe.fileno()] = ManagerChild(pid, parent_pipe)\n child_pipe.close()", "async def open_process(\r\n cls, args: \"Union[str, List[str]]\", env_additions: Dict[str, str] = {}\r\n ) -> \"AsyncIterator[Expect]\":\r\n printer_channels: (\r\n \"Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]\"\r\n ) = trio.open_memory_channel(1)\r\n printer_send_channel, printer_receive_channel = printer_channels\r\n notifier_channels: (\r\n \"Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]\"\r\n ) = trio.open_memory_channel(0)\r\n notifier_send_channel, notifier_receive_channel = notifier_channels\r\n\r\n async with notifier_receive_channel:\r\n\r\n with patch.dict(\"os.environ\", values=env_additions) as patched_env:\r\n async with await trio.open_process(\r\n args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=patched_env\r\n ) as process:\r\n async with trio.open_nursery() as nursery:\r\n expect = cls(\r\n process=process,\r\n printer_send_channel=printer_send_channel,\r\n printer_receive_channel=printer_receive_channel,\r\n notifier_send_channel=notifier_send_channel,\r\n opened_notifier_receive_channel=notifier_receive_channel,\r\n )\r\n nursery.start_soon(expect.copier_recorder)\r\n nursery.start_soon(expect.printer)\r\n\r\n yield expect\r\n\r\n # print(\"waiting for process\") # debug\r\n await expect.process.wait()", "def _spawn_immediate_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n self._process_init(process_instance)\n self._process_start(process_instance)\n return process_instance", "def doTask(self):\n\n def signal_cb(s, f):\n os._exit(0)\n\n for s in signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT:\n signal.signal(s, signal_cb)\n\n # write pidfile\n def atexit_cb():\n print(\"Exit fork\")\n\n atexit.register(atexit_cb)\n\n # Start the write\n i = 0\n while self.pid == 0 or not self.do_fork:\n print(self.msg % os.getpid())\n time.sleep(2)\n i += 1", "def 
open_persistent_pipe(self):\n if self.proc is not None:\n return\n self.proc = subprocess.Popen([self.herbstclient_path, '--binary-pipe'],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n env=self.env,\n encoding=None, # open stdout/stdin in binary mode\n )", "def t_run_process(self, *args, **kwargs):\n\n str_cmd = \"\"\n d_request = {}\n d_meta = {}\n\n for k,v in kwargs.items():\n if k == 'request': d_request = v\n\n d_meta = d_request['meta']\n\n if d_meta:\n self.jid = d_meta['jid']\n self.auid = d_meta['auid']\n str_cmd = d_meta['cmd']\n\n if isinstance(self.jid, int):\n self.jid = str(self.jid)\n\n self.dp.qprint(\"spawing and starting poller thread\")\n\n # Start the 'poller' worker\n self.poller = Poller(cmd = str_cmd,\n debugToFile = self.b_debugToFile,\n debugFile = self.str_debugFile)\n self.poller.start()\n\n str_timeStamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')\n str_uuid = uuid.uuid4()\n str_dir = '%s_%s' % (str_timeStamp, str_uuid)\n self.str_jobRootDir = str_dir\n\n b_jobsAllDone = False\n\n p = self._ptree\n\n p.cd('/')\n p.mkcd(str_dir)\n p.touch('d_meta', json.dumps(d_meta))\n p.touch('cmd', str_cmd)\n if len(self.auid):\n p.touch('auid', self.auid)\n if len(self.jid):\n p.touch('jid', self.jid)\n\n p.mkdir('start')\n p.mkdir('end')\n\n jobCount = 0\n p.touch('jobCount', jobCount)\n\n while not b_jobsAllDone:\n try:\n b_jobsAllDone = self.poller.queueAllDone.get_nowait()\n except queue.Empty:\n self.dp.qprint('Waiting on start job info')\n d_startInfo = self.poller.queueStart.get()\n str_startDir = '/%s/start/%d' % (self.str_jobRootDir, jobCount)\n p.mkdir(str_startDir)\n p.cd(str_startDir)\n p.touch('startInfo', d_startInfo.copy())\n p.touch('/%s/startInfo' % str_dir, d_startInfo.copy())\n\n self.dp.qprint('Waiting on end job info')\n d_endInfo = self.poller.queueEnd.get()\n str_endDir = '/%s/end/%d' % (self.str_jobRootDir, jobCount)\n p.mkdir(str_endDir)\n p.cd(str_endDir)\n p.touch('endInfo', d_endInfo.copy())\n p.touch('/%s/endInfo' % str_dir, d_endInfo.copy())\n\n p.touch('/%s/jobCount' % str_dir, jobCount)\n jobCount += 1\n self.dp.qprint('All jobs processed.')" ]
[ "0.6289883", "0.6266566", "0.6023925", "0.6001324", "0.5990925", "0.59712356", "0.5920482", "0.58967435", "0.58738995", "0.5843958", "0.5802889", "0.5784489", "0.5773565", "0.57671195", "0.57264394", "0.5718598", "0.56604475", "0.5654954", "0.5608592", "0.5602501", "0.5568197", "0.5554925", "0.5543479", "0.55300206", "0.55166", "0.5503267", "0.54865855", "0.5463574", "0.5457047", "0.54535204" ]
0.6642339
0
Sends the process an "EXECUTE" task message to run the task named 'task_name'.
def execute_task(self, task_name):
    self.busy = True
    self.pipe_start.send(("EXECUTE", task_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calltask(self, name, **vars):\n if name in self._tasks:\n for entry in self._tasks[name]:\n entry.execute(vars)\n else:\n raise Error(\"No such task: {0}\".format(name))", "def doTask(self, *args):\n taskId = self.task.get()\n document = self.document_uuid.get()\n visitor = self.visitor_uuid.get()\n self.output.set(str(self.taskEx.executeTask(visitor, document, taskId)))", "def execute_task(task_name, uuid, args, kwargs):\n driver = registry.get_driver() # Here is the main reason we have a singleton\n task_to_run = driver.get_task(task_name)\n log.info('Executing task {}'.format(task_name))\n log.debug('with param {}, {}'.format(args, kwargs))\n try:\n ret = TaskResponse(uuid, 'DONE',\n task_to_run.execute(*args, **kwargs))\n except Exception as exc:\n log.error('Error {} while running task {} with param {}, {}'\n ''.format(exc, task_name, args, kwargs))\n ret = TaskResponse(uuid, 'ERROR',\n exception=exc,\n traceback=sys.exc_info[2])\n\n ret = ret.to_dict()\n log.info('task {} executed'.format(task_name))\n log.debug('task returns {}'.format(ret))\n return ret", "def octopus_task(self, msg, args):\r\n self.tasks.send_task_by_id(msg, args)", "def execute_task(self):\n raise NotImplementedError(\"Execute Task method not implemented\")", "def _send_execute_command(self):\n client = SBusClient(self.storlet_pipe_path)\n try:\n resp = client.execute(self.srequest.params, self.remote_fds)\n if not resp.status:\n raise StorletRuntimeException(\"Failed to send execute command\")\n\n if not resp.task_id:\n raise StorletRuntimeException(\"Missing task id\")\n else:\n self.task_id = resp.task_id\n except SBusClientException:\n raise StorletRuntimeException(\"Failed to send execute command\")", "def run(self):\n# log.trace(\" run task %s \", self.name)\n return self.target.send(self.params)", "def execute_task(self, task):\n t = threading.Thread(target=task)\n t.start()", "def _run_system(task):\n\n cmd = task.task.format(*task.get_args(), **task.get_kwargs())\n\n print(\"Running: {}\".format(cmd))\n os.system(cmd)", "def octopus_tasks(self, msg, args):\r\n self.tasks.send_tasks(msg, args)", "async def execute_task(self, *_, **__) -> None:\n if not self._can_run_disabled and self.hacs.system.disabled:\n self.task_logger(\n self.hacs.log.debug,\n f\"Skipping task, HACS is disabled {self.hacs.system.disabled_reason}\",\n )\n return\n self.task_logger(self.hacs.log.debug, \"Executing task\")\n start_time = monotonic()\n\n try:\n if task := getattr(self, \"async_execute\", None):\n await task() # pylint: disable=not-callable\n elif task := getattr(self, \"execute\", None):\n await self.hass.async_add_executor_job(task)\n\n except BaseException as exception: # lgtm [py/catch-base-exception] pylint: disable=broad-except\n self.task_logger(self.hacs.log.error, f\"failed: {exception}\")\n\n else:\n self.hacs.log.debug(\n \"HacsTask<%s> took %.3f seconds to complete\", self.slug, monotonic() - start_time\n )", "def execute_task(self, config=None, args=None, targets=None, extra_targets=None):\r\n with closing(StringIO()) as output:\r\n task = prepare_task(self.task_type(), config=config, args=args, targets=targets,\r\n outstream=output)\r\n task.execute(list(targets or ()) + list(extra_targets or ()))\r\n return output.getvalue()", "def execute(self, task, script, **kwargs):\n locals().update(kwargs)\n exec(script)", "def process_task(params):\n params['task'](params)", "def queue_task(self, task_name):\n task_response = self.do_task(\n self.types[task_name],\n self.data_dict[task_name]['taskId']\n )\n 
self.data_dict[task_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Queued {name} task response: '.format(name=task_name) +\n task_response.content\n )", "def executeTask (self, queue='default'):\n tasks = self.taskqueue_stub.GetTasks(queue)\n if tasks:\n task = tasks[0]\n self.taskqueue_stub.DeleteTask (queue, task['name'])\n params = base64.b64decode(task[\"body\"])\n if dict(task['headers']).get('Content-Type') == 'application/json':\n return self.testapp.post_json(task[\"url\"], json.loads(params))\n else:\n return self.testapp.post(task[\"url\"], params)", "def task_trigger(self, args):\n h, tmp = tempfile.mkstemp(\n dir=self._tmpdir, prefix='trigger_raw', suffix='.json')\n os.close(h)\n cmd = [\n '-user',\n 'joe@localhost',\n '-d',\n 'pool=default',\n '-dump-json',\n tmp,\n ]\n cmd.extend(args)\n assert not self._run_swarming('trigger',\n cmd), 'Failed to trigger a task. cmd=%s' % cmd\n with open(tmp, 'rb') as f:\n data = json.load(f)\n task_id = data['tasks'][0]['task_id']\n logging.debug('task_id = %s', task_id)\n return task_id", "def run_task(self, image, name, command, configuration):\n raise NotImplementedError()", "def post(self, queue_name):\n task_name = self.request.get('task_name')\n\n if self.request.get('action:deletetask'):\n result = self._delete_task(queue_name, task_name)\n elif self.request.get('action:runtask'):\n result = self._run_task(queue_name, task_name)\n if result == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE:\n message = 'Queue \"%s\" not found' % queue_name\n elif result == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK:\n message = 'Task \"%s\" not found' % task_name\n elif result != taskqueue_service_pb.TaskQueueServiceError.OK:\n message = 'Internal error'\n else:\n message = ''\n self.redirect(\n '%s?%s' % (self.request.path_url,\n urllib.urlencode({'page': self.request.get('page'),\n 'message': message})))", "def executeTask(self, task):\n if self._Finished:\n return\n \n self._NextTask = None\n taskRunner = TaskRunner(task)\n self.currentTask = taskRunner\n print \"Starting Task Execution: %s\" % task.taskname()\n taskRunner.evalStartControlPoint(self)\n if self._NextTask != None:\n print \"Next Task Scheduled: %s\" % self._NextTask\n nextTask = self.taskTree.findTask(self._NextTask)\n if nextTask == None:\n msg = \"Error: Task named %s not found:\\n\" % self._NextTask\n msg += \"Unable to continue processing\\n\"\n raise ShREEKException(msg, ClassInstance = self)\n self.executeTask(nextTask)\n return\n\n if self.monitorThread != None:\n self.monitorThread.notifyTaskStart(task)\n exitCode = taskRunner.run()\n if self.monitorThread != None:\n self.monitorThread.notifyTaskEnd(task, exitCode)\n print \"Task Execution Complete: %s Exit: %s\" % (\n task.taskname(), exitCode,\n )\n \n\n taskRunner.evalEndControlPoint(self)\n if self._NextTask != None:\n print \"Next Task Scheduled: %s\" % self._NextTask,\n nextTask = self.taskTree.findTask(self._NextTask)\n if nextTask == None:\n msg = \"Error: Task named %s not found:\\n\" % self._NextTask\n msg += \"Unable to continue processing\\n\"\n raise ShREEKException(msg, ClassInstance = self)\n\n self.executeTask(nextTask)\n return\n\n for child in task.children:\n self.executeTask(child)\n return", "def _run_async_task(task=None, session=None):\n if task is None or session is None:\n return None\n task_name = session.xenapi.task.get_name_label(task)\n log.debug(\"Running %s\", task_name)\n while session.xenapi.task.get_status(task) == \"pending\":\n 
progress = round(session.xenapi.task.get_progress(task), 2) * 100\n log.debug(\"Task progress %.2f%%\", progress)\n time.sleep(1)\n log.debug(\"Cleaning up task %s\", task_name)\n session.xenapi.task.destroy(task)", "def run_task(self, task_id):\n raise NotImplementedError", "def task():\n\n\tprint('Example task executed.')", "def execute_task(self, config=None, args=None, targets=None):\n with closing(StringIO()) as output:\n task = self.prepare_task(config=config,\n args=args,\n targets=targets,\n build_graph=self.build_graph,\n build_file_parser=self.build_file_parser,\n address_mapper=self.address_mapper,\n console_outstream=output)\n task.execute()\n return output.getvalue()", "def run_task(self) -> Task:", "def task(self, name):\n pass", "def exec(cls, *args, **kwargs):\n task = cls(*args, **kwargs)\n task.run()\n return task", "def ExecuteTaskQueueTasks(self, handler_name, task_queue_name):\n taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n tasks = taskq.GetTasks(task_queue_name)\n taskq.FlushQueue(task_queue_name)\n for task in tasks:\n self.testapp.post(handler_name,\n urllib.unquote_plus(base64.b64decode(task['body'])))\n self.ExecuteTaskQueueTasks(handler_name, task_queue_name)", "def queue_cloud_task(request):\n project = os.environ.get(\"PROJECT_ID\")\n queue = os.environ.get(\"QUEUE_NAME\")\n location = os.environ.get(\"QUEUE_REGION_LOCATION\")\n service_account_email = os.environ.get(\"SERVICE_ACCOUNT_EMAIL\")\n\n request_json = request.get_json()\n\n # the http endpoint the task will send to\n url = request_json.get('url')\n # the post data that should be forwarded to the http endpoint\n payload = request_json.get('payload')\n # the time in seconds to delay task execution\n in_seconds = request_json.get('in_seconds')\n # the unique name of the task we are queueing\n task_name = request_json.get('task_name')\n\n try:\n # Create a client.\n client = tasks_v2.CloudTasksClient()\n # Construct the fully qualified queue name.\n parent = client.queue_path(project, location, queue)\n except Exception as e:\n print(e)\n return f\"{e}\", 500\n\n # Construct the request body.\n task = {\n \"http_request\": { # Specify the type of request.\n \"http_method\": tasks_v2.HttpMethod.POST,\n \"url\": url,\n \"oidc_token\": {\"service_account_email\": service_account_email},\n }\n }\n if payload is not None:\n if isinstance(payload, dict):\n # Convert dict to JSON string\n payload = json.dumps(payload)\n # specify http content-type to application/json\n task[\"http_request\"][\"headers\"] = {\"Content-type\": \"application/json\"}\n\n # The API expects a payload of type bytes.\n converted_payload = payload.encode()\n\n # Add the payload to the request.\n task[\"http_request\"][\"body\"] = converted_payload\n\n if in_seconds is not None:\n # Convert \"seconds from now\" into an rfc3339 datetime string.\n d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)\n\n # Create Timestamp protobuf.\n timestamp = timestamp_pb2.Timestamp()\n timestamp.FromDatetime(d)\n\n # Add the timestamp to the tasks.\n task[\"schedule_time\"] = timestamp\n\n if task_name is not None:\n # Add the name to tasks.\n name = f\"projects/{project}/locations/{location}/queues/{queue}/tasks{task_name}\"\n task[\"name\"] = name\n\n try:\n # Use the client to build and send the task.\n response = client.create_task(request={\"parent\": parent, \"task\": task})\n return f\"Created task {response.name}\", 200\n except Exception as e:\n print(e)\n return f\"{e}\", 500", "def do_task(self, 
task_type, upload_id, sys_config=1):\n response = self.do_request(\n self.base_url +\n \"/oasis/doTask\" + task_type + \"/\" +\n str(sys_config) + \"/\" +\n str(upload_id) + \"/\"\n )\n return response" ]
[ "0.67628795", "0.67263645", "0.6693782", "0.6614726", "0.65432596", "0.62563705", "0.62268275", "0.61100656", "0.60212255", "0.59888965", "0.5962691", "0.5941963", "0.5929463", "0.5922408", "0.5921233", "0.59052026", "0.5753579", "0.57175267", "0.56979007", "0.56960267", "0.5683987", "0.56730735", "0.5669286", "0.5663449", "0.5643297", "0.5638627", "0.55953306", "0.55889237", "0.5555424", "0.5549231" ]
0.77790886
0
Sets the 'busy' flag in order to mark this task executor as busy (its associated process is performing a task).
def set_task_finished(self):
    self.busy = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_busy(self, busy):\n if busy:\n self.hjson_button.setDisabled(True)\n self.loading.setHidden(False)\n self.loading_movie.start()\n else:\n self.hjson_button.setDisabled(False)\n self.loading.setHidden(True)\n self.loading_movie.stop()\n self.busy = busy", "def busy(self):\n pass", "def busy(self) -> bool:\n return self._busy", "def busyWait(self):\n time.sleep(0.0)", "def setBusyMessage(self, message):\n self.connection.client.run_command(\"set busy %s\" % message)", "def busy(self, flag, message=\"\"): \n return None", "def _busy_wait(self):\n wait_for = GPIO.HIGH\n if self.inky_version == 2:\n wait_for = GPIO.LOW\n\n while(GPIO.input(self.busy_pin) != wait_for):\n pass", "def busy(self) -> bool:\n return self.state != SubflowState.Available", "def freebusy(self):\r\n return FreeBusyResource(self)", "def isBusy(self):\n return self.busy", "def is_busy(self) -> bool:\n return self.__interface.read_pin(self.__interface.BUSY_PIN) == 0 # 0: busy, 1: idle.", "def run_and_wait():\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)", "def set_cursor_busy(self, widget=None):\n logger.debug(\"Setting cursor to busy. widget: %s\", widget)\n widget = self.root if widget is None else widget\n widget.config(cursor=\"watch\")\n widget.update_idletasks()", "def unlocked_task(self):\n yield self.executor.submit(self.blocking_task)\n self.doc.add_next_tick_callback(partial(self.step_button))", "def is_busy(self):\n threads = len(self.executor._threads)\n if threads == 0:\n return False\n\n capacity = self.executor._work_queue.qsize() / float(threads)\n if capacity > 2:\n return True\n elif capacity < 1:\n return False\n else:\n return capacity > (random.random() + 1)", "def _busy_wait(self, timeout=40.0):\n # If the busy_pin is *high* (pulled up by host)\n # then assume we're not getting a signal from inky\n # and wait the timeout period to be safe.\n if self._gpio.input(self.busy_pin):\n warnings.warn(\"Busy Wait: Held high. 
Waiting for {:0.2f}s\".format(timeout))\n time.sleep(timeout)\n return\n\n # If the busy_pin is *low* (pulled down by inky)\n # then wait for it to high.\n t_start = time.time()\n while not self._gpio.input(self.busy_pin):\n time.sleep(0.01)\n if time.time() - t_start >= timeout:\n warnings.warn(\"Busy Wait: Timed out after {:0.2f}s\".format(time.time() - t_start))\n return\n\n # print(\"Busy_waited\", time.time()-t_start, \"out of\", timeout, \"seconds\")", "def free_busy_location(self, free_busy_location: str):\n self._free_busy_location = free_busy_location", "def isBusy(self):\n\t\tif self.stokes.isBusy() == 1 or self.thp.isBusy() == 1 or self.tthp.isBusy() == 1:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def acqbusy(self):\n data = self._ftdi.spi_read(self.ACQBUSY_ADDR, len=1, burst='fixed')\n return data[0] & self.ACQBUSY_MASK", "def update_waiting(self):\n if self.get_value(0) is None:\n self.set_value(True, 0)\n else:\n self.set_value(not bool(self.get_value(0)), 0)\n self.state = ACTIVE", "def update_waiting(self):\n if self.get_value(0) is None:\n self.set_value(False, 0)\n else:\n self.set_value(bool(self.get_value(0)), 0)\n self.state = ACTIVE", "def set_no_longer_active(self):\n with self.redis_client.lock(\"active-lock\"):\n self.set_to_redis(\"active\", \"done\")", "def set_task_in_progress(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 1)\n\n # Refresh the table\n self.write_tasks_table()", "def isBusy(self):\n state = caget(self.pvBase + \":CHAN1:DeviceStatus_RBV\")\n return state != \"2\"", "def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()", "def ignore_if_busy(self):\r\n if self.is_waiting_for_message():\r\n self.beep()\r\n return True\r\n return False", "def mark_as_in_progress(self, task):\n raise NotImplementedError('')", "def execute_task(self, task_name):\n self.busy = True\n self.pipe_start.send((\"EXECUTE\",task_name))", "def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def async_mark_unavailable(self):\n self._available = False" ]
[ "0.70856065", "0.6832197", "0.6694359", "0.62857145", "0.62733877", "0.6099452", "0.6085377", "0.60533494", "0.6050232", "0.6013056", "0.59043485", "0.57955724", "0.5726104", "0.5645821", "0.5613755", "0.55460453", "0.5537266", "0.5512436", "0.5508247", "0.5469994", "0.5445433", "0.5433892", "0.5430102", "0.54132265", "0.53275496", "0.53117573", "0.5273132", "0.5264646", "0.52508956", "0.52248436" ]
0.6956033
1
Sends a finalization message (forces the associated process to break the loop and end)
def finalize(self): self.busy = False self.pipe_start.send(("FINISH",None)) self.process.join() if self.process.is_alive(): self.process.terminate()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self,x):\n logger.info('*** finalize: worker id=%d',self._agent.wid)\n exec_command(self.commands['finalize'])", "def finalize(self):\n self.clear()\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def on_process_exit(self):\n if self.flush_thread:\n self.flush_thread.cancel()\n self.flush_buffer()", "def finalize(self):\n self.stop()", "def finalize():", "def finalize():", "def finalize():", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def endMessage(self):", "def finalize(self, interrupted=False):\n pass", "def finalize_script(self):\n self.interrupt_script()", "def finalize():\n pass", "def finalize():\n pass", "def finish(**kwargs):\n if __debug__:\n logger.debug(HEADER + \"Finalization at worker. Not needed.\")\n pass", "def postloop(self):\n cmd.Cmd.postloop(self) ## Clean up command completion\n print(\"Exiting...\")", "def __procFinished(self, exitCode, exitStatus):\n self.__finish()", "def finishWorker(*args, **kwargs):\n if __debug__:\n logger.debug(HEADER + \"Finalization at worker process. Not needed.\")\n pass", "def exit_loop(self):\n self.loop.exit()", "def cmdfinalization_hook_system_exit(\n self, data: cmd2.plugin.CommandFinalizationData\n ) -> cmd2.plugin.CommandFinalizationData:\n self.called_cmdfinalization += 1\n raise SystemExit(5)", "def terminated(self):\n gc3libs.log.debug(\" ...done.\")", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def finalize(self):\n for p in self._processes:\n if p.join(30) is None and p.exitcode is None:\n p.kill()", "def end(self) -> None:\n self.process_event(\n PipelineEvent(\n PipelineEventType.RUN_END,\n )\n )", "def finish():", "def finish():", "def finish():", "def finish():", "def __finish(self):\n self.__current_control_node = None\n self._on_finish()\n self.__state = Process.IDLE\n if self.should_terminate():\n self.__terminated = True" ]
[ "0.7009401", "0.7009401", "0.67796814", "0.67073065", "0.6634447", "0.65721756", "0.653", "0.653", "0.653", "0.6507717", "0.6498745", "0.64971644", "0.64605737", "0.64544594", "0.64544594", "0.6441591", "0.64370334", "0.6400482", "0.63626516", "0.6344346", "0.63207597", "0.6313977", "0.6257272", "0.62276745", "0.6191077", "0.61712927", "0.61712927", "0.61712927", "0.61712927", "0.61512095" ]
0.7368279
0
Returns True if this task runner has received a message from its associated process.
def has_an_incomming_message(self): return self.pipe_start.poll(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_waiting_for_message(self):\r\n return self.waiting_for_message", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False", "def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False", "def has_event(self):\n return self.ser.inWaiting()", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def is_running(self):\n if self.__process.poll() is not None: # process has ended\n for nbsr in (\"stdout\", \"stderr\"):\n getattr(self, nbsr).finalise()\n return False\n return True", "def is_done(self) -> bool:\n is_done = self._process.poll() is not None\n\n return is_done", "def _is_running(self, _):\n if self._shutdown_event.is_set():\n raise RequestProcessingError(\n \"Unable to process message - currently shutting down\"\n )", "def _pubnub_receive(self, msg):\r\n self.signal_recv(self, msg)\r\n self._time_last_received = time.time()\r\n return not self._terminating", "def running(self):\n return self.sub_process and self.sub_process.is_alive()", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def has_message_available(self):\n return not self.feedback_log.empty()", "def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True", "def running(self):\n return bool(self.proc and self._running())", "def check_finish(self):\r\n return not self.proc.is_alive()", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None", "def reply_received():\n return call_id in self._reply_inbox", "def messaging(self) -> bool:\n return self._messaging", "def is_running(self):\n return self._task.running()", "def is_running(self):\n return self._task.running()", "def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def alive(self):\n return self._process.is_alive()", "def has_messages(self) -> bool:\n return self._has_messages", "def is_playing(self):\n return self.process is not None", "def is_started(self):\n return bool(self._processes)" ]
[ "0.70353997", "0.69293225", "0.6854245", "0.6836179", "0.68009895", "0.66489863", "0.66337824", "0.6600015", "0.6596999", "0.6567547", "0.65392643", "0.65115803", "0.6506902", "0.6501797", "0.64903075", "0.64822733", "0.6465509", "0.6465509", "0.6463263", "0.64439535", "0.6408495", "0.6387588", "0.63670695", "0.63670695", "0.63523805", "0.63439023", "0.63002115", "0.6298006", "0.6295119", "0.62892115" ]
0.718958
0
As in the SerialScheduler, this function tries to run all the tasks while checking their dependencies. In this case some worker processes are spawned so that they can share the work of executing the tasks. This run function acts as the real scheduler, telling the 'task executor' objects which task to run. This kind of dynamic scheduling fosters an efficient use of resources (every time a 'task executor' finishes a task, it is told to run another one, so the load stays balanced). This is a simple implementation of a master-slave pattern (where the slaves are the task runners).
def run(self): self.function_exec('scheduling_started', {"number_of_tasks":len(self.not_completed)}) # Create processes available_workers = self.number_of_processes task_runners = [] for i in range(available_workers): process_name = "TaskExecutor"+str(i) runner = TaskRunner(process_name, run_task, self.tasks) runner.run() task_runners.append(runner) # Execute all tasks while not len(self.finished) == len(self.tasks): cannot_choose_a_task = False # Choose an available process task_name = self.choose_runnable_task() # Try to execute it if task_name is not None: # If we can still execute a task we find a free task runner to do it for task_runner in task_runners: if not task_runner.busy: self.function_exec('task_started', {"task_name":task_name}) task_runner.execute_task(task_name) self.lock_task(task_name) # Ensure that it can't be selected again until task is finished self.running.append(task_name) break else: cannot_choose_a_task = True if cannot_choose_a_task or len(self.running) == available_workers: # If there is not an available task (so all remaining tasks have dependencies) or # we do not have any available worker, it's time to block until we receive results. # We start polling busy runners pipes to wait for a result and add this result to the # results list task_finished = False while not task_finished: for task_runner in task_runners: if task_runner.busy and task_runner.has_an_incomming_message(): message, value = task_runner.get_message() if message == "TASK FINISHED": task_name, result = value self.function_exec('task_ended', {"task_name":task_name, "finished":len(self.finished)}) self.running.remove(task_name) self.complete_task(task_name) self.remove_from_dependencies(task_name) task_runner.set_task_finished() self.results.append(result) else: printnflush ( "Unexpected message: %s"%message) exit() task_finished = True printnflush ("Sending processes termination message.") for task_runner in task_runners: task_runner.finalize() self.function_exec('scheduling_ended') return self.results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n\n while len(self.task_order) > 0:\n # Get the task to run, set it up, and run it\n task = self.task_order[0]\n\n # In the case of a sublist, we'll run all in parallel\n if type(task) is list:\n running_jobs = []\n job_handles = []\n print(\"Starting following tasks in parallel:\")\n for sub_task in task:\n # Add the job to a list to run. Note, each task has a\n # system object within it.\n running_jobs.append(self.task_list[sub_task])\n # If we want to keep using the same system as before\n # then assign it here.\n if running_jobs[-1].persist_system:\n running_jobs[-1].system = self.global_system\n running_jobs[-1].system.name = running_jobs[-1].task_name\n\n # Run all job\n job_handles.append(running_jobs[-1].run())\n print(\"\\t%s\" % sub_task)\n\n # Wait for jobs to finish\n for j in job_handles:\n j.wait()\n\n # Read in the data from each job\n self.data = []\n for j in running_jobs:\n j.read_results()\n self.data.append(j.data)\n\n # Check conditionals\n conditional_jobs = []\n for j in running_jobs:\n if j.conditional(j.data):\n conditional_jobs.append(j.conditional_sim_name)\n if len(conditional_jobs) > 0:\n if len(conditional_jobs) == 1:\n conditional_jobs = conditional_jobs[0]\n # Overwrite the previous task jobs and run conditionals\n self.task_order[0] = conditional_jobs\n continue\n\n # Check callbacks. Note, callbacks are only run if\n # conditionals are false.\n for j in running_jobs:\n if j.callback is not None:\n j.callback(self, j)\n\n # Remove the last simulation and continue\n del self.task_order[0]\n else:\n running_job = self.task_list[task]\n # Setup\n if running_job.persist_system:\n running_job.system = self.global_system\n running_job.system.name = running_job.task_name\n # Run\n print(\"Starting the following task:\")\n print(\"\\t%s\" % task)\n job_handle = running_job.run()\n\n job_handle.wait()\n\n # Read in the results of the simulation\n running_job.read_results()\n\n # If we have a conditional simulation to run, check and do so.\n # Note, in the case of a conditional, callback is not run!\n if running_job.conditional(running_job.data):\n self.task_order[0] = running_job.conditional_sim_name\n self.data = running_job.data\n continue\n\n # Store the data from the last simulation here\n self.data = running_job.data\n\n if running_job.callback is not None:\n running_job.callback(self, running_job)\n\n # Else, remove the finished simulation and continue\n del self.task_order[0]", "def _run_tasks(self):\n next_tasks = self._job_queue.get_next_tasks()\n for task in next_tasks:\n sid = self._docker.start_task(task.identifier, task.image, task.name, task.args)\n self._job_queue.mark_task_started(task.identifier, task.name, sid)", "def execute(self):\n if not self._multiprocessing:\n for counter, subtasktuples in enumerate(self.task_scheduler):\n self._storegate.compile()\n result = self.execute_pipeline(subtasktuples, counter)\n self._history.append(result)\n\n logger.counter(counter + 1,\n len(self.task_scheduler),\n divide=1,\n message=f'metric={result.metric_value}')\n\n else: # multiprocessing\n if self._storegate.backend not in ('numpy', 'hybrid'):\n raise NotImplementedError(\n 'multiprocessing is supported for only numpy and hybrid backend'\n )\n\n ctx = mp.get_context('spawn')\n queue = ctx.Queue()\n args = []\n\n for counter, subtasktuples in enumerate(self.task_scheduler):\n args.append([subtasktuples, counter])\n\n if len(args) == self._num_workers:\n self.execute_jobs(ctx, queue, args)\n args = []\n logger.counter(counter + 1,\n 
len(self.task_scheduler),\n divide=1)\n\n self.execute_jobs(ctx, queue, args)", "def execute(self):\n\n with self._lock_c:\n self.count = 0\n self.numtasks = 0\n self.taskset = []\n self.results = {}\n self.totaltime = time.time()\n # Start all tasks\n for task in self.taskseq:\n self.taskset.append(task)\n self.numtasks += 1\n task.init_and_start(self)\n\n num_tasks = self.getNumTasks()\n # Wait on each task to clean up results\n while num_tasks > 0:\n\n self.check_state()\n\n for i in range(num_tasks):\n try:\n try:\n task = self.getTask(i)\n except IndexError:\n # A task got deleted from the set. Jump back out\n # to outer loop and repoll the number of tasks\n break\n\n #self.logger.debug(\"waiting on %s\" % task)\n res = task.wait(timeout=self.idletime)\n\n #self.logger.debug(\"finished: %s\" % task)\n self.child_done(res, task)\n\n except TaskTimeout:\n continue\n\n except Exception as e:\n #self.logger.warning(\"Subtask propagated exception: %s\" % str(e))\n self.child_done(e, task)\n continue\n\n # wait a bit and try again\n #self.ev_quit.wait(self.idletime)\n\n # re-get number of tasks, in case some were added or deleted\n num_tasks = self.getNumTasks()\n\n # Scan results for errors (exceptions) and raise the first one we find\n for key in self.results.keys():\n value = self.results[key]\n if isinstance(value, Exception):\n (count, task) = key\n self.logger.error(\"Child task %s terminated with exception: %s\" % (\n task.tag, str(value)))\n raise value\n\n # Return value of last child to complete\n return value", "def run(self):\n time_start = datetime.datetime.now()\n # while there are tasks in queue or tasks running\n while self.tasks_to_run or self.tasks_running:\n print(f\"\\nScheduler ticks... \\nElapsed time {(datetime.datetime.now() - time_start).seconds} \\n\"\n f\"TTR: {list(self.tasks_to_run.keys())} \\n\"\n f\"TRU: {list(self.tasks_running.keys())} \\n\"\n f\"TCO: {list(self.tasks_complete.keys())} \\n\"\n f\"\\nMeanwhile:\"\n )\n # each tick I launch 1 task from queue\n if self.tasks_to_run:\n i = list(self.tasks_to_run.keys())[0]\n self.task_launcher(i, self.tasks_to_run.pop(i))\n # tick - tock\n time.sleep(1)\n\n # and finishing remarks...\n print(\n f\"Process finished in {(datetime.datetime.now() - time_start).seconds} seconds\\n\"\n f\"TTR: {list(self.tasks_to_run.keys())} \\n\"\n f\"TRU: {list(self.tasks_running.keys())} \\n\"\n f\"TCO: {list(self.tasks_complete.keys())} \\n\"\n )", "def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results", "def run_task(self, task, ignore_concurrency = False):\n\n logger.info(\"Preparing to run task %s on appropriate nodes\", task)\n\n try:\n task_config = self.cluster.config.get('tasks')[task]\n enforce_concurrency = 'concurrency' in task_config and task_config['concurrency'] > 0 and not ignore_concurrency\n\n if enforce_concurrency:\n # Count currently running processes\n running_processes_count = len(self.cluster.list_task_processes(task))\n\n if 'pools' in task_config:\n pools = task_config['pools']\n else:\n pools = [ \"default\" ]\n\n nodes = []\n for pool in pools:\n if pool in self.cluster.pools:\n nodes += self.cluster.pools[pool]\n else:\n logger.warning(\"Task %s should run on pool %s but there is no such pool in the cluster.\", task, pool)\n\n nodes = list(set(nodes))\n\n if not nodes:\n logger.warning(\"There are no nodes to run task %s !\", task)\n return\n\n if 'mode' in task_config and task_config['mode'] == 'any':\n nodes = [ random.choice(nodes) 
]\n\n for node in nodes:\n try:\n if enforce_concurrency and running_processes_count >= task_config['concurrency']:\n logger.warning(\"Maximum concurrency reached (%d) for task %s !\", task_config['concurrency'], task)\n break\n\n addr = parse_host_port(self.cluster.nodes[node]['address'])\n client = HTTPClient(addr)\n logger.info(\"Running task %s on node %s\", task, node)\n ret = client.request(HTTPRequest(uri = '/tasks/%s?target=local' % task, method = 'EXECUTE'))\n\n get_plugin_registry().call_hook('TaskExecuted', task, node, ret.code == 200, ret.body)\n\n if ret.code == 200:\n if enforce_concurrency:\n running_processes_count += 1\n else:\n logger.error (\"Node %s answered %d when asked to execute task %s !\", node, ret.code, task)\n\n except Exception:\n logger.exception(\"An exception occurred when trying to run task %s on node %s.\", task, node)\n\n except Exception as e:\n logger.exception(\"Encountered an exception while preparing to run task %s.\", task)", "def _runJobs (self):\n\t\t# submit jobs\n\t\tdef sworker (q):\n\t\t\t\"\"\"\n\t\t\tThe worker to run jobs\n\t\t\t\"\"\"\n\t\t\twhile True:\n\t\t\t\t(run, i) = q.get()\n\t\t\t\tsleep (i)\n\t\t\t\tif run.isRunning():\n\t\t\t\t\tself.log (\"Job #%s is already running, skip submitting.\" % run.job.index, 'info')\n\t\t\t\telse:\n\t\t\t\t\trun.submit()\n\t\t\t\trun.wait() \n\t\t\t\trun.finish()\n\t\t\t\tq.task_done()\n\t\t\n\t\trunner = proc.RUNNERS[self.runner]\n\t\tmaxsubmit = self.forks\n\t\tif hasattr(runner, 'maxsubmit'): \n\t\t\tmaxsubmit = runner.maxsubmit\n\t\tinterval = .1\n\t\tif hasattr(runner, 'interval'): \n\t\t\tinterval = runner.interval\n\t\t\n\t\tsq = Queue()\n\t\tfor i in self.ncjobids:\n\t\t\trjob = runner (self.jobs[i])\n\t\t\ttm = int(i/maxsubmit) * interval\n\t\t\tsq.put ((rjob, tm))\n\n\t\t# submit jobs\n\t\tnojobs2submit = min (self.forks, len(self.ncjobids))\n\t\tfor i in range (nojobs2submit):\n\t\t\tt = threading.Thread(target = sworker, args = (sq, ))\n\t\t\tt.daemon = True\n\t\t\tt.start ()\n\t\t\n\t\tsq.join()", "def executeTask(self, task):\n if self._Finished:\n return\n \n self._NextTask = None\n taskRunner = TaskRunner(task)\n self.currentTask = taskRunner\n print \"Starting Task Execution: %s\" % task.taskname()\n taskRunner.evalStartControlPoint(self)\n if self._NextTask != None:\n print \"Next Task Scheduled: %s\" % self._NextTask\n nextTask = self.taskTree.findTask(self._NextTask)\n if nextTask == None:\n msg = \"Error: Task named %s not found:\\n\" % self._NextTask\n msg += \"Unable to continue processing\\n\"\n raise ShREEKException(msg, ClassInstance = self)\n self.executeTask(nextTask)\n return\n\n if self.monitorThread != None:\n self.monitorThread.notifyTaskStart(task)\n exitCode = taskRunner.run()\n if self.monitorThread != None:\n self.monitorThread.notifyTaskEnd(task, exitCode)\n print \"Task Execution Complete: %s Exit: %s\" % (\n task.taskname(), exitCode,\n )\n \n\n taskRunner.evalEndControlPoint(self)\n if self._NextTask != None:\n print \"Next Task Scheduled: %s\" % self._NextTask,\n nextTask = self.taskTree.findTask(self._NextTask)\n if nextTask == None:\n msg = \"Error: Task named %s not found:\\n\" % self._NextTask\n msg += \"Unable to continue processing\\n\"\n raise ShREEKException(msg, ClassInstance = self)\n\n self.executeTask(nextTask)\n return\n\n for child in task.children:\n self.executeTask(child)\n return", "def process_all_runs(run_storage_base, options):\n global current\n\n errored_tasks = []\n running_tasks = []\n for name in os.listdir(run_storage_base):\n 
run_dir = path.join(run_storage_base, name)\n run_id = get_run_id_from_path(run_dir)\n if path.isdir(run_dir) and run_info.is_illumina_run(run_dir):\n try:\n emitted_task = try_autoprocessing(run_dir, options)\n if emitted_task and emitted_task.status == tasks.ERROR:\n errored_tasks.append((emitted_task, None))\n if emitted_task and emitted_task.status == tasks.RUNNING:\n running_tasks.append(emitted_task)\n except Exception as e:\n # Dummy catchall task to signal exceptional failure\n errored_tasks.append((ProcessingTask(run_id,\n 'try_autoprocessing',\n tasks.ERROR), e))\n logging.exception(e)\n if options.verbose:\n logging.error(\"try_autoprocessing failed:\\n %s\", e)\n\n current.task = None\n\n # Stop on error in any task, don't continue with the other runs\n # if emitted_task and emitted_task.status != COMPLETE:\n # break\n\n errorlist = ', '.join(['%s:%s' % (t.task_name, t.run_id)\n for t, e in errored_tasks])\n runninglist = ', '.join(['%s:%s' % (t.task_name, t.run_id)\n for t in running_tasks])\n if options.verbose:\n if running_tasks:\n logging.info(\"%s task(s) are currently running (%s): %s\",\n len(running_tasks), run_storage_base, runninglist)\n else:\n logging.info(\"Successfully completed processing of runs in: %s\",\n run_storage_base)\n\n if errored_tasks:\n if options.verbose:\n logging.error(\"Processing runs in %s completed with failures: %s\",\n run_storage_base, errorlist)\n logging.getLogger('autoprocess_notify').error(\n \"Processing runs in %s completed with failures: %s\",\n run_storage_base,\n errorlist)\n for t, ex in errored_tasks:\n if ex:\n logging.exception(ex)\n else:\n # Throttle notification log if in --quiet mode\n notify_every = timedelta(minutes=options.notify_frequency)\n for t, ex in errored_tasks:\n if t.last_failure_notify_time is None or \\\n (t.last_failure_notify_time +\n notify_every < datetime.now()):\n logging.getLogger('autoprocess_notify').error(\n \"Processing runs in %s completed with failures: %s\",\n run_storage_base,\n errorlist)\n t.last_failure_notify_time = datetime.now()\n taskdb = TaskDb(path.join(run_storage_base, t.run_id),\n ProcessingTask, t.run_id)\n taskdb.update(t)\n # t._db.update(t)\n break\n\n return not errored_tasks", "def run_jobs(**kwargs): # pylint: disable=W0613\n\n root_nodes, job_instances_map = build_graph(ctx.nodes)\n monitor = Monitor(job_instances_map, ctx.logger)\n\n # Execution of first job instances\n tasks_list = []\n for root in root_nodes:\n tasks_list += root.queue_all_instances()\n monitor.add_node(root)\n wait_tasks_to_finish(tasks_list)\n\n # Monitoring and next executions loop\n while monitor.is_something_executing() and not api.has_cancel_request():\n # Monitor the infrastructure\n monitor.update_status()\n exec_nodes_finished = []\n new_exec_nodes = []\n for node_name, exec_node in monitor.get_executions_iterator():\n if exec_node.check_status():\n if exec_node.completed:\n exec_node.clean_all_instances()\n exec_nodes_finished.append(node_name)\n new_nodes_to_execute = exec_node.get_children_ready()\n for new_node in new_nodes_to_execute:\n new_exec_nodes.append(new_node)\n else:\n # Something went wrong in the node, cancel execution\n cancel_all(monitor.get_executions_iterator())\n return\n\n # remove finished nodes\n for node_name in exec_nodes_finished:\n monitor.finish_node(node_name)\n # perform new executions\n tasks_list = []\n for new_node in new_exec_nodes:\n tasks_list += new_node.queue_all_instances()\n monitor.add_node(new_node)\n wait_tasks_to_finish(tasks_list)\n\n if 
monitor.is_something_executing():\n cancel_all(monitor.get_executions_iterator())\n\n ctx.logger.info(\n \"------------------Workflow Finished-----------------------\")\n return", "def run_tasks(\n hosts,\n loader,\n inventory_manager,\n variable_manager,\n options={\n \"forks\": 6,\n \"connection\": \"smart\",\n \"verbosity\": 2,\n \"become_method\": \"sudo\"\n },\n passwords={\"vault_pass\": \"any\"},\n gather_facts=False,\n tasks=[]):\n tqm = None\n try:\n context.CLIARGS = ImmutableDict(**options)\n\n play = Play().load(\n {\n \"hosts\": hosts,\n \"gather_facts\": \"yes\" if gather_facts else \"no\",\n \"tasks\": tasks,\n },\n variable_manager=variable_manager,\n loader=loader,\n )\n\n play_tasks = play.get_tasks()[0]\n\n if len(play_tasks) > 1:\n callback = BatchResultsCollector()\n else:\n callback = ResultCollector()\n\n tqm = TaskQueueManager(\n inventory=inventory_manager,\n variable_manager=variable_manager,\n loader=loader,\n passwords=passwords,\n stdout_callback=callback,\n forks=options.get(\"forks\")\n )\n tqm.run(play)\n return callback.results\n finally:\n if tqm is not None:\n tqm.cleanup()", "def _run_tasks(self, taskq_len, create_tasks, update_tasks, delete_tasks):\n # 'finished' indicates that the task queue is empty, or there is\n # no way to continue to make progress. If there are errors in\n # deploying any resource, it is saved in the queue until another\n # pass can be made to deploy the configuration. When we have\n # gone through the queue on a pass without shrinking the task\n # queue, it is determined that progress has stopped and the\n # loop is exited with work remaining.\n finished = False\n while not finished:\n LOGGER.debug(\"Service task queue length: %d\", taskq_len)\n\n # Iterate over the list of resources to create\n create_tasks = self._create_resources(create_tasks)\n\n # Iterate over the list of resources to update\n update_tasks = self._update_resources(update_tasks)\n\n # Iterate over the list of resources to delete\n delete_tasks = self._delete_resources(delete_tasks)\n\n tasks_remaining = (\n len(create_tasks) + len(update_tasks) + len(delete_tasks))\n\n # Did the task queue shrink?\n if tasks_remaining >= taskq_len or tasks_remaining == 0:\n # No, we have stopped making progress.\n finished = True\n\n # Reset the taskq length.\n taskq_len = tasks_remaining\n\n return taskq_len", "def submit_tasks(self, fun, shared_data, *argss, **kwdss):\n self.shared_data = shared_data\n ids = self.add_tasks(fun, *argss, **kwdss)\n # Launches the new tasks if the previous ones have stopped.\n if self.thread is None or not self.thread.is_alive():\n self.run_thread()\n return ids", "def do_tasks(self):\n\t\twork_ = self.TASK_LIMIT\n\t\twhile True:\n\t\t\tif len(self.tasks) == 0 or work_ <= 0:\n\t\t\t\tbreak\n\t\t\tself.tasks[0].work(self)\n\t\t\tif self.tasks[0].completed:\n\t\t\t\tself.tasks.pop(0)\n\n\t\t\twork_ -= 1", "def algorithm(self):\n self.logger.debug(\"Starting\")\n while(True):\n for status, worktype in states():\n limit = self.slaves.queueableTasks()\n if not self._lockWork(limit=limit, getstatus=status, setstatus='HOLDING'):\n continue\n pendingwork = self._getWork(limit=limit, getstatus='HOLDING')\n self.logger.info(\"Retrieved a total of %d %s works\" %(len(pendingwork), worktype))\n self.logger.debug(\"Retrieved the following works: \\n%s\" %(str(pendingwork)))\n self.slaves.injectWorks([(worktype, work, None) for work in pendingwork])\n for task in pendingwork:\n self.updateWork(task['tm_taskname'], 'QUEUED')\n self.logger.info('Worker 
status:')\n self.logger.info(' - free slaves: %d' % self.slaves.freeSlaves())\n self.logger.info(' - acquired tasks: %d' % self.slaves.queuedTasks())\n self.logger.info(' - tasks pending in queue: %d' % self.slaves.pendingTasks())\n\n finished = self.slaves.checkFinished()\n self.updateFinished(finished)\n if self.TEST:\n #if we are testing we just do one cycle\n break\n\n time.sleep(self.config.TaskWorker.polling)\n\n self.logger.debug(\"Stopping\")", "def run(self):\n\n logging.info(\"Pool scheduler started\")\n\n # do forever\n while True:\n\n try :\n # get job information about new jobs\n JobStatus.addNewJobs()\n\n # apply policy\n groups = self.applyPolicy()\n\n # any job to check?\n if len(groups) == 0:\n\n # no, wait for jobs to arrive\n logging.info( \"No work to do, \" + \\\n \"scheduler goes to sleep for \" + \\\n str(self.delay) + \" seconds\")\n sleep(self.delay)\n continue\n\n # new threads to start?\n if len(groups) >= self.threadsWorking:\n\n # yes, start threads\n for grp in groups:\n\n # but only for new groups\n if grp not in self.groupsUnderProcessing:\n\n # insert group ID into queue\n # to trigger thread start\n self.groupsUnderProcessing.add(grp)\n self.pool.enqueue(grp, grp)\n\n # wait for a thread to finish\n (group, result) = self.pool.dequeue()\n logging.info(\"Thread processing group \" + str(group) + \\\n \" has finished\")\n\n # decrement threads counter\n self.threadsWorking = self.threadsWorking - 1\n\n # remove its ID from groups\n self.groupsUnderProcessing.remove(group)\n\n # remove all finished jobs from this group\n JobStatus.removeFinishedJobs(group)\n\n except Exception, ex :\n import traceback\n logging.error( 'Error in PoolScheduler : [%s]' % str(ex) )\n logging.error( \"Traceback: %s\" % traceback.format_exc() )\n logging.error( \"PoolScheduler goes to sleep for \" + \\\n str(self.delay) + \" seconds\" )\n sleep(self.delay)", "async def start_execute(self, **kwargs):\n await super().start_execute(**kwargs)\n\n async def execute_child_components():\n futures = []\n for component in self.components:\n future = asyncio.create_task(component.start(**kwargs))\n futures.append(future)\n\n # used fo debugging\n loop = asyncio.get_running_loop()\n if not hasattr(loop, 'flowsaber_futures'):\n loop.flowsaber_futures = []\n loop.flowsaber_futures += list(zip(self.components, futures))\n\n done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_EXCEPTION)\n # TODO wait for truely cancelled\n for fut in pending:\n fut.cancel()\n\n res_futures = list(done) + list(pending)\n self.check_future_exceptions(res_futures)\n\n return res_futures\n\n # for the top most flow, clear context info, initialize executors, go to flow_worker\n if self.config_dict['id'] == self.context['flow_id']:\n flowsaber.context._info.clear()\n with change_cwd(self.context.get('flow_workdir', '')) as p:\n async with flowsaber.context:\n await execute_child_components()\n # for the top most flow, return None, since Flowrunner's returned final\n # state will inlcude this\n else:\n await execute_child_components()", "def task_run(taskname,mynodes):\n print \"FULLRUN\"\n task = task_self()\n print \"Booting task: \" , taskname\n \n # first initiate environment to run our python+java\n os.chdir(CASSANDRA_HOME)\n \n #FIXME: set init_environment to actually work\n #task.shell(\"cluster_config/init_environment.sh\",nodes=mynodes)\n cmdenv = \"export PYTHONHOME=/opt/python2.7.2; \\\n export JAVA_HOME=/opt/jdk1.6.0_27; \\\n export PYTHONPATH=/opt/python2.7.2/lib; \\\n export \\\n 
PATH=/opt/python2.7.2/lib:/opt/python2.7.2/bin:/opt/jdk1.6.0_27/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin;\"\n \n\n \n task.run(cmdenv+taskname,nodes=mynodes)\n print \":\\n\".join([\"%s=%s\" % (i,j) for j,i in task.iter_buffers()])", "def run(self):\n # Start spawning processes to execute the commands.\n timer = Timer()\n logger.debug(\"Preparing to run %s with a concurrency of %i ..\",\n pluralize(self.num_commands, \"command\"),\n self.concurrency)\n try:\n with self.get_spinner(timer) as spinner:\n num_started = 0\n num_collected = 0\n while not self.is_finished:\n # When concurrency is set to one (I know, initially it\n # sounds like a silly use case, bear with me) I want the\n # start_event and finish_event callbacks of external\n # commands to fire in the right order. The following\n # conditional is intended to accomplish this goal.\n if self.concurrency > (num_started - num_collected):\n num_started += self.spawn()\n num_collected += self.collect()\n spinner.step(label=format(\n \"Waiting for %i/%i %s\",\n self.num_commands - self.num_finished, self.num_commands,\n \"command\" if self.num_commands == 1 else \"commands\",\n ))\n spinner.sleep()\n except Exception:\n if self.num_running > 0:\n logger.warning(\"Command pool raised exception, terminating running commands!\")\n # Terminate commands that are still running.\n self.terminate()\n # Re-raise the exception to the caller.\n raise\n # Collect the output and return code of any commands not yet collected.\n self.collect()\n logger.debug(\"Finished running %s in %s.\",\n pluralize(self.num_commands, \"command\"),\n timer)\n # Report the results to the caller.\n return self.results", "def _run_tasks(tasks, context):\n executed_tasks = []\n original_sigterm_handler = signal.getsignal(signal.SIGTERM)\n signal.signal(signal.SIGTERM, _sigterm_handler)\n try:\n for task in tasks:\n LOG.info(\"=========== run %s ===========\", task)\n executed_tasks.append(task)\n task.run(context)\n except KeyboardInterrupt:\n LOG.info(\"Got Ctrl-C during task run, jumping to cleanup\")\n except SigTermException:\n LOG.info(\"Got SIGTERM during task run, jumping to cleanup\")\n except Exception as ex:\n LOG.exception(\"Caught exception while running '%s'\", task)\n failure = {'name': ex.__class__.__name__,\n 'msg': str(ex),\n 'task': str(task)}\n context['_taskrunner']['run_failures'].append(failure)\n # restore original signal handler\n signal.signal(signal.SIGTERM, original_sigterm_handler)\n return executed_tasks", "def run(self, targets=None, context={}, force=False):\n targets = [\"__all\"] if targets is None else targets\n self._scheduler.run(targets, context=context, force=force)", "def run_all(self):\n results = []\n # Keep a loop going until all the tasks are gone:\n i = 0\n while self.tasks:\n i += 1\n time.sleep(0.0)\n print(f\"\\nOuter loop count: {i}\")\n # pop a task off the end\n task = self.tasks.pop()\n # run that task:\n try:\n res = task.send(None) # TaskLoop.run_all() - do_a_few_things() - count() - yield\n print(\"returned from send:\", res)\n self.tasks.insert(0, task) # move task to the begining of the list\n except StopIteration as si: # task completed yield return StopIteration exception\n results.append(si.args[0])\n print(\"task: {} result >>> {}\".format(task, si.args[0]))\n return results", "def execute(self, tasks: List[Task], progress_bar: bool = True):\n n_tasks = len(tasks)\n\n pickled_tasks = [pickle.dumps(task) for task in tasks]\n\n n_procs = 
min(self.n_procs, n_tasks)\n logger.info(f\"Performing parallel task execution on {n_procs} \"\n f\"processes.\")\n\n with Pool(processes=n_procs) as pool:\n results = pool.map(work,\n tqdm(pickled_tasks,\n disable=not progress_bar)\n )\n\n return results", "def __run_schedules():\n while True:\n __scheduler.run()", "def run_tasks(tasks, time_limit, cron_type, kill_time=None, use_stdio=True, max_tasks=None):\n # TODO: support no time limit\n # TODO: include error message for tasks that were killed\n if not kill_time:\n kill_time = time_limit * MAX_TIME_MULTIPLIER\n start = default_timer()\n\n processes = [ProcessHandler(function) for function in tasks]\n\n # If max_tasks is None, list slicing returns the whole list\n for p in processes[:max_tasks]:\n p.start()\n\n for _ in range(kill_time):\n if all(p.stopped for p in processes):\n break\n\n for p in processes:\n p.check_completed() # Stops processes if completed\n\n # If task counting is enabled\n if max_tasks:\n running_task_count = sum(1 for p in processes if p.running())\n required_new_tasks = max_tasks - running_task_count\n not_started_tasks = [p for p in processes if not p.started()]\n for p in not_started_tasks[:required_new_tasks]:\n p.start()\n\n sleep(1)\n\n for p in processes:\n if p.started() and not p.stopped:\n p.kill()\n \n total_time = default_timer() - start\n errors = [\"Error in running function '%s'\\n\" % p.name for p in processes if p.process.exitcode]\n \n if total_time > time_limit or total_time > kill_time:\n errors.append(\"ERROR: cron job: over time limit\")\n\n tasks_left = [p.name for p in processes if not p.started()]\n if tasks_left:\n errors.append(\"The following tasks never ran: \" + ', '.join(tasks_left))\n\n process_times = {\n p.name: p.get_run_time_message() for p in processes if p.stopped\n }\n\n if errors:\n error_message = \"Cron type %s completed with errors; total time %s\\n\" % (cron_type, total_time)\n for error in errors:\n error_message += error\n error_message += \"\\n\"\n if use_stdio:\n stderr.write(error_message)\n stderr.write(str(process_times))\n exit(1)\n else:\n raise TaskError(error_message, process_times=process_times)\n else:\n if use_stdio:\n print(\"Cron type %s completed; total time %s\" % (cron_type, total_time))\n print(str(process_times))\n else:\n return total_time, process_times", "def run_tasks(self, task_index=0):\n for generator in self.task_tree[task_index]():\n next(generator)\n # self.device_control.wait_for_device()\n \n next_index = task_index + 1\n if next_index < len(self.task_tree):\n self.run_tasks(next_index)", "def run(\n self, pipeline: tfx_pipeline.Pipeline, run_name: Optional[str] = None\n ) -> None:\n for component in pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(\n pipeline.pipeline_info.pipeline_root\n )\n\n c = compiler.Compiler()\n pipeline = c.compile(pipeline)\n\n run_name = run_name or datetime.now().strftime(\"%d_%h_%y-%H_%M_%S_%f\")\n # Substitute the runtime parameter to be a concrete run_id\n runtime_parameter_utils.substitute_runtime_parameter(\n pipeline,\n {\n PIPELINE_RUN_ID_PARAMETER_NAME: run_name,\n },\n )\n\n deployment_config = runner_utils.extract_local_deployment_config(\n pipeline\n )\n connection_config = deployment_config.metadata_connection_config # type: ignore[attr-defined] # noqa\n\n logger.debug(f\"Using deployment config:\\n {deployment_config}\")\n logger.debug(f\"Using connection config:\\n {connection_config}\")\n\n # Run each component. 
Note that the pipeline.components list is in\n # topological order.\n for node in pipeline.nodes:\n pipeline_node = node.pipeline_node\n node_id = pipeline_node.node_info.id\n executor_spec = runner_utils.extract_executor_spec(\n deployment_config, node_id\n )\n custom_driver_spec = runner_utils.extract_custom_driver_spec(\n deployment_config, node_id\n )\n\n component_launcher = launcher.Launcher(\n pipeline_node=pipeline_node,\n mlmd_connection=metadata.Metadata(connection_config),\n pipeline_info=pipeline.pipeline_info,\n pipeline_runtime_spec=pipeline.runtime_spec,\n executor_spec=executor_spec,\n custom_driver_spec=custom_driver_spec,\n )\n start = time.time()\n logger.info(f\"Step `{node_id}` has started.\")\n component_launcher.launch()\n end = time.time()\n logger.info(\n f\"Step `{node_id}` has finished\"\n f\" in {format_timedelta_pretty(end - start)}.\"\n )", "def run(self):\n\n if self.nproc > 0:\n # get resources\n nodes = self.RM.get_allocation(self, self.nproc, self.mem_pproc, self.disk_pproc)\n\n # did we actually get nodes?????\n if nodes >= 0:\n #--------------------------------\n # update resource usage\n #--------------------------------\n self.using.nodes = nodes\n self.using.procs = self.nproc\n if self.start_waiting_time >= 0:\n self.total_waiting_time += self.fwk.fwk_global_time - self.start_waiting_time\n self.start_waiting_time = -1\n\n #--------------------------------\n # set curr_exec_time, start_exec_time, and state\n #--------------------------------\n self.get_curr_exec_time()\n\n #--------------------------------\n # log event\n #--------------------------------\n if self.retry == True:\n if self.sim.retry_limit > 0 and self.curr_retries < self.sim.retry_limit:\n self.num_retries += 1\n self.curr_retries += 1\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched attempt %d on %d processes on %d nodes\" %(self.retry, self.using.procs, self.using.nodes))\n else:\n #print \"exceeded retry limit\"\n if self.fwk.debug:\n print('exceeded retry limit, killing sim from component.')\n self.sim.kill()\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started running on %d processes on %d nodes\" % (self.using.procs, self.using.nodes))\n else:\n #-------------------------------------------\n # we did not get the resources we wanted\n #-------------------------------------------\n self.state = \"waiting_on_resources\"\n if self.start_waiting_time == -1:\n self.start_waiting_time = self.fwk.fwk_global_time\n self.num_waiting += 1\n #--------------------------------\n # log event\n #--------------------------------\n self.fwk.logEvent(self.sim.name, self.name, \"waiting_on_procs\", \"needs %d procs %d memory pproc %d disk pproc\" % (self.nproc, self.mem_pproc, self.disk_pproc))\n else:\n # non-resource consuming component\n self.get_curr_exec_time()\n if self.retry == True:\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched, attempt %d\" %(self.num_retries))\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started\")", "def schedule_jobs(self):\n for species in self.species_dict.values():\n if species.initial_xyz is None and species.final_xyz is None and species.conformers \\\n and any([e is not None for e in species.conformer_energies]):\n # The species has no xyz, but has conformers and at least one of the conformers has energy.\n self.determine_most_stable_conformer(species.label)\n if species.initial_xyz is not None:\n if self.composite_method:\n 
self.run_composite_job(species.label)\n else:\n self.run_opt_job(species.label, fine=self.fine_only)\n self.run_conformer_jobs()\n self.spawn_ts_jobs() # If all reactants/products are already known (Arkane yml or restart), spawn TS searches.\n while self.running_jobs != {}:\n self.timer = True\n for label in self.unique_species_labels:\n if self.output[label]['convergence'] is False:\n # Skip unconverged species.\n if label in self.running_jobs:\n del self.running_jobs[label]\n continue\n # Look for completed jobs and decide what jobs to run next.\n self.get_server_job_ids() # updates ``self.server_job_ids``\n self.get_completed_incore_jobs() # updates ``self.completed_incore_jobs``\n if label not in self.running_jobs.keys():\n continue\n job_list = self.running_jobs[label]\n for job_name in job_list:\n if 'conformer' in job_name:\n i = get_i_from_job_name(job_name)\n job = self.job_dict[label]['conformers'][i]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # this is a completed conformer job\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n troubleshooting_conformer = self.parse_conformer(job=job, label=label, i=i)\n if troubleshooting_conformer:\n break\n # Just terminated a conformer job.\n # Are there additional conformer jobs currently running for this species?\n for spec_jobs in job_list:\n if 'conformer' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All conformer jobs terminated.\n # Check isomorphism and run opt on most stable conformer geometry.\n logger.info(f'\\nConformer jobs for {label} successfully terminated.\\n')\n if self.species_dict[label].is_ts:\n self.determine_most_likely_ts_conformer(label)\n else:\n self.determine_most_stable_conformer(label) # also checks isomorphism\n if self.species_dict[label].initial_xyz is not None:\n # if initial_xyz is None, then we're probably troubleshooting conformers, don't opt\n if not self.composite_method:\n self.run_opt_job(label, fine=self.fine_only)\n else:\n self.run_composite_job(label)\n self.timer = False\n break\n if 'tsg' in job_name:\n job = self.job_dict[label]['tsg'][get_i_from_job_name(job_name)]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # This is a successfully completed tsg job. It may have resulted in several TSGuesses.\n self.end_job(job=job, label=label, job_name=job_name)\n if job.local_path_to_output_file.endswith('.yml'):\n for rxn in job.reactions:\n rxn.ts_species.process_completed_tsg_queue_jobs(yml_path=job.local_path_to_output_file)\n # Just terminated a tsg job.\n # Are there additional tsg jobs currently running for this species?\n for spec_jobs in job_list:\n if 'tsg' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All tsg jobs terminated. 
Spawn confs.\n logger.info(f'\\nTS guess jobs for {label} successfully terminated.\\n')\n self.run_conformer_jobs(labels=[label])\n self.timer = False\n break\n elif 'opt' in job_name:\n # val is 'opt1', 'opt2', etc., or 'optfreq1', optfreq2', etc.\n job = self.job_dict[label]['opt'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_opt_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'freq' in job_name:\n # this is NOT an 'optfreq' job\n job = self.job_dict[label]['freq'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_freq_job(label=label, job=job)\n self.timer = False\n break\n elif 'sp' in job_name:\n job = self.job_dict[label]['sp'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_sp_job(label=label, job=job)\n self.timer = False\n break\n elif 'composite' in job_name:\n job = self.job_dict[label]['composite'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_composite_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'directed_scan' in job_name:\n job = self.job_dict[label]['directed_scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_directed_scan_job(label=label, job=job)\n if 'cont' in job.directed_scan_type and job.job_status[1]['status'] == 'done':\n # This is a continuous restricted optimization, spawn the next job in the scan.\n xyz = parser.parse_xyz_from_file(job.local_path_to_output_file) \\\n if not hasattr(job, 'opt_xyz') else job.opt_xyz\n self.spawn_directed_scan_jobs(label=label, rotor_index=job.rotor_index, xyz=xyz)\n if 'brute_force' in job.directed_scan_type:\n # Just terminated a brute_force directed scan job.\n # Are there additional jobs of the same type currently running for this species?\n self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs'] -= 1\n if not self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs']:\n # All brute force scan jobs for these pivots terminated.\n logger.info(f'\\nAll brute force directed scan jobs for species {label} between '\n f'pivots {job.pivots} successfully terminated.\\n')\n self.process_directed_scans(label, pivots=job.pivots)\n shutil.rmtree(job.local_path, ignore_errors=True)\n self.timer = False\n break\n elif 'scan' in job_name and 'directed' not in job_name:\n job = self.job_dict[label]['scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = 
self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination \\\n and (job.directed_scan_type is None or job.directed_scan_type == 'ess'):\n self.check_scan_job(label=label, job=job)\n self.timer = False\n break\n elif 'irc' in job_name:\n job = self.job_dict[label]['irc'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.spawn_post_irc_jobs(label=label, job=job)\n self.timer = False\n break\n elif 'orbitals' in job_name:\n job = self.job_dict[label]['orbitals'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # copy the orbitals file to the species / TS output folder\n folder_name = 'rxns' if self.species_dict[label].is_ts else 'Species'\n orbitals_path = os.path.join(self.project_directory, 'output', folder_name, label,\n 'geometry', 'orbitals.fchk')\n if os.path.isfile(job.local_path_to_orbitals_file):\n try:\n shutil.copyfile(job.local_path_to_orbitals_file, orbitals_path)\n except shutil.SameFileError:\n pass\n self.timer = False\n break\n elif 'onedmin' in job_name:\n job = self.job_dict[label]['onedmin'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # Copy the lennard_jones file to the species output folder (TS's don't have L-J data).\n lj_output_path = os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat')\n if os.path.isfile(job.local_path_to_lj_file):\n try:\n shutil.copyfile(job.local_path_to_lj_file, lj_output_path)\n except shutil.SameFileError:\n pass\n self.output[label]['job_types']['onedmin'] = True\n self.species_dict[label].set_transport_data(\n lj_path=os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat'),\n opt_path=self.output[label]['paths']['geo'], bath_gas=job.bath_gas,\n opt_level=self.opt_level)\n self.timer = False\n break\n\n if not len(job_list):\n self.check_all_done(label)\n if not self.running_jobs[label]:\n # Delete the label only if it represents an empty entry.\n del self.running_jobs[label]\n\n if self.timer and len(job_list):\n time.sleep(30) # wait 30 sec before bugging the servers again.\n t = time.time() - self.report_time\n if t > 3600 and self.running_jobs:\n self.report_time = time.time()\n logger.info(f'Currently running jobs:\\n{pprint.pformat(self.running_jobs)}')\n\n # Generate a TS report:\n self.generate_final_ts_guess_report()" ]
[ "0.6703353", "0.632755", "0.6312713", "0.6186647", "0.6169463", "0.614637", "0.611018", "0.60892034", "0.606724", "0.6054457", "0.5861122", "0.5801138", "0.57111627", "0.56422794", "0.5621341", "0.5604925", "0.5578869", "0.55761963", "0.55614334", "0.5556777", "0.55550724", "0.55403763", "0.5535473", "0.5527791", "0.5524737", "0.5522676", "0.5516985", "0.5489258", "0.54877216", "0.5470069" ]
0.75108796
0
Creates a CreateGroupMessage from a protobuf. As a requirement of all objects that inherit from Serializable, this method transforms a protobuf object into an instance of this class.
def _proto2object( proto: CreateGroupMessage_PB, ) -> "CreateGroupMessage": return CreateGroupMessage( msg_id=_deserialize(blob=proto.msg_id), address=_deserialize(blob=proto.address), content=json.loads(proto.content), reply_to=_deserialize(blob=proto.reply_to), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: CreateGroupResponse_PB,\n ) -> \"CreateGroupResponse\":\n\n return CreateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> CreateGroupResponse_PB:\n return CreateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: DeleteGroupResponse_PB,\n ) -> \"DeleteGroupResponse\":\n\n return DeleteGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _proto2object(\n proto: GetGroupResponse_PB,\n ) -> \"GetGroupResponse\":\n\n return GetGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> GetGroupResponse_PB:\n return GetGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def 
_object2proto(self) -> DeleteGroupResponse_PB:\n return DeleteGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: UpdateGroupResponse_PB,\n ) -> \"UpdateGroupResponse\":\n\n return UpdateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def _proto2object(\n proto: GetGroupsResponse_PB,\n ) -> \"GetGroupsResponse\":\n\n return GetGroupsResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return cls(**kwargs)", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupMessage_PB", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupResponse_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupMessage_PB", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def create_from_pb2(cls, pb2_obj: _DetectionProto) -> 'Detection':\n categories = []\n keypoints = []\n\n for idx, score in enumerate(pb2_obj.score):\n categories.append(\n category_module.Category(\n score=score,\n index=pb2_obj.label_id[idx]\n if idx < len(pb2_obj.label_id)\n else None,\n category_name=pb2_obj.label[idx]\n if idx < len(pb2_obj.label)\n else None,\n display_name=pb2_obj.display_name[idx]\n if idx < len(pb2_obj.display_name)\n else None,\n )\n )\n\n if pb2_obj.location_data.relative_keypoints:\n for idx, elem in enumerate(pb2_obj.location_data.relative_keypoints):\n keypoints.append(\n keypoint_module.NormalizedKeypoint(\n x=elem.x,\n y=elem.y,\n label=elem.keypoint_label,\n score=elem.score,\n )\n )\n\n return Detection(\n bounding_box=bounding_box_module.BoundingBox.create_from_pb2(\n pb2_obj.location_data.bounding_box\n ),\n categories=categories,\n keypoints=keypoints,\n )", "def _object2proto(self) -> Metadata_PB:\n return Metadata_PB(\n name=self.name, id=serialize(self.id), node=serialize(self.node)\n )", "def _from_protobuf(cls, aux_data):\n data = AuxData.serializer.decode(\n BytesIO(aux_data.data), aux_data.type_name\n )\n return cls(data=data, type_name=aux_data.type_name)", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupsMessage_PB", "def _proto2object(proto: Metadata_PB) -> \"Metadata\":\n\n return Metadata(\n 
id=validate_type(_deserialize(blob=proto.id), UID, optional=True),\n name=proto.name,\n node=validate_type(_deserialize(blob=proto.node), Location),\n )" ]
[ "0.8077058", "0.7641945", "0.7594313", "0.7564539", "0.7550068", "0.7347152", "0.7319372", "0.7269308", "0.72298175", "0.70890194", "0.7041015", "0.68744355", "0.6841385", "0.6676443", "0.6657618", "0.6614466", "0.6529713", "0.65050894", "0.646013", "0.6432547", "0.6401638", "0.6384466", "0.63067377", "0.57851255", "0.57391596", "0.57165", "0.5690177", "0.56462306", "0.5591516", "0.5588023" ]
0.83095145
0
Creates a GetGroupMessage from a protobuf. As a requirement of all objects which inherit from Serializable, this method transforms a protobuf object into an instance of this class.
def _proto2object(
    proto: GetGroupMessage_PB,
) -> "GetGroupMessage":

    return GetGroupMessage(
        msg_id=_deserialize(blob=proto.msg_id),
        address=_deserialize(blob=proto.address),
        content=json.loads(proto.content),
        reply_to=_deserialize(blob=proto.reply_to),
    )
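This deserializer is the inverse of the corresponding _object2proto serializer, so the pair gives a lossless round trip. A minimal round-trip sketch, assuming msg is an already-constructed GetGroupMessage (building one requires a Syft Address and UID, omitted here) and that _object2proto/_proto2object are exposed on the class as in PySyft's serde pattern:

    # Round-trip sketch: object -> protobuf -> object; `msg` is assumed to exist.
    proto = msg._object2proto()                      # GetGroupMessage_PB with JSON-encoded content
    restored = GetGroupMessage._proto2object(proto)  # rebuilt from the protobuf fields

    # The identifying fields and the JSON content survive the trip.
    assert restored.id == msg.id
    assert restored.content == msg.content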
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: GetGroupResponse_PB,\n ) -> \"GetGroupResponse\":\n\n return GetGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupResponse_PB:\n return GetGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: CreateGroupResponse_PB,\n ) -> \"CreateGroupResponse\":\n\n return CreateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> CreateGroupResponse_PB:\n return CreateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: GetGroupsResponse_PB,\n ) -> \"GetGroupsResponse\":\n\n return GetGroupsResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def 
_proto2object(\n proto: DeleteGroupResponse_PB,\n ) -> \"DeleteGroupResponse\":\n\n return DeleteGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> DeleteGroupResponse_PB:\n return DeleteGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: UpdateGroupResponse_PB,\n ) -> \"UpdateGroupResponse\":\n\n return UpdateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return cls(**kwargs)", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupMessage_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupMessage_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupResponse_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupResponse_PB", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. 
Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupsMessage_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupsResponse_PB", "def object_from_protobuf(pb, model_type=None):\n key = None\n if isinstance(pb, entity_pb2.Entity):\n pb = pb._pb\n\n if pb.HasField(\"key\"): # Message field (Key)\n key = CustomIterator.key_from_protobuf(pb.key)\n key._type = SubclassMap.get()[key.kind]\n\n entity_props = {}\n\n for prop_name, value_pb in pb.properties.items():\n value = CustomIterator._get_value_from_value_pb(value_pb)\n entity_props[prop_name] = value\n\n obj = model_type._dotted_dict_to_object(entity_props, key)\n return obj", "def to_proto(self):\n proto = bounding_box_pb2.BoundingBox()\n proto.start.CopyFrom(geom_utils.ToVector3j(self.start))\n proto.size.CopyFrom(geom_utils.ToVector3j(self.size))\n return proto" ]
[ "0.8065173", "0.8059963", "0.80385315", "0.7838053", "0.78343934", "0.7728165", "0.772733", "0.76612204", "0.75730646", "0.75216293", "0.7501986", "0.7421604", "0.7359245", "0.7337106", "0.72288305", "0.71763164", "0.70391303", "0.70110816", "0.68666184", "0.68378156", "0.64507353", "0.6313591", "0.62147075", "0.6185094", "0.6170158", "0.608279", "0.604966", "0.5923993", "0.5917072", "0.58498925" ]
0.8384033
0
Creates a GetGroupsMessage from a protobuf. As a requirement of all objects which inherit from Serializable, this method transforms a protobuf object into an instance of this class.
def _proto2object(
    proto: GetGroupsMessage_PB,
) -> "GetGroupsMessage":

    return GetGroupsMessage(
        msg_id=_deserialize(blob=proto.msg_id),
        address=_deserialize(blob=proto.address),
        content=json.loads(proto.content),
        reply_to=_deserialize(blob=proto.reply_to),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: GetGroupsResponse_PB,\n ) -> \"GetGroupsResponse\":\n\n return GetGroupsResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupResponse_PB:\n return GetGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: GetGroupResponse_PB,\n ) -> \"GetGroupResponse\":\n\n return GetGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> CreateGroupResponse_PB:\n return CreateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: CreateGroupResponse_PB,\n ) -> \"CreateGroupResponse\":\n\n return CreateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> 
UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> DeleteGroupResponse_PB:\n return DeleteGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: DeleteGroupResponse_PB,\n ) -> \"DeleteGroupResponse\":\n\n return DeleteGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: UpdateGroupResponse_PB,\n ) -> \"UpdateGroupResponse\":\n\n return UpdateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupsMessage_PB", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return cls(**kwargs)", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupsResponse_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupMessage_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupResponse_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupResponse_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupMessage_PB", "def toDict(self, protoObj):\n iterable = False\n if isinstance(protoObj, (RepeatedCompositeFieldContainer, RepeatedCompositeContainer)):\n iterable = True\n else:\n protoObj = [protoObj]\n retlist = []\n\n for po in protoObj:\n msg = dict()\n for fielddesc, value in po.ListFields():\n # print value, type(value), getattr(value, \"__iter__\", False)\n if fielddesc.type == descriptor.FieldDescriptor.TYPE_GROUP or \\\n isinstance(value, (RepeatedCompositeFieldContainer, RepeatedCompositeContainer)) or \\\n isinstance(value, Message):\n msg[fielddesc.name] = self.toDict(value)\n else:\n msg[fielddesc.name] = value\n retlist.append(msg)\n if not iterable:\n if len(retlist) > 0:\n return retlist[0]\n else:\n return None\n return retlist", "def toDict(self, protoObj):\n iterable = False\n if isinstance(protoObj, RepeatedCompositeFieldContainer):\n iterable = True\n else:\n protoObj = [protoObj]\n retlist = []\n\n for po in protoObj:\n msg = dict()\n for fielddesc, value in po.ListFields():\n # print value, type(value), getattr(value, \"__iter__\", False)\n if fielddesc.type == descriptor.FieldDescriptor.TYPE_GROUP or \\\n isinstance(value, RepeatedCompositeFieldContainer) or \\\n isinstance(value, Message):\n msg[fielddesc.name] = self.toDict(value)\n else:\n msg[fielddesc.name] = value\n retlist.append(msg)\n if not iterable:\n if len(retlist) > 0:\n 
return retlist[0]\n else:\n return None\n return retlist", "def to_proto(self):\n proto = bounding_box_pb2.BoundingBox()\n proto.start.CopyFrom(geom_utils.ToVector3j(self.start))\n proto.size.CopyFrom(geom_utils.ToVector3j(self.size))\n return proto" ]
[ "0.7964506", "0.7870362", "0.7740668", "0.7571255", "0.7528219", "0.750384", "0.7494732", "0.7383841", "0.73493606", "0.7295214", "0.7263857", "0.7139681", "0.7122208", "0.70240104", "0.6993496", "0.6870807", "0.68291026", "0.67293364", "0.6576462", "0.6541453", "0.63282835", "0.628723", "0.62793636", "0.6016085", "0.59654397", "0.5898938", "0.5897326", "0.585855", "0.5807636", "0.5796592" ]
0.8058562
0
Creates an UpdateGroupMessage from a protobuf. As a requirement of all objects which inherit from Serializable, this method transforms a protobuf object into an instance of this class.
def _proto2object(
    proto: UpdateGroupMessage_PB,
) -> "UpdateGroupMessage":

    return UpdateGroupMessage(
        msg_id=_deserialize(blob=proto.msg_id),
        address=_deserialize(blob=proto.address),
        content=json.loads(proto.content),
        reply_to=_deserialize(blob=proto.reply_to),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: UpdateGroupResponse_PB,\n ) -> \"UpdateGroupResponse\":\n\n return UpdateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: CreateGroupResponse_PB,\n ) -> \"CreateGroupResponse\":\n\n return CreateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> CreateGroupResponse_PB:\n return CreateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: DeleteGroupResponse_PB,\n ) -> \"DeleteGroupResponse\":\n\n return DeleteGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def 
_proto2object(\n proto: GetGroupResponse_PB,\n ) -> \"GetGroupResponse\":\n\n return GetGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> GetGroupResponse_PB:\n return GetGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> DeleteGroupResponse_PB:\n return DeleteGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return cls(**kwargs)", "def _proto2object(\n proto: GetGroupsResponse_PB,\n ) -> \"GetGroupsResponse\":\n\n return GetGroupsResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return UpdateGroupMessage_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return UpdateGroupResponse_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupMessage_PB", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupResponse_PB", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. 
Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)", "def _object2proto(self) -> Metadata_PB:\n return Metadata_PB(\n name=self.name, id=serialize(self.id), node=serialize(self.node)\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupMessage_PB", "def FromProto(cls, proto_obj):\n key=None\n if proto_obj.twitter_account:\n twitter_id = long(proto_obj.twitter_account.id_str)\n key = team_twitter_key(twitter_id)\n else:\n twitter_id = 0\n if proto_obj.score_reporter_account:\n score_reporter_id = proto_obj.score_reporter_account.id\n key = team_score_reporter_key(score_reporter_id)\n else:\n score_reporter_id = ''\n return Team(twitter_id=twitter_id, score_reporter_id=score_reporter_id,\n parent=key)" ]
[ "0.80309", "0.77817404", "0.75891674", "0.7585066", "0.7526722", "0.7499154", "0.7463992", "0.73389864", "0.72233707", "0.7196931", "0.71022654", "0.70876956", "0.70350796", "0.69301087", "0.68484676", "0.67432654", "0.66871005", "0.65235186", "0.64753807", "0.6466748", "0.6374014", "0.6238004", "0.6158324", "0.59269893", "0.58499956", "0.58345675", "0.5764996", "0.57308924", "0.57040507", "0.56862515" ]
0.8248486
0
Creates a DeleteGroupMessage from a protobuf. As a requirement of all objects which inherit from Serializable, this method transforms a protobuf object into an instance of this class.
def _proto2object(
    proto: DeleteGroupMessage_PB,
) -> "DeleteGroupMessage":

    return DeleteGroupMessage(
        msg_id=_deserialize(blob=proto.msg_id),
        address=_deserialize(blob=proto.address),
        content=json.loads(proto.content),
        reply_to=_deserialize(blob=proto.reply_to),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> DeleteGroupResponse_PB:\n return DeleteGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: DeleteGroupResponse_PB,\n ) -> \"DeleteGroupResponse\":\n\n return DeleteGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: CreateGroupResponse_PB,\n ) -> \"CreateGroupResponse\":\n\n return CreateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return DeleteGroupMessage_PB", "def _object2proto(self) -> CreateGroupResponse_PB:\n return CreateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return DeleteGroupResponse_PB", "def _proto2object(\n proto: GetGroupResponse_PB,\n ) -> \"GetGroupResponse\":\n\n return GetGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> UpdateGroupMessage_PB:\n 
return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> GetGroupResponse_PB:\n return GetGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: UpdateGroupResponse_PB,\n ) -> \"UpdateGroupResponse\":\n\n return UpdateGroupResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _proto2object(\n proto: GetGroupsResponse_PB,\n ) -> \"GetGroupsResponse\":\n\n return GetGroupsResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return cls(**kwargs)", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupMessage_PB", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return CreateGroupResponse_PB", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def deserialize_message_delete_event(\n self,\n shard: gateway_shard.GatewayShard,\n payload: data_binding.JSONObject,\n *,\n old_message: typing.Optional[messages_models.Message],\n ) -> message_events.MessageDeleteEvent:", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupMessage_PB", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return GetGroupsMessage_PB", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)" ]
[ "0.8354678", "0.7887407", "0.7785718", "0.74330956", "0.72480917", "0.715022", "0.6949523", "0.6918182", "0.6893605", "0.6851132", "0.6755857", "0.6749247", "0.667964", "0.66556287", "0.66541016", "0.6589059", "0.6392503", "0.6390019", "0.6260242", "0.61250013", "0.61075175", "0.5863597", "0.57958156", "0.56731427", "0.5648975", "0.56434274", "0.5578579", "0.54758775", "0.5414358", "0.5380913" ]
0.8342417
1
Gets a new access token if the old one has already expired. access_token: token used to access the TD Ameritrade site. expire_time: time in seconds since epoch when the token will expire.
def get_access(access_token='', expire_time=0):
    #Get a new access token if it is missing, already expired, or within five minutes of expiration
    if (expire_time == 0) or (len(access_token) == 0) or (time.time() - expire_time >= -300):
        #API endpoint used to authorize the account with the refresh token
        auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'

        #Data needed for the token request
        data = {'grant_type': 'refresh_token',
                'refresh_token': TDAuth_Info.refresh_token,
                'client_id': TDAuth_Info.client_id}

        #Post the data to get the token
        auth_reply_json = requests.post(url=auth_url, data=data)
        auth_reply = auth_reply_json.json()

        #Extract the new access token and compute the time at which it expires
        access_token = auth_reply['access_token']
        expire_time = time.time() + auth_reply['expires_in']

    return (access_token, expire_time)
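A minimal usage sketch of get_access, assuming the module-level imports (requests, time) and the TDAuth_Info credentials module from the original script: the returned pair is threaded back into later calls, so a network round trip only happens when the cached token is missing or within five minutes of expiring.

    # First call: nothing cached, so a token is fetched with the refresh token.
    access_token, expire_time = get_access()

    # Later calls reuse the cached pair and only refresh near expiration.
    access_token, expire_time = get_access(access_token, expire_time)

    # The token is then sent as a bearer token on API requests.
    headers = {'Authorization': 'Bearer {}'.format(access_token)}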
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']", "def get_access_token(self):\n if self.token.is_expired():\n logging.debug('Requesting a new access token')\n self.token.load_from_json(json=self.__get_token_data__())\n else:\n logging.debug('Access token still valid')\n\n return self.token.access_token", "def refresh_token(self):\n now = timezone.now()\n limit = now - timedelta(days=20)\n # TODO: use expires_in from response data?\n print(self.token_refresh_date)\n print(limit)\n if self.token_refresh_date < limit:\n url = '{}refresh_access_token'.format(conf.INSTAGRAM_API)\n params = {\n 'grant_type': 'ig_refresh_token',\n 'access_token': self.token\n }\n response = requests.get(url, params=params)\n data = response.json()\n else:\n print('no need to get a fresch token yet')\n return\n if response.status_code == 200 and data:\n self.token = data.get('access_token')\n self.token_refresh_date = now\n self.token_ok = True\n self.save()\n elif settings.DEBUG:\n self.token_ok = False\n self.save()\n print('could not refresh token')\n return", "def build_access_token_expired():\n return do_build_access_token(tenant_id='intility_tenant_id', expired=True)", "def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300,\n 'created_at': expired\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(401, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']", "def 
getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def test_access_token_refreshed_for_token_expired_with_get_method(self):\n with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):\n MockOAuth2Session.RESPONSES = [TokenExpiredError(), 200]\n proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)\n session = proxy._session\n first_token = session.token['access_token']\n # m.token['expires_at'] = m.token['expires_at'] - 36001\n proxy.get(\"/fake_url/1/\")\n second_token = session.token['access_token']\n self.assertEqual(len(session.get.call_args_list), 2) # Number of calls\n self.assertEqual(len(session.fetch_token.call_args_list), 2) # Number of calls\n session.get.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])\n self.assertEqual(AccessToken.objects.count(), 1)\n self.assertNotEquals(first_token, second_token)", "def renew_access_token(self):\n self._access_token = self._get_access_token()", "def access_token(self):\n access_token = self.session.get('component_access_token')\n if access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 60:\n return access_token\n\n self.fetch_access_token()\n return self.session.get('component_access_token')", "def get_access_token(self,\n client_id=settings.OPENHUMANS_CLIENT_ID,\n client_secret=settings.OPENHUMANS_CLIENT_SECRET):\n # Also refresh if nearly expired (less than 60s remaining).\n delta = timedelta(seconds=60)\n if arrow.get(self.token_expires) - delta < arrow.now():\n self._refresh_tokens(client_id=client_id,\n client_secret=client_secret)\n return self.access_token", "def refresh_access_token(self):\n parameters = {'client_id': self.CLIENT_ID,\n 'auth_code': self.auth_code,\n 'client_secret': self.CLIENT_SECRET,\n 'grant_type': 'authorization_code'}\n url = self.ACCESS_TOKEN_URL % parameters\n data = self._get_refresh_data()\n logging.info('url: %s, data: %s', url, data)\n\n try:\n # empty data to trigger a post\n req = urllib2.Request(url, data)\n req.add_header('Content-Type', 'application/x-www-form-urlencoded')\n result = urllib2.urlopen(req)\n result = json.load(result)\n logging.info('result: %s', result)\n except urllib2.HTTPError, err:\n result = json.load(err)\n logging.info(result)\n raise err\n\n self.access_token = result['access_token']\n self.expires = int(time.time() + result['expires_in'])\n self.refresh_token = result.get('refresh_token', None)", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "def _refresh_token(self):\n token_url = self._base_url + '/api/oauth2/token'\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self._client_id,\n 'client_secret': self._client_secret\n }\n headers = {'accept': 'application/json'}\n response = 
requests.post(token_url,proxies = self._proxy,params= params,headers = headers)\n logging.debug(response.text)\n parsed = response.json()\n self._access_token = parsed['access_token']\n self._refresh_token = parsed['refresh_token']\n expires_in = parsed['expires_in']\n ## Keep a buffer of 120 seconds to refresh token before expiry\n self._expires_at = datetime.now() + timedelta(seconds=(expires_in - 120))\n\n logging.debug('access_token %s expires at %s', self._access_token, self._expires_at)\n\n return", "def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}", "def get_renewed_token(self):\n api = facebook.GraphAPI(self.config.fb_share_token)\n long_token = api.extend_access_token(\n self.config.fb_share_app_id,\n self.config.fb_share_app_secret\n )\n self.config.fb_share_token = long_token['access_token']\n self.config.save()\n return self.config.fb_share_token", "def refresh_token(self):\n token = json.loads(get_metadata(\n 'instance/service-accounts/%s/token' % self.service_account,\n ))\n seconds = token['expires_in'] - 60\n self._expiration_time = (\n datetime.datetime.now() + datetime.timedelta(seconds=seconds)\n )\n self._token = token['access_token']", "def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def get_token(self):\n logging.debug(\"In the Token get_token() class method.\")\n\n if datetime.datetime.now() > self.token_expiry:\n logging.info(\"Token Expired.\")\n self.generate_tokens()\n return self.access_token", "def accessToken(self):\n if self.isExpired:\n self.refresh()\n\n return self._accessToken", "def refresh_access_token(self):\n self._access_token = self.generate_access_token()", "def get_access_token(self, renew=False):\n if self.access_token is None or renew:\n headers = {} # don't use json here, juse urlencode.\n url = self._url_for_op('token')\n data = urllib.urlencode({'grant_type': 'client_credentials',\n 'client_id':self.CLIENT_ID,\n 'client_secret':self.CLIENT_SECRET})\n req = urllib2.Request(url, data, headers)\n try:\n response = urllib2.urlopen(req).read()\n response = json.loads(response)\n except urllib2.HTTPError as e:\n raise ApiError(e.reason)\n except Exception, e:\n raise ApiError(e)\n self.access_token = response['access_token']\n return self.access_token", "def isAPITokenExpired(self):\n print('WaPOR API: Checking token...')\n self.isAPITokenSet()\n\n # APIToken = self.token['API']\n RefToken = self.token['Refresh']\n dt_start = self.token['time']['start']\n dt_expire = self.token['time']['expire']\n\n dt_now = datetime.datetime.now().timestamp()\n if dt_now - dt_start > dt_expire - TIME_EXPIRES_BEFORE_SECOND:\n Token = self._query_refreshToken(RefToken)\n\n if Token is None:\n raise Exception(\n 'WaPOR API ERROR: The data with specified level version'\n ' is not available in this version')\n else:\n self.token['Access'] = Token['accessToken']\n self.token['Refresh'] = Token['refreshToken']\n self.token['time']['expire'] = Token['expiresIn']\n self.token['time']['start'] = dt_now\n 
self.token['time']['now'] = dt_now", "def _get_aad_token(self, resource: str) -> str:\n aad_token = self.oauth_tokens.get(resource)\n if aad_token and self._is_oauth_token_valid(aad_token):\n return aad_token[\"access_token\"]\n\n self.log.info(\"Existing AAD token is expired, or going to expire soon. Refreshing...\")\n try:\n for attempt in self._get_retry_object():\n with attempt:\n if self.databricks_conn.extra_dejson.get(\"use_azure_managed_identity\", False):\n params = {\n \"api-version\": \"2018-02-01\",\n \"resource\": resource,\n }\n resp = requests.get(\n AZURE_METADATA_SERVICE_TOKEN_URL,\n params=params,\n headers={**self.user_agent_header, \"Metadata\": \"true\"},\n timeout=self.token_timeout_seconds,\n )\n else:\n tenant_id = self.databricks_conn.extra_dejson[\"azure_tenant_id\"]\n data = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self.databricks_conn.login,\n \"resource\": resource,\n \"client_secret\": self.databricks_conn.password,\n }\n azure_ad_endpoint = self.databricks_conn.extra_dejson.get(\n \"azure_ad_endpoint\", AZURE_DEFAULT_AD_ENDPOINT\n )\n resp = requests.post(\n AZURE_TOKEN_SERVICE_URL.format(azure_ad_endpoint, tenant_id),\n data=data,\n headers={\n **self.user_agent_header,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n },\n timeout=self.token_timeout_seconds,\n )\n\n resp.raise_for_status()\n jsn = resp.json()\n\n self._is_oauth_token_valid(jsn)\n self.oauth_tokens[resource] = jsn\n break\n except RetryError:\n raise AirflowException(f\"API requests to Azure failed {self.retry_limit} times. Giving up.\")\n except requests_exceptions.HTTPError as e:\n raise AirflowException(f\"Response: {e.response.content}, Status Code: {e.response.status_code}\")\n\n return jsn[\"access_token\"]", "def getAccessToken( refresh_token):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=ApiJWTAuthentication.expirationTime_Access),\n 'refresh_token': refresh_token\n }\n jwttoken= jwt.encode(\n payload,\n ApiJWTAuthentication.secretKey_access,\n algorithm='HS256'\n )\n token=jwttoken.decode('utf-8')\n return {\"message\": \"success\", \"access_token\": token}\n except Exception as e:\n return {\"message\": \"exception\",\"Exception\": str(e)}", "def test_expires_soon(self):\n now = timezone.now()\n window = SparkSettings().RENEW_TOKEN_WINDOW\n cur = self.factory.build(access_token='good',\n expires_at=now + timedelta(seconds=window*2))\n exp = self.factory.build(access_token='expired',\n expires_at=now + timedelta(seconds=window/2))\n self.assertFalse(cur.expires_soon())\n self.assertTrue(exp.expires_soon())", "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_code >= 200:\n self.API_TOKEN = api_response.content.decode()\n\n return self.API_TOKEN\n else:\n return None", "def test_access_token_refreshed_for_token_expired_with_post_method(self):\n with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):\n MockOAuth2Session.RESPONSES = [TokenExpiredError(), 200]\n proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)\n session = proxy._session\n first_token = session.token['access_token']\n # m.token['expires_at'] = m.token['expires_at'] - 36001\n proxy.post(\"/fake_url/1/\")\n second_token = session.token['access_token']\n 
self.assertEqual(len(session.post.call_args_list), 2) # Number of calls\n self.assertEqual(len(session.fetch_token.call_args_list), 2) # Number of calls\n session.post.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])\n self.assertEqual(AccessToken.objects.count(), 1)\n self.assertNotEquals(first_token, second_token)" ]
[ "0.7643279", "0.7634847", "0.7390794", "0.7374576", "0.7371795", "0.73076856", "0.73074526", "0.7294325", "0.7146626", "0.7060527", "0.7053016", "0.7022221", "0.6941598", "0.6938422", "0.69234806", "0.6905834", "0.6905202", "0.68798673", "0.6875702", "0.6870887", "0.68264604", "0.68263024", "0.6823447", "0.68206537", "0.6808699", "0.6757726", "0.6718002", "0.67176825", "0.66611975", "0.66584265" ]
0.76784253
0
Get the user info and preferences for subscribing to the websocket, and get the token timestamp as milliseconds. access_token: token used to get information on my account.
def get_user_principals(access_token):
    #Make request to user info and preferences to get principals for login
    user_url = 'https://api.tdameritrade.com/v1/userprincipals'
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    params = {'fields': 'streamerSubscriptionKeys,streamerConnectionInfo'}

    user_principals_json = requests.get(url=user_url, headers=headers, params=params)
    user_principals = user_principals_json.json()

    #Convert token timestamp to milliseconds (required for login to the websocket)
    tokenTimeStamp = user_principals['streamerInfo']['tokenTimestamp']
    token_date = dateutil.parser.parse(tokenTimeStamp, ignoretz=True)
    epoch = datetime.datetime.utcfromtimestamp(0)
    tokenTimeStampAsMs = int((token_date - epoch).total_seconds() * 1000.0)

    return (user_principals, tokenTimeStampAsMs)
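A minimal sketch chaining this helper with get_access above, assuming the same module-level imports (requests, dateutil, datetime); how the streamerInfo fields are wired into the websocket login request is not shown in this row, so it is left out here.

    # Refresh (or reuse) the OAuth token, then pull the streamer principals.
    access_token, expire_time = get_access()
    user_principals, token_ts_ms = get_user_principals(access_token)

    streamer_info = user_principals['streamerInfo']  # connection details for the streamer login
    print(token_ts_ms)  # millisecond timestamp expected by the websocket login credential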
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ws_user_data(self):\n return self.ws_request(self.ws_request(self.start_userdata_stream()['listenKey']))", "def UserToken(self) -> object:", "def get_token(self) -> None:\n context_dict = demisto.getIntegrationContext()\n cur_token = context_dict.get('token')\n refresh_token = context_dict.get('refresh_token')\n\n if cur_token:\n self._headers['NetWitness-Token'] = cur_token\n self.refresh_token = refresh_token\n else:\n self.generate_new_token(refresh_token)", "def _get_token(self):\n return user.get_token()", "def get_oauth_token():\n return session.get('remote_oauth')", "def get_token(request):\n capability = TwilioCapability(\n settings.TWILIO_ACCOUNT_SID,\n settings.TWILIO_AUTH_TOKEN)\n \"\"\"Allow our users to make outgoing calls with Twilio Client\"\"\"\n capability.allow_client_outgoing(settings.TWIML_APPLICATION_SID)\n\n \"\"\"Allow our users to accept incoming calls from pyphon\"\"\"\n capability.allow_client_incoming('caller')\n\n \"\"\"Generate the capability token\"\"\"\n token = capability.generate()\n\n return JsonResponse({'token': token})", "def _get_ws(self):\n try:\n create_connection\n except:\n from websocket import create_connection\n\n if self._ws is None:\n try:\n self._ws = create_connection(('wss://{}:8080/api/ws'.format(self._wshost)), timeout=10)\n\n payload = {\n 'action' : \"userOnline\",\n 'userAgent' : 'app',\n 'version' : 6,\n 'nonce' : gen_nonce(15),\n 'apkVesrion': \"1.8\",\n 'os' : 'ios',\n 'at' : self.get_bearer_token(),\n 'apikey' : self.get_user_apikey(),\n 'ts' : str(int(time.time())),\n 'model' : 'iPhone10,6',\n 'romVersion': '11.1.2',\n 'sequence' : str(time.time()).replace('.','')\n }\n\n self._ws.send(json.dumps(payload))\n wsresp = self._ws.recv()\n # _LOGGER.error(\"open socket: %s\", wsresp)\n\n except (socket.timeout, ConnectionRefusedError, ConnectionResetError):\n _LOGGER.error('failed to create the websocket')\n self._ws = None\n\n return self._ws", "async def twitchtoken(self):\n self.settings[\"TWITCH_TOKEN\"] = \"6mmlypg9emj6jebbpylmlpejwxj2pn\"\n dataIO.save_json(\"data/streams/settings.json\", self.settings)\n await self.bot.say('Twitch Client-ID set.')", "def get_token(request):\n # Create a TwilioCapability token with our Twilio API credentials\n capability = ClientCapabilityToken(\n settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN\n )\n\n capability.allow_client_outgoing(settings.TWILIO_ACCOUNT_SID)\n capability.allow_client_incoming('support_agent')\n token = capability.to_jwt()\n\n return JsonResponse({'token': token.decode('utf-8')})", "def websocket(self) -> Websocket:\n self.__http_client.data_snapshot()\n host_uri = f'ws://{self.__http_client.host_ip}/api/v1/data/stream'\n subprotocols = [f'SessionToken_{self.__http_client.session_token}', \"object\"]\n return Websocket(host_uri, subprotocols, timeout=self.__http_client.request_timeout)", "def connect(self):\n r = authentication.token(connection=self)\n\n\n self.auth_token = r.json().get('token')", "def get_oauth_data():", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "async def test_websocket(keyfile):\n server = Server(key = keyfile.get('key'), secret = keyfile.get('secret'))\n async with RestClient(server=server) as rest_kraken:\n token = await rest_kraken.websockets_token()\n assert isinstance(token, str) # TODO : proper type here\n print(token)", "def websocket(self):\n return self._websocket_client.wss", "def 
get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def _oauth_payload_generate(self):\n\t\tresult = {\n\t\t\t\"oauth_consumer_key\" : self.key,\n\t\t\t\"oauth_nonce\" : self._oauth_nonce_generate(),\n\t\t\t\"oauth_signature_method\" : \"HMAC-SHA1\",\n\t\t\t\"oauth_timestamp\" : str( int( time.time()) ),\n\t\t\t\"oauth_version\" : \"1.0\"\n\t\t}\n\n\t\t# * if token is unavaliable, this func must be called from request_token\n\t\t# provide callback addr instead.\n\t\t# * access token should have a higher priority ...\n\t\tif self.has_user():\n\t\t\tresult[\"oauth_token\"] = self.a_token\n\t\telse:\n\t\t\tif len( self.token ) > 0:\n\t\t\t\tresult[\"oauth_token\"] = self.token\n\t\t\telse:\n\t\t\t\tresult[\"oauth_callback\"] = self.callback\n\n\t\treturn result", "def auth_token(self):", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def get_user_info(self, access_token, openid):\n url = get_config(\"login.wechat.user_info_url\") % (access_token, openid)\n return self._access_wxapi_or_raise(url)", "def get_connected_user():\n usernames = clients.keys()\n data = json.dumps(usernames)\n emit('on_client_list_received', data)", "async def _fetch_access_token(session: ClientSession) -> dict:\n LOGGER.debug('fetching access token...')\n password = config.get('WFWX_SECRET')\n user = config.get('WFWX_USER')\n auth_url = config.get('WFWX_AUTH_URL')\n async with session.get(auth_url, auth=BasicAuth(login=user, password=password)) as response:\n return await response.json()", "async def _perform_connect(self):\n # Return connected if we are already connected.\n if self._websocket:\n if self._websocket.open:\n return True\n\n self.logger.debug(\"Starting connect.\")\n\n self.logger.debug(\"Connecting to %s\" % self.wsc_url)\n self._websocket = await websockets.connect(self.wsc_url)\n \n #We need to authenticate upon opening the connection (modified to remove apkVesrion, os, model, romVersion NW 28th Oct 2020)\n payload = {}\n \n payload['action'] = \"userOnline\"\n payload['userAgent'] = 'app'\n payload['version'] = 8\n payload['appid'] = self._appid\n payload['_nonce'] = self._nonce\n #payload['apkVesrion'] = \"1.8\"\n #payload['apkVersion'] = \"1.8\"\n #payload['os'] = 'ios'\n payload['at'] = self.authenticationToken\n payload['apikey'] = self.apikey\n payload['ts'] = self.timestamp\n #payload['model'] = 'iPhone10,6'\n #payload['romVersion'] = '11.1.2'\n payload['sequence'] = self.sequence\n\n string = json.dumps(payload);\n\n self.logger.debug('Sending login request [%s]' % string);\n\n await self._send_request(string)", "def _get_access_token():\n credentials = 
ServiceAccountCredentials.from_json_keyfile_name(\n '/home/homeassistant/.homeassistant/custom_components/service-account.json', FCM_SCOPE)\n access_token_info = credentials.get_access_token()\n logger.debug(\"Using token: \" + access_token_info.access_token)\n return access_token_info.access_token\n # [END retrieve_access_token]\n\n \"\"\"Server Side FCM sample.\n Firebase Cloud Messaging (FCM) can be used to send messages to clients on iOS,\n Android and Web.\n This sample uses FCM to send two types of messages to clients that are subscribed\n to the `news` topic. One type of message is a simple notification message (display message).\n The other is a notification message (display notification) with platform specific\n customizations. For example, a badge is added to messages that are sent to iOS devices.\n \"\"\"", "async def _authenticate(self, ws: WSAssistant):\n try:\n auth_payload: Dict[str, Any] = self._bitmart_auth.get_ws_auth_payload(bitmart_utils.get_ms_timestamp())\n ws_message: WSRequest = WSRequest(auth_payload)\n\n await ws.send(ws_message)\n ws_response = await ws.receive()\n\n auth_resp: Dict[str, Any] = ws_response.data\n\n if \"errorCode\" in auth_resp.keys():\n self.logger().error(f\"WebSocket login errored with message: {auth_resp['errorMessage']}\",\n exc_info=True)\n raise ConnectionError\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Error occurred when authenticating to user stream.\", exc_info=True)\n raise", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def on_open(ws):\n logger.info('WebSocket open, sending authentication.')\n authenticate(ws, service_account_file, audience)\n ws.send(STATUS_COMMAND_FORMAT.format(status_payload=json.dumps(get_status(router_password, source_port, dest_ip, dest_port))))" ]
[ "0.59929764", "0.5843327", "0.5818074", "0.5717326", "0.57172483", "0.5674695", "0.5656598", "0.5551188", "0.5536643", "0.5520706", "0.55206686", "0.55195224", "0.54835796", "0.5472258", "0.54532564", "0.54329437", "0.54327536", "0.5409122", "0.5407365", "0.5407365", "0.53839606", "0.5381598", "0.5363264", "0.5360801", "0.53468317", "0.534667", "0.53457665", "0.5342892", "0.5342892", "0.53194" ]
0.58820873
1
Get orders for the account in the specified date range on TD Ameritrade
access_token: token used to access the TD Ameritrade site
start_date: beginning time period to get orders (includes start_date in returned orders)
end_date: ending time period to get orders (includes end_date in returned orders)
def get_orders(access_token, start_date, end_date, status):
    orders_url = 'https://api.tdameritrade.com/v1/orders'
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    # Parameters for the order
    params = {'accountId': TDAuth_Info.account_num,
              'fromEnteredTime': start_date,
              'toEnteredTime': end_date,
              'status': status}
    # Make the get request to TD Ameritrade
    orders_data_json = requests.get(url=orders_url, headers=headers, params=params)
    return orders_data_json.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_price_history_dates(access_token,ticker,start_date,end_date,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'startDate':start_date,\r\n 'endDate': end_date,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def get_orders_from_lengow(\n start_date, end_date, account_id=ACCOUNT_ID, group_id=GROUP_ID, flux_id=FLUX_ID,\n order_status=\"all\", response_format=\"json\"):\n date_range = \"{start_date:{df}}/{end_date:{df}}\".format(\n start_date=start_date, end_date=end_date, df=\"%Y-%m-%d\")\n flux_filter = \"{account_id:d}/{group_id:d}/{flux_id}\".format(\n account_id=account_id, group_id=group_id, flux_id=flux_id)\n suffix = \"commands/{order_status}/{response_format}\".format(\n order_status=order_status, response_format=response_format)\n url = \"{base_url}/{date_range}/{flux_filter}/{suffix}/\".format(\n base_url=BASE_URL, date_range=date_range, flux_filter=flux_filter, suffix=suffix)\n\n return requests.get(url).json()", "def get_transaction_list(self, account_id, from_date, to_date,\n type_list=None):\n endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)\n\n params = {}\n\n params[\"from\"] = from_date\n params[\"to\"] = to_date\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "async def get_all_orders(self, symbol, order_id=None, start=None, end=None, limit=500):\n uri = \"/fapi/v1/allOrders\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n if order_id:\n params[\"orderId\"] = order_id\n if start:\n params[\"startTime\"] = start\n if end:\n params[\"endTime\"] = end\n success, error = await self.request(\"GET\", uri, params=params, auth=True)\n return success, error", "def get_orders(*, deal_id, from_date=None, start_date=None, end_date=None):\n return GetOrders(\n deal_id=deal_id, from_date=from_date, start_date=start_date, end_date=end_date\n ).orders", "def get_spend_by_account_custom_daterange(self, account_id, start_date, end_date):\n try:\n account = Client.objects.get(id=account_id)\n except Client.DoesNotExist:\n return\n\n spend_sum = 0\n adwords_accounts = account.adwords.all()\n for adwords_account in adwords_accounts:\n client = get_client()\n client.client_customer_id = adwords_account.dependent_account_id\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n ],\n 'dateRange': {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n }\n\n try:\n campaign_exclusion = CampaignExclusions.objects.get(account=account)\n excluded_campaign_ids = [campaign.campaign_id for campaign in campaign_exclusion.aw_campaigns.all()]\n if len(excluded_campaign_ids) > 0:\n campaign_report_selector['predicates'].append({\n 'field': 
'CampaignId',\n 'operator': 'NOT_IN',\n 'values': excluded_campaign_ids\n })\n except CampaignExclusions.DoesNotExist:\n pass\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n # This is the cost for this timerange\n cost = int(campaign_row['cost']) / 1000000\n spend_sum += cost\n\n return spend_sum", "def query_orders(self):\n return self._call_txtrader_api('query_orders', {})", "def returnOpenOrders(self, account=None):\n if not account:\n if \"default_account\" in config:\n account = config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n\n orders = self.dpay.rpc.get_open_orders(account, limit=1000)\n return orders", "def get_options_data(self, from_date, to_date, range=\"None\"):\n base_url = 'https://api.tdameritrade.com/v1/marketdata/chains?&symbol={stock_ticker}&fromDate={startdate}&toDate={enddate}&range={range}'\n endpoint = base_url.format(stock_ticker=self.ticker, startdate=from_date, enddate=to_date, range=range)\n\n page = requests.get(url=endpoint, params={'apikey': self.key})\n time.sleep(1)\n content = json.loads(page.content)\n\n call_options = pd.DataFrame()\n put_options = pd.DataFrame()\n\n if content[\"putExpDateMap\"] and content[\"callExpDateMap\"]:\n for date in content[\"callExpDateMap\"]:\n for strike in content[\"callExpDateMap\"][date]:\n for data in content[\"callExpDateMap\"][date][strike]:\n call_options = call_options.append({\n 'strikePrice': data[\"strikePrice\"],\n 'expirationDate': data[\"expirationDate\"],\n 'daysToExpiration': data[\"daysToExpiration\"],\n 'call': data[\"putCall\"],\n 'call_bid': data[\"bid\"],\n 'call_ask': data[\"ask\"],\n 'call_last': data[\"last\"],\n 'call_mark': data[\"mark\"],\n 'call_bidSize': data[\"bidSize\"],\n 'call_askSize': data[\"askSize\"],\n 'call_bidAskSize': data[\"bidAskSize\"],\n 'call_lastSize': data[\"lastSize\"],\n 'call_highPrice': data[\"highPrice\"],\n 'call_lowPrice': data[\"lowPrice\"],\n 'call_openPrice': data[\"openPrice\"],\n 'call_closePrice': data[\"closePrice\"],\n 'call_totalVolume': data[\"totalVolume\"],\n 'call_tradeDate': data[\"tradeDate\"],\n 'call_tradeTimeInLong': data[\"tradeTimeInLong\"],\n 'call_quoteTimeInLong': data[\"quoteTimeInLong\"],\n 'call_netChange': data[\"netChange\"],\n 'call_volatility': data[\"volatility\"],\n 'call_delta': data[\"delta\"],\n 'call_gamma': data[\"gamma\"],\n 'call_theta': data[\"theta\"],\n 'call_vega': data[\"vega\"],\n 'call_rho': data[\"rho\"],\n 'call_openInterest': data[\"openInterest\"],\n 'call_timeValue': data[\"timeValue\"],\n 'call_theoreticalOptionValue': data[\"theoreticalOptionValue\"],\n 'call_theoreticalVolatility': data[\"theoreticalVolatility\"],\n 'call_optionDeliverablesList': data[\"optionDeliverablesList\"],\n 'call_expirationType': data[\"expirationType\"],\n 'call_lastTradingDay': data[\"lastTradingDay\"],\n 'call_multiplier': data[\"multiplier\"],\n 'call_percentChange': data[\"percentChange\"],\n 'call_markChange': data[\"markChange\"],\n 'call_markPercentChange': data[\"markPercentChange\"]},\n ignore_index=True)\n\n for date in content[\"putExpDateMap\"]:\n for strike in content[\"putExpDateMap\"][date]:\n for data in 
content[\"putExpDateMap\"][date][strike]:\n put_options = put_options.append({\n 'strikePrice': data[\"strikePrice\"],\n 'expirationDate': data[\"expirationDate\"],\n 'daysToExpiration': data[\"daysToExpiration\"],\n 'put': data[\"putCall\"],\n 'put_bid': data[\"bid\"],\n 'put_ask': data[\"ask\"],\n 'put_last': data[\"last\"],\n 'put_mark': data[\"mark\"],\n 'put_bidSize': data[\"bidSize\"],\n 'put_askSize': data[\"askSize\"],\n 'put_bidAskSize': data[\"bidAskSize\"],\n 'put_lastSize': data[\"lastSize\"],\n 'put_highPrice': data[\"highPrice\"],\n 'put_lowPrice': data[\"lowPrice\"],\n 'put_openPrice': data[\"openPrice\"],\n 'put_closePrice': data[\"closePrice\"],\n 'put_totalVolume': data[\"totalVolume\"],\n 'put_tradeDate': data[\"tradeDate\"],\n 'put_tradeTimeInLong': data[\"tradeTimeInLong\"],\n 'put_quoteTimeInLong': data[\"quoteTimeInLong\"],\n 'put_netChange': data[\"netChange\"],\n 'put_volatility': data[\"volatility\"],\n 'put_delta': data[\"delta\"],\n 'put_gamma': data[\"gamma\"],\n 'put_theta': data[\"theta\"],\n 'put_vega': data[\"vega\"],\n 'put_rho': data[\"rho\"],\n 'put_openInterest': data[\"openInterest\"],\n 'put_timeValue': data[\"timeValue\"],\n 'put_theoreticalOptionValue': data[\"theoreticalOptionValue\"],\n 'put_theoreticalVolatility': data[\"theoreticalVolatility\"],\n 'put_optionDeliverablesList': data[\"optionDeliverablesList\"],\n 'put_expirationType': data[\"expirationType\"],\n 'put_lastTradingDay': data[\"lastTradingDay\"],\n 'put_multiplier': data[\"multiplier\"],\n 'put_percentChange': data[\"percentChange\"],\n 'put_markChange': data[\"markChange\"],\n 'put_markPercentChange': data[\"markPercentChange\"]},\n ignore_index=True)\n\n self.options_chain = pd.merge(call_options, put_options, how='outer', on=['strikePrice', 'expirationDate', 'daysToExpiration'], suffixes=(\"_call\", \"_put\"))\n self.options_chain['expirationDate'] = pd.to_datetime(self.options_chain['expirationDate'], unit = \"ms\")\n\n self.options_chain[\"call_put_volume\"] = self.options_chain[\"call_totalVolume\"] / self.options_chain[\"put_totalVolume\"]\n self.options_chain[\"call_put_oi\"] = self.options_chain[\"call_openInterest\"] / self.options_chain[\"put_openInterest\"]\n self.options_chain[\"call_volume_oi\"] = self.options_chain[\"call_totalVolume\"] / self.options_chain[\"call_openInterest\"]\n\n self.options_chain[\"put_call_volume\"] = self.options_chain[\"put_totalVolume\"] / self.options_chain[\"call_totalVolume\"]\n self.options_chain[\"put_call_oi\"] = self.options_chain[\"put_openInterest\"] / self.options_chain[\"call_openInterest\"]\n self.options_chain[\"put_volume_oi\"] = self.options_chain[\"put_totalVolume\"] / self.options_chain[\"put_openInterest\"]\n\n self.options_chain[\"ticker\"] = self.ticker\n\n self.options_chain = self.options_chain.replace(np.nan, 0)\n self.options_chain = self.options_chain.replace([np.inf, -np.inf], 999999)\n\n return self.options_chain\n\n else:\n return call_options", "def get_transactions(self, account_id, from_date=None, to_date=None,\n page_size=None, type_list=None):\n endpoint = 'accounts/{0}/transactions'.format(account_id)\n\n params = {}\n\n if from_date:\n params[\"from\"] = from_date\n\n if to_date:\n params[\"to\"] = to_date\n\n if page_size:\n params[\"pageSize\"] = page_size\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "def get_order_by_id(access_token,order_ID):\r\n\r\n orders_url = 
'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers)\r\n return orders_data_json.json()", "def get_orders(request):\n close_old_connections()\n try:\n # Give all orders maded on the given date.\n return Order.objects.filter(\n date__date=request.GET['date']).order_by('-date')\n except MultiValueDictKeyError:\n # Give all orders today.\n return Order.objects.filter(\n date__date=datetime.now().date()).order_by('-date')", "def generate_agreement_orders(self, start_date, end_date):\n self.ensure_one()\n if not self.active:\n return\n lines_to_order = {}\n exp_date = fields.Date.from_string(self.next_expiration_date)\n if exp_date < end_date and self.prolong != 'unlimited':\n end_date = exp_date\n for line in self.agreement_line:\n # Check if there is any agreement line to order\n if not line.active_chk:\n continue\n # Check future orders for this line until end_date\n next_order_date = self._get_next_order_date(line, start_date)\n while next_order_date <= end_date:\n # Add to a list to order all lines together\n if not lines_to_order.get(next_order_date):\n lines_to_order[next_order_date] = self.env[\n 'sale.recurring_orders.agreement.line']\n lines_to_order[next_order_date] |= line\n next_order_date = self._get_next_order_date(\n line, next_order_date)\n # Order all pending lines\n dates = lines_to_order.keys()\n dates.sort()\n for date in dates:\n # Check if an order exists for that date\n order = self.order_line.filtered(\n lambda x: (\n fields.Date.to_string(\n fields.Datetime.from_string(x.date_order)) ==\n fields.Date.to_string(date)))\n if not order:\n # create it if not exists\n self.create_order(\n fields.Date.to_string(date), lines_to_order[date])", "def archive_transactions(start_date, end_date):\n\n # This is a whitelisted function; check permissions.\n if not frappe.has_permission('eBay Manager', 'write'):\n frappe.throw('You do not have permission to access the eBay Manager',\n frappe.PermissionError)\n\n # Convert string or datetime arguments to date objects (e.g. 
from JS)\n if not (start_date and end_date):\n frappe.throw('Must have start and end dates!')\n start_date = frappe.utils.getdate(start_date)\n end_date = frappe.utils.getdate(end_date)\n\n # Check start and end date (inclusive) are reasonable\n if end_date < start_date:\n frappe.throw('Start date after end date!')\n if end_date >= datetime.datetime.now().date():\n frappe.throw('Cannot archive dates on or after current UTC date!')\n dates = tuple(date_range(start_date, end_date))\n\n # Create table if it does not exist\n frappe.db.sql(\"\"\"\n CREATE TABLE IF NOT EXISTS `zeBayTransactions`\n (\n posting_date DATE PRIMARY KEY,\n transactions MEDIUMTEXT NOT NULL,\n payouts MEDIUMTEXT NOT NULL\n );\n \"\"\")\n # Get transactions\n transactions_by_date = {x: [] for x in dates}\n transactions = get_transactions(start_date=start_date, end_date=end_date)\n transactions.sort(key=operator.itemgetter('transaction_date'))\n for transaction in transactions:\n # Get date of transaction and append to list\n transaction_date = datetime.datetime.strptime(\n transaction['transaction_date'], '%Y-%m-%dT%H:%M:%S.%fZ'\n ).date()\n # Find item code(s) of transaction, if any\n transaction['item_codes'] = find_item_codes(transaction)\n transactions_by_date[transaction_date].append(transaction)\n\n # Get payouts\n payouts_by_date = {x: [] for x in dates}\n payouts = get_payouts(start_date=start_date, end_date=end_date)\n payouts.sort(key=operator.itemgetter('payout_date'))\n for payout in payouts:\n # Get date of payout and append to list\n payout_date = datetime.datetime.strptime(\n payout['payout_date'], '%Y-%m-%dT%H:%M:%S.%fZ'\n ).date()\n payouts_by_date[payout_date].append(payout)\n\n # Save transactions and payouts in table\n for entry_date in dates:\n t_data = transactions_by_date[entry_date]\n p_data = payouts_by_date[entry_date]\n if not (t_data or p_data):\n # No transactions or payouts for this date\n frappe.db.sql(\"\"\"\n DELETE FROM `zeBayTransactions`\n WHERE posting_date = %(posting_date)s;\n \"\"\", {'posting_date': entry_date})\n else:\n # Store transactions and payouts\n params = {\n 'posting_date': entry_date,\n 'transactions': json.dumps(t_data),\n 'payouts': json.dumps(p_data)\n }\n frappe.db.sql(\"\"\"\n REPLACE INTO `zeBayTransactions`\n VALUES (%(posting_date)s, %(transactions)s, %(payouts)s);\n \"\"\", params)\n\n frappe.db.commit()", "def post_order(access_token,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(TDAuth_Info.account_num)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n post_order_response=requests.post(url=orders_url,headers=headers,json=json_request)\r\n\r\n return post_order_response", "def create_get_aggregate_trades_request(self, symbol: str,\n from_id: Optional[int] = None,\n start_time: Optional[int] = None,\n end_time: Optional[int] = None,\n limit: Optional[int] = None\n ) -> Request:", "def list_orders(\n page: int = 1,\n limit: int = 15,\n duration: int = 180,\n current_user: CurrentUser = Depends(AuthService.verify_auth_access_token),\n):\n past_date = datetime.today().date() - timedelta(days=duration)\n orders = Order.objects.filter(\n is_active=True,\n order_session__user_id=current_user.user_id,\n created_at__gt=past_date,\n ).order_by(\"-created_at\")\n orders = orders[(page - 1) * limit : (page - 1) * limit + limit]\n 
orders = parse_obj_as(List[OrderResponse], list(orders))\n return JSONResponse(\n content=[order.simple_dict() for order in orders],\n status_code=status.HTTP_200_OK,\n )", "def extractHourlyOrders(orders, fromDate, toDate=datetime.today()):\n orderTimeStamps = getTimeStampsFromMongoOrderData(orders)\n toDate = datetime.today() + timedelta(days=1)\n # Every day fromDate to toDate.\n dateRange = getDaysInDateRange(fromDate, toDate)\n\n orderDetailsForDateRange = []\n for date in dateRange:\n orderDetails = {\n \"date\": object,\n \"orders\": []\n }\n orderDetails[\"date\"] = date\n # Get the orders for this date\n ordersForDate = getOrdersForDate(date, orderTimeStamps)\n # If order number is zero just fill all hours with order amount = 0\n if len(ordersForDate) == 0:\n orderDetails[\"orders\"] = zeroFillOrdersForFullDay(date)\n orderDetailsForDateRange.append(orderDetails)\n continue\n\n for hour in hours:\n ordersAmountForHour = len(getOrdersForHour(hour, ordersForDate))\n # As each hour only contains XX:XX, it doesn't have a date.\n # Combine the current hour iteration with the current date iteration\n hour = datetime.combine(date, datetime.time(hour))\n if ordersAmountForHour == 0:\n info = {\n \"hour\": hour,\n \"amount\": 0\n }\n orderDetails[\"orders\"].append(info)\n else:\n info = {\n \"hour\": hour,\n \"amount\": ordersAmountForHour\n }\n orderDetails[\"orders\"].append(info)\n orderDetailsForDateRange.append(orderDetails)\n return orderDetailsForDateRange", "def request_active_orders(self, custom_id=None, **params):\n self.conn.send('getOrders', custom_id=custom_id, **params)", "def populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")", "def get_awards_from(self, start_date, batch_func=None, batch_number=10):\n self.params['dateStart'] = start_date\n\n request_url = self.nsf_api + self._build_field_request() + self._build_param_request()\n\n # xml_files = self._send_request_xml(request_url, batch_func=None, batch_number=1000)\n # data = self._construct_data_xml(xml_files)\n\n data = self._send_request_xml(request_url, batch_func=batch_func, batch_number=batch_number)\n\n return data", "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_orders(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params={**kwargs})", "async def test_retrieve_history_orders_by_time_range(self):\n history_orders = {\n 'historyOrders': [{\n 'clientId': 'TE_GBPUSD_7hyINWqAlE',\n 'currentPrice': 1.261,\n 'currentVolume': 0,\n 'doneTime': '2020-04-15T02:45:06.521Z',\n 'id': '46214692',\n 'magic': 1000,\n 'platform': 'mt5',\n 'positionId': '46214692',\n 'state': 'ORDER_STATE_FILLED',\n 'symbol': 'GBPUSD',\n 'time': '2020-04-15T02:45:06.260Z',\n 'type': 'ORDER_TYPE_BUY',\n 'volume': 0.07\n }],\n 'synchronizing': False\n }\n client.get_history_orders_by_time_range = AsyncMock(return_value=history_orders)\n start_time = datetime.now() - timedelta(seconds=1)\n end_time = datetime.now()\n actual = await api.get_history_orders_by_time_range(start_time, end_time, 1, 100)\n assert actual == history_orders\n client.get_history_orders_by_time_range.assert_called_with('accountId', start_time, end_time, 1, 100)", "def list(self, request, *args, **kwargs):\n data = self.process_query_params()\n if 
data:\n self.currency_client.get_exchange_rates_by_date_range(**data)\n return super().list(request, *args, **kwargs)", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def orders ( self, block: bool = True ):\n\tresult = OutstandingOrders(\n\t\tauth\t\t= self.auth,\n\t\taccount_nbr = self.account_nbr,\n\t\tblock\t\t= block\n\t).request()\n\n\treturn result", "def get_orders():\n\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\n\t# Checks if the reader exists in the database\n\treader = Reader.query.filter_by(email=email).first()\n\tif not reader:\n\t\treturn bad_request(\"Reader does not exist.\")\n\n\t# Gets a list of all the users requested rooms\n\troom_relation = RoomRequest.query \\\n\t\t.filter_by(reader_id=reader.id) \\\n\t\t.join(Room, Room.id == RoomRequest.room_id) \\\n\t\t.join(ApprovesRoomRequest, ApprovesRoomRequest.room_request_id == RoomRequest.id) \\\n\t\t.join(Reader, Reader.id == ApprovesRoomRequest.approver_id) \\\n\t\t.all()\n\troom_orders = [\n\t\t{\"room_id\": x.room_id, \"name\": x.room.name.capitalize(), \"approver\": x.request_approver.approver.email,\n\t\t\t\"date\": x.datetime_requested, \"type\": \"Room\"} for x in room_relation]\n\n\t# Gets a list of all the users requested access groups\n\tag_relation = AccessGroupRequest.query \\\n\t\t.filter_by(reader_id=reader.id) \\\n\t\t.join(AccessGroup, AccessGroup.id == AccessGroupRequest.ag_id) \\\n\t\t.join(ApprovesAgRequest, ApprovesAgRequest.ag_request_id == AccessGroupRequest.id) \\\n\t\t.join(Reader, Reader.id == ApprovesAgRequest.approver_id) \\\n\t\t.all()\n\tag_orders = [\n\t\t{\"ag_id\": x.ag_id, \"name\": x.ag.name.capitalize(), \"approver\": x.request_approver.approver.email,\n\t\t\"date\": x.datetime_requested, \"type\": \"Access group\"} for x in ag_relation\n\t]\n\n\treturn ok({\"orders\": room_orders + ag_orders})", "def fetch_agg_trades(\n self,\n symbol: str,\n from_id: int = None,\n start_time: int = None,\n end_time: int = None,\n limit: int = None\n ):\n try:\n if not symbol:\n raise ValueError(\"Did't got mandatory param symbol: {}\".format(symbol))\n if not self._host:\n raise ValueError(\"Did't got host param from config\")\n query_params = \"symbol={0}\".format(symbol)\n if from_id:\n query_params += \"&fromId={}\".format(from_id)\n if start_time:\n query_params += \"&startTime={}\".format(start_time)\n if end_time:\n query_params += \"&endTime={}\".format(end_time)\n if limit:\n query_params += \"&limit={}\".format(str(limit))\n endpoint = \"/api/v1/aggTrades\"\n url = self._host + endpoint + \"?\" + query_params\n LOG.debug(\"Try to get aggregate trades list by symbol. 
url:{}\".format(url))\n responce = requests.get(url=url, timeout=self._request_timeout)\n return responce.json()\n except Exception as ex:\n LOG.error(\"Error fired with fetch_agg_trades:{}\".format(ex.args[-1]))", "def test_api_can_search_employee_by_between_dates(self):\n res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))" ]
[ "0.634202", "0.59751403", "0.59119153", "0.5911235", "0.5864832", "0.57331246", "0.5706887", "0.5646497", "0.55046344", "0.5489912", "0.54788554", "0.5437924", "0.5368096", "0.5338739", "0.5332285", "0.53309786", "0.5319454", "0.5304169", "0.53006923", "0.5285335", "0.5283874", "0.5263397", "0.52572435", "0.52146983", "0.52131504", "0.5196625", "0.51965934", "0.5174273", "0.5170538", "0.5156198" ]
0.80785364
0
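A minimal usage sketch for the get_orders helper in the row above, assuming a bearer token has already been obtained from the OAuth refresh flow; the date format (yyyy-MM-dd), the 'FILLED' status filter, the response being a list of order objects, and the keys read from it are all assumptions, not values confirmed by this dataset.

# Hypothetical usage of get_orders; every literal below is an assumed placeholder
access_token = '<bearer token from the OAuth refresh flow>'
orders = get_orders(access_token,
                    start_date='2021-03-01',   # assumed yyyy-MM-dd format
                    end_date='2021-03-31',
                    status='FILLED')           # assumed status filter value
for order in orders:                           # endpoint assumed to return a list of orders
    print(order.get('orderId'), order.get('status'))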
Gets a specific order
access_token: used to log in to the TD Ameritrade site
order_ID: the ID of the order we are getting
def get_order_by_id(access_token, order_ID):
    orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num, order_ID)
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    # Make the get request to TD Ameritrade
    orders_data_json = requests.get(url=orders_url, headers=headers)
    return orders_data_json.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account_order(self, orderid):\n return self.get(f'orders/{orderid}', auth=True)", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n response = self.client.execute(request)\n\n return response", "def GetOrder(order_id): \n\t\"\"\"Method to get order\"\"\"\n\trequest = OrdersGetRequest(order_id)\n\tresponse = client.execute(request)\n\treturn response.result.__dict__[\"_dict\"]", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "def get_order(self, walletId, orderId):\n return", "async def get_order(cls, session, account, order_id):\n if not session.logged_in:\n raise Exception('Tastyworks session not logged in.')\n\n url = '{}/accounts/{}/orders/{}'.format(\n session.API_url,\n account.account_number,\n order_id\n )\n\n async with aiohttp.request('GET', url, headers=session.get_request_headers()) as resp:\n if resp.status != 200:\n raise Exception('Could not retreive the order')\n data = (await resp.json())['data']\n order = cls.from_dict(data)\n return order", "def delete_order(access_token,order_ID):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n order_status = requests.delete(url=orders_url,headers=headers)\r\n return order_status", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n # 3. Call PayPal to get the transaction\n response = self.client.execute(request)\n # 4. Save the transaction in your database. Implement logic to save transaction to your database for future reference.\n print('Status Code: ', response.status_code)\n print('Status: ', response.result.status)\n print('Order ID: ', response.result.id)\n print('Intent: ', response.result.intent)\n print('Links:')\n for link in response.result.links:\n print('\\t{}: {}\\tCall Type: {}'.format(\n link.rel, link.href, link.method))\n print('Gross Amount: {} {}'.format(response.result.purchase_units[0].amount.currency_code,\n response.result.purchase_units[0].amount.value))", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. 
for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def get_access_token(self, request) -> str or Exception:\n pass", "def query_order(self, order_id: str):\n return self._call_txtrader_api('query_order', {'id': order_id})", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def get_token(self, access_token):\n if access_token:\n return access_token\n elif self.default_access_token:\n return self.default_access_token\n else:\n return ''", "def _get_access_token(self) -> dict:\n demisto.debug('CDL - Fetching access token')\n try:\n oproxy_response = self._http_request('POST',\n '/cdl-token',\n json_data={'token': get_encrypted(self.refresh_token, self.enc_key)},\n timeout=(60 * 3, 60 * 3),\n retries=3,\n backoff_factor=10,\n status_list_to_retry=[400])\n except DemistoException as e:\n if re.match(BAD_REQUEST_REGEX, str(e)):\n demisto.error('The request to retrieve the access token has failed with 400 status code.')\n demisto.setIntegrationContext(self._cache_failure_times(demisto.getIntegrationContext()))\n raise e\n\n self.reset_failure_times()\n return oproxy_response", "def get_access_token(client_id, refresh_token):\n h = {\n \"content-type\": 'application/x-www-form-urlencoded'\n }\n\n d = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token,\n \"client_id\": client_id\n }\n\n r = requests.post(\"https://api.tdameritrade.com/v1/oauth2/token\", data=d, headers=h)\n\n return json.loads(r.text)[\"access_token\"]", "def request_access_token(self, *args, **kwargs):\n response = super().request_access_token(*args, **kwargs)\n if \"access_token\" not in response:\n response[\"access_token\"] = response[\"id_token\"]\n return response", "def get_access_token(*args, **kwargs):\n return get_access_token_async(*args, **kwargs).get_result()", "def get_access_token(self, token_url):\n # type: (str) -> str\n\n payload = {\n \"grant_type\" : \"client_credentials\",\n \"client_id\" : self.client_id,\n \"client_secret\" : self.client_secret,\n \"scope\" : self.client_scope,\n }\n headers = {\n \"accept\" : \"application/json\",\n }\n resp = requests.post(f\"{self.base_url}/{token_url}\", data=payload, headers=headers)\n try:\n if (resp.ok):\n return resp.json().get('access_token')\n except (ValueError):\n self.__log.error (\"Error obtaining access token with credentials\")", "def get_access_token(self):\n\n token_work = 
time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def build_access_token():\n return do_build_access_token(tenant_id='intility_tenant_id')", "def get_order(self, orderid):\n return self.get_orders(orderid=orderid)", "def obtain_access_token():\n\tpost_data = {'grant_type': 'client_credentials',\n\t\t\t\t 'client_id': conos_config['client_id'],\n\t\t\t\t 'client_secret': conos_config['client_secret']}\n\n\ttry:\n\t\tresponse = requests.post(url=conos_config['sts_url'], data=post_data, timeout=60) # 60 seconds\n\t\tif response.ok:\n\t\t\treturn 'Bearer ' + response.json()['access_token']\n\t\telse:\n\t\t\tprint('\\nERROR: Can not obtain access token')\n\t\t\tprint('\\nResponse error: ', response.json())\n\t\t\tresponse.raise_for_status()\n\texcept requests.exceptions.RequestException as e:\n\t\t# All exceptions that Requests explicitly raises inherit from requests.exceptions.RequestException\n\t\tprint(\"Root cause: \", e)\n\t\tsys.exit(1)", "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def post_order(access_token,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(TDAuth_Info.account_num)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n post_order_response=requests.post(url=orders_url,headers=headers,json=json_request)\r\n\r\n return post_order_response", "def rest_open_order(self, orderID):\n o = self.client.Order.Order_getOrders(filter=json.dumps({\"open\": True, \"orderID\": orderID})).result()\n if o[0].__len__():\n return o[0][0]\n return None", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except Exception:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = 
quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)" ]
[ "0.69531524", "0.67687076", "0.656684", "0.65420085", "0.65196705", "0.64443076", "0.64358777", "0.63968205", "0.6366604", "0.6312543", "0.6238618", "0.6228013", "0.61980855", "0.6166722", "0.6139371", "0.612343", "0.6123228", "0.6099858", "0.60809356", "0.60761327", "0.6069127", "0.6067195", "0.606622", "0.60578495", "0.6054082", "0.603133", "0.6011305", "0.6000005", "0.5975189", "0.5974637" ]
0.8059914
0
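A matching sketch for get_order_by_id from the row above; the order ID is a made-up placeholder and the response keys are assumptions about the TD Ameritrade order schema, since the helper simply returns the parsed JSON for a single order.

# Hypothetical usage of get_order_by_id; order ID and response keys are assumed
access_token = '<bearer token from the OAuth refresh flow>'
order = get_order_by_id(access_token, order_ID='1234567890')
print(order.get('status'), order.get('enteredTime'))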
Deletes orders on the TD Ameritrade Site
access_token: token used to access the TD Ameritrade site
order_ID: the ID of the order to delete
def delete_order(access_token, order_ID):
    orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num, order_ID)
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    order_status = requests.delete(url=orders_url, headers=headers)
    return order_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_delete_order(request, id):\n\n close_old_connections()\n\n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Delete the order.\n Order.objects.get(id=id).delete()\n\n close_old_connections()\n \n return HttpResponse('Deleted.')", "def DeleteOrder(self, tenantId, orderids):\n\t\tif tenantId and orderids:\n\t\t\torders = DBSession.query(Order).filter(Order.Id.in_(orderids), Order.TenantId == tenantId).all()\n\t\t\tfor o in orders:\n\t\t\t\tDBSession.delete(o)\n\t\tpass", "async def delete_order(request: web.Request, order_id) -> web.Response:\n return web.Response(status=200)", "def test_order_can_be_deleted_by_owner(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertEqual(result[\"message\"], \"Order deleted succesfully\")", "def test_order_cannot_be_deleted_if_dont_exist(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/5',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 404)\n\t\tself.assertEqual(result[\"message\"], \"That order is not available\")", "def delete_order(order_id):\n with MY_CONNECTION as connection:\n connection.execute(\"DELETE FROM Orders WHERE id_order=?\", (order_id,))", "def get_order_by_id(access_token,order_ID):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers)\r\n return orders_data_json.json()", "def _delete_all_orders(self):\n for order_ref in self.created_entities['order']:\n order = self.barbicanclient.orders.get(order_ref)\n if order.secret_ref:\n self.barbicanclient.secrets.delete(order.secret_ref)\n # see if containers are supported\n container_attr_exists = getattr(order, \"container_ref\", None)\n if container_attr_exists and order.container_ref:\n self.barbicanclient.containers.delete(order.container_ref)\n\n self.barbicanclient.orders.delete(order_ref)", "def service_token_delete(self):\n\n self._client.delete(\n \"{}/servicetoken\".format(LKECluster.api_endpoint), model=self\n )", "def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument('orderId', type=int, required=True,\n help='Order ID to cancel')\n args = parser.parse_args()\n return sync.cancel_order(args['orderId'])", "def test_delete_o_auth_access_token(self):\n pass", "def run(self):\n keys = self.admin_barbican.create_key()\n 
self.admin_barbican.orders_delete(keys.order_ref)", "def delete_orders_on(user, order_date):\n db.session.query(BreadOrder).filter(\n BreadOrder.user_id == user.id,\n BreadOrder.date_id == order_date.id\n ).delete()\n db.session.commit()", "def delete_orders_by_id(request):\n json_value = request.POST.keys()[0] # first occurence because the post pass a string and not an array\n data = json.loads(json_value)\n\n # deleting local OrderDetail record\n for order_detail in data:\n record = OrderDetail.objects.get(ac_od_id=int(order_detail))\n record.delete()\n\n return HttpResponse(json.dumps('ok'), mimetype='application/json')", "def post_order(access_token,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(TDAuth_Info.account_num)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n post_order_response=requests.post(url=orders_url,headers=headers,json=json_request)\r\n\r\n return post_order_response", "def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")", "def delete_order(self, offer_id, session=None):\n date_time_sent = datetime.datetime.utcnow()\n method = 'offers/{}'.format(offer_id)\n response = self.request('DELETE', self.client.urn_edge, method, session=session)\n date_time_received = datetime.datetime.utcnow()\n return self.process_response(\n response.json(), resources.Order, date_time_sent, date_time_received\n )", "def test_delete_order(self):\n order = PizzaOrder.objects.get(id=self.response.data['id'])\n response = self.client.delete(\n reverse('order_details',\n kwargs={'order_id': order.id}),\n format='json',\n follow=True\n )\n\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_delete_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.deletes_order(1), \"Order successfully trashed\")\n self.assertEqual(len(self.orders_list.get_orders()), 1)", "def replace_order(access_token,order_ID,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n replace_order_response=requests.put(url=orders_url,headers=headers,json=json_request)\r\n\r\n return replace_order_response", "def delete_order():", "def market_cancel(self, orderid):\n return self.delete(f'orders/{orderid}', 
auth=True)", "def m_ts_OrderDeleted(self, sender, e):\r\n print(\"Order was deleted.\")", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "def delete(self):\n return self.request.delete_cookie('token')", "def delete_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n token = data.get(\"token\")\n\n valid, message = is_token_valid(token, address)\n if not valid:\n return jsonify(error=message), 400\n\n force_expire_token(token)\n\n return jsonify(success=\"Token has been deactivated.\")", "def delete_bulk_orders(self, event_ids=None, market_ids=None, runner_ids=None, offer_ids=None, session=None):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n method = 'offers'\n response = self.request('DELETE', self.client.urn_edge, method, data=params, session=session)\n date_time_received = datetime.datetime.utcnow()\n return self.process_response(\n response.json().get('offers', []), resources.Order, date_time_sent, date_time_received\n )", "async def revoke_order(self, symbol, order_id, client_order_id):\n uri = \"/fapi/v1/order\"\n params = {\n \"symbol\": symbol,\n \"orderId\": order_id,\n \"origClientOrderId\": client_order_id,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"DELETE\", uri, params=params, auth=True)\n return success, error", "def purge_order(self, order_id):\n order = self.load_order(order_id)\n for authz_id in order.authorization_ids:\n authz = self.load_authorization(authz_id)\n for chall_id in authz.challenge_ids:\n self.delete('challenge', chall_id)\n self.delete('authorization', authz_id)\n if order.certificate_id:\n self.delete('certificate', order.certificate_id)\n self.delete('order', order_id)", "def cancel_order(user_data):\n can_res = requests.delete(url=\"http://127.0.0.1:5000/cancel_order\", json=user_data)\n return can_res.text" ]
[ "0.65937054", "0.6493973", "0.64761704", "0.620927", "0.6152156", "0.61286193", "0.60274494", "0.6003759", "0.5973157", "0.59723866", "0.59202516", "0.5837919", "0.58353496", "0.5781898", "0.5772327", "0.5762132", "0.5739936", "0.5728079", "0.5668074", "0.5599602", "0.5576779", "0.55575025", "0.55300224", "0.5521605", "0.5451503", "0.54139423", "0.5388679", "0.53560483", "0.53080803", "0.5298715" ]
0.8097233
0
Posts an order to the TD Ameritrade website access_token token used to access the TD Ameritrade site json_request the order request in json format Returns the response to the post request
def post_order(access_token,json_request): orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(TDAuth_Info.account_num) #The header for placing in order needs to define the input type (json) headers = {'Authorization':'Bearer {}'.format(access_token), 'Content-Type':'application/json'} #Post the order on TD Ameritrade and check the response post_order_response=requests.post(url=orders_url,headers=headers,json=json_request) return post_order_response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_order(access_token,order_ID,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n replace_order_response=requests.put(url=orders_url,headers=headers,json=json_request)\r\n\r\n return replace_order_response", "def test_post_order(\n test_client, pydex_client, make_veth_signed_order\n):\n order = make_veth_signed_order(\n asset_type=\"LONG\",\n qty=0.0001,\n price=0.5,\n side=\"BUY\",\n )\n res = test_client.post(\n pydex_client.post_order_url,\n json=order.to_json()\n )\n assert res.status_code == 200\n # Retrieve order via get order endpoint\n res = test_client.get(\n \"{}{}\".format(pydex_client.get_order_url, order.hash)\n )\n assert res.status_code == 200\n res = res.get_json()\n assert_valid(res, \"/relayerApiOrderSchema\")\n assert res[\"order\"] == order.to_json()", "def post_order(self, order):\n url = self.build_url(\"orders/\")\n res = post(url, json=order)\n if res.ok:\n return res.json()[\"id\"]\n return None", "def post(cls):\n data = request.get_json() # token + list of item ids [1, 2, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n # Iterate over items and retrieve them from the database\n for _id, _count in item_id_quantities.most_common():\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n \n items.append(ItemInOrder(item_id=_id, quantity=_count))\n \n order = OrderModel(items = items, status=\"pending\")\n order.save_to_db()\n\n order.set_status(\"failed\") # assume the order would fail until it's completed\n #order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n\n return order_schema.dump(order), 200", "async def place_order(request: web.Request, body) -> web.Response:\n body = Order.from_dict(body)\n return web.Response(status=200)", "def create_order(self, request):\n data = request.data\n address_id = request.query_params[\"address_id\"]\n # data._mutable = True\n data[\"user\"] = request.user.id\n data[\"address\"] = address_id\n serializer = OrderCreateSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({\"result\": serializer.data, \"message\": \"Done\", \"status\": True},\n status=status.HTTP_201_CREATED)\n return Response({\"result\": serializer.errors, \"message\": \"Done\", \"status\": False},\n status=status.HTTP_400_BAD_REQUEST)", "def test_post(self):\n response = self.client.post('/api/v1/parcels', data = json.dumps(self.order), content_type='application/json')\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"Order placed Successfully\", msg = \"Order registration failed\")\n self.assertEqual(response.status_code, 201)", "def post(self):\n order_json = request.json\n if not order_json:\n logging.error('Attempt to create order without json')\n return {'message': 'Wrong data'}, 400\n try:\n worker = Worker.query.filter_by(uuid=order_json['worker_uuid']).first()\n food = Food.query.filter_by(uuid=order_json['food_uuid']).first()\n order = Order(\n worker=worker,\n food=food,\n quantity=order_json['quantity']\n )\n db.session.add(order)\n db.session.commit()\n except (ValueError, KeyError):\n 
logging.error(f'Order was not created due wrong data')\n return {'message': 'Wrong data'}, 400\n logging.info(f'Order with uuid {order.uuid} was created')\n return {'message': 'Created successfully', 'uuid': order.uuid}, 201", "def test_create_order(self):\n # Test with valid data format and right auth token\n response = self.client.post('/api/v1/parcels',\n data=json.dumps(self.order), content_type='application/json', headers=self.user_token_dict)\n self.assertTrue('order' in json.loads(response.data))\n self.assertEqual(json.loads(response.data)['message'], 'Order created')\n self.assertEqual(response.status_code, 201)", "async def create_order(request):\n async with transaction(request.app) as trans:\n order_id = await trans.connection.scalar(\n tables.software_order.insert().values(\n purchaser_id=request['auth']['account']['id']\n )\n )\n await trans.commit()\n return web.json_response({'order_id': order_id})", "def test_make_order_by_authorised_user(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(result[\"message\"], \"Order succesfully posted\")\n\t\tself.assertEqual(response.status_code, 201)", "def post(self):\n \n access_token = accessToken.gerated_access_token\n api_url = \"https://sandbox.safaricom.co.ke/mpesa/stkpush/v1/processrequest\"\n headers = { \"Authorization\": \"Bearer %s\" % access_token }\n request = {\n \"BusinessShortCode\": constants.BusinessShortCode ,\n \"Password\": generated_password,\n \"Timestamp\": generated_timestamp,\n \"TransactionType\": \"CustomerPayBillOnline\",\n \"Amount\": \"1\",\n \"PartyA\": \"254705275702\",\n \"PartyB\": constants.BusinessShortCode,\n \"PhoneNumber\": \"\", #pass in the phone number that will be prompted to enter the pin\n \"CallBackURL\": \"https://test.com\", #pass in an actual callback url if you have one\n \"AccountReference\": \"Test100\",\n \"TransactionDesc\": \"Test payment\"\n }\n \n response = requests.post(api_url, json = request, headers=headers)\n # print (response.text)\n\n return {\"response\":response.json()}", "def make_post_request(self, url, data):\n auth = (self.AUTH_ID, self.AUTH_TOKEN)\n headers = {'content-type': 'application/json'}\n return requests.post(url, data=data, auth=auth, headers=headers)", "def order_test(self, **params):\n return self._post('order/test', signed=True, data=params)", "def test_create_order(self):\n # Test with wrong data type\n response = self.client.post('/api/v1/parcels',\n data=json.dumps(['jay', 'bad', 'data']), content_type='application/json', headers=self.user_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'Payload must be a dictionary(object)'})", "def create_new_order():\n res = requests.get(url='http://127.0.0.1:5000/new_order')\n return res.text", "def test_make_new_order(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json=self.ORDER, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response_as_json(\n response)['order']['item_name'], self.ORDER['item_name'])\n self.assertEqual(response_as_json(\n response)['order']['order_id'], 1)\n 
self.assertEqual(response_as_json(\n response)['order']['quantity'], 1)\n self.assertEqual(response_as_json(\n response)['order']['total_order_cost'], 200)", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "def execute_order(order: dict):\n\n # TODO: Check validity, e.g., against filters (min, max) and our own limits\n\n if App.config[\"trader\"][\"test_order_before_submit\"]:\n try:\n log.info(f\"Submitting test order: {order}\")\n test_response = App.client.create_test_order(**order) # Returns {} if ok. Does not check available balances - only trade rules\n except Exception as e:\n log.error(f\"Binance exception in 'create_test_order' {e}\")\n # TODO: Reset/resync whole account\n return\n\n if App.config[\"trader\"][\"simulate_order_execution\"]:\n # TODO: Simply store order so that later we can check conditions of its execution\n print(order)\n print(App.signal)\n pass\n else:\n # -----\n # Submit order\n try:\n log.info(f\"Submitting order: {order}\")\n order = App.client.create_order(**order)\n except Exception as e:\n log.error(f\"Binance exception in 'create_order' {e}\")\n return\n\n if not order or not order.get(\"status\"):\n return None\n\n return order", "def test_make_order_with_non_json_data(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), data='item_name=Guacamole&item_price=200')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. 
Request data must be in json format')", "def delete_order(access_token,order_ID):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n order_status = requests.delete(url=orders_url,headers=headers)\r\n return order_status", "async def post_submit_order(self, symbol, amount, price):\n order = await self.client.submit_order(\n symbol=symbol,\n market_type=Order.Type.EXCHANGE_MARKET,\n amount=amount,\n price=price\n )\n return order", "def create_order(order):\n response = requests.post(\n settings.SHOPIFY_ORDERS_URL,\n auth=(settings.SHOPIFY_API_KEY, settings.SHOPIFY_PASSWORD),\n json={\"order\": order},\n )\n if response.status_code != 201:\n raise ShopifyResponseException(\n f\"The Shopify API returned an invalid response:\\n{response.text}\"\n )", "def auth_postrequest_json(self, url, token, params):\n headers = {\n \"Authorization\": token,\n \"Content-Type\": \"application/json\"\n }\n\n response = self.postrequest_json(url, params, headers)\n return response", "def _post(self, json_data):\n headers = {}\n if self.token is not None:\n headers[\"Authorization\"] = f\"Bearer {self.token}\"\n response = requests.post(self.api_url, json=json_data, headers=headers)\n response.raise_for_status()\n return response.json()", "def get_order_by_id(access_token,order_ID):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers)\r\n return orders_data_json.json()", "def test_make_order_without_any_request_data(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={}, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. 
Missing required param')", "async def update_order(request):\n # request_schema = UpdateOrderSchema(strict=True)\n # payload = request_schema.load(await request.json()).data\n return web.json_response({'result': 'ok'})", "def post(cls):\n data = request.get_json() # token ,item_ids [1, 3, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n for _id, count in item_id_quantities.most_common(): # [(5,3),(3,2),(1,1)]\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n\n \"\"\"ItemsInOrder get item_id and quantity, however\n order_id will be set later on,\n when items is passed into OrderModel, because back_populates=\"order\"\n it goes over to order column of ItemsInOrder table,\n and set order_id for each of those item in OrderModel\n to be the order to which you have added those items\"\"\"\n items.append(ItemsInOrder(item_id=_id, quantity=count))\n\n # items is a list of ItemsInOrder obj\n order = OrderModel(items=items, status=\"pending\") # pending until send to Stripe\n order.save_to_db() # this does not submit to Stripe\n\n try:\n order.set_status(\"failed\") # assume the order would fail until it's completed\n order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n return order_schema.dump(order), 200\n # the following error handling is advised by Stripe, although the handling implementations are identical,\n # we choose to specify them separately just to give the students a better idea what we can expect\n except error.CardError as e:\n # Since it's a decline, stripe.error.CardError will be caught\n return e.json_body, e.http_status\n except error.RateLimitError as e:\n # Too many requests made to the API too quickly\n return e.json_body, e.http_status\n except error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n return e.json_body, e.http_status\n except error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n return e.json_body, e.http_status\n except error.APIConnectionError as e:\n # Network communication with Stripe failed\n return e.json_body, e.http_status\n except error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n return e.json_body, e.http_status\n except Exception as e:\n # Something else happened, completely unrelated to Stripe\n print(e)\n return {\"message\": gettext(\"order_error\")}, 500", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n #3. Call PayPal to get the transaction\n response = self.client.execute(request)\n return response\n #4. Save the transaction in your database. Implement logic to save transaction to your database for future reference." ]
[ "0.7507134", "0.7014159", "0.6829043", "0.66372055", "0.6441789", "0.6441514", "0.642561", "0.6387601", "0.6369167", "0.63190925", "0.62325263", "0.62321603", "0.61621636", "0.61475813", "0.6122801", "0.61213946", "0.6105965", "0.6085061", "0.6072476", "0.60601467", "0.6041702", "0.6038962", "0.60286784", "0.59338844", "0.590689", "0.59028953", "0.58950967", "0.5886539", "0.5812674", "0.5806364" ]
0.91650337
0
Replaces an order on the TD Ameritrade website. access_token: token used to access the TD Ameritrade site. order_ID: ID of the order to replace. json_request: the new order request in json format to replace the old one. Returns the response to the replace order request.
def replace_order(access_token,order_ID,json_request):
    orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)

    #The header for placing in order needs to define the input type (json)
    headers = {'Authorization':'Bearer {}'.format(access_token),
               'Content-Type':'application/json'}

    #Post the order on TD Ameritrade and check the response
    replace_order_response=requests.put(url=orders_url,headers=headers,json=json_request)

    return replace_order_response
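A minimal usage sketch for the document above, assuming `access_token` has already been obtained through the OAuth flow handled elsewhere in this codebase; the order ID and payload are illustrative placeholders, and the payload fields follow the same schema used by build_order_request() later in this dump.

# Hypothetical usage of replace_order(); the order ID and payload below are
# made-up placeholders, not values taken from the dataset.
new_order = {
    'session': 'NORMAL',
    'duration': 'DAY',
    'orderType': 'LIMIT',
    'price': '10.50',
    'orderStrategyType': 'SINGLE',
    'orderLegCollection': [{
        'instruction': 'BUY',
        'quantity': 1,
        'instrument': {'symbol': 'AAPL', 'assetType': 'EQUITY'}
    }]
}

response = replace_order(access_token, '123456789', new_order)
print(response.status_code)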
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def update_order(request):\n # request_schema = UpdateOrderSchema(strict=True)\n # payload = request_schema.load(await request.json()).data\n return web.json_response({'result': 'ok'})", "def post_order(access_token,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(TDAuth_Info.account_num)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n post_order_response=requests.post(url=orders_url,headers=headers,json=json_request)\r\n\r\n return post_order_response", "def replace_order(self, custom_id=None, **params):\n self.conn.send('cancelReplaceOrder', custom_id=custom_id, **params)", "def delete_order(access_token,order_ID):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n order_status = requests.delete(url=orders_url,headers=headers)\r\n return order_status", "def test_an_order_can_be_edited(self):\n\n\t\tnew_data = {\n\t\t\t\t\t\"owner\": \"Pemwa\",\n\t\t\t\t\t\"meal_name\": \"Burger\",\n\t\t\t\t\t\"quantity\": 8\n\t\t\t\t\t }\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().put(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tnew_data), content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 200)", "def test_update_order_with_non_json_data(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), data='order_status=rejected')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. 
Request data must be in json format')", "async def place_order(request: web.Request, body) -> web.Response:\n body = Order.from_dict(body)\n return web.Response(status=200)", "def put(self, order_id):\n\n ###############\n # json_input = self.get_input()\n # log.pp(json_input)\n # key = 'request_id'\n # order_id = json_input.get(key)\n # if order_id is None:\n # error = \"Order ID parameter '%s': missing\" % key\n # return self.send_errors(error, code=hcodes.HTTP_BAD_REQUEST)\n # else:\n # order_id = str(order_id)\n\n ###############\n log.info(\"Order id '%s' has to be restricted\", order_id)\n\n # Create the path\n log.info(\"Order request: %s\", order_id)\n imain = self.get_service_instance(service_name='irods')\n order_path = self.get_order_path(imain, order_id)\n log.debug(\"Order path: %s\", order_path)\n\n ###############\n error = \"Order '%s' not enabled or you have no permissions\" % order_id\n if not imain.is_collection(order_path):\n return self.send_errors(error, code=hcodes.HTTP_BAD_REQUEST)\n else:\n metadata, _ = imain.get_metadata(order_path)\n key = 'restricted'\n if key not in metadata:\n return self.send_errors(error, code=hcodes.HTTP_BAD_REQUEST)\n else:\n string = metadata.get(key)\n import json\n restricted_users = json.loads(string)\n # log.pp(restricted_users)\n if len(restricted_users) < 1:\n return self.send_errors(\n error, code=hcodes.HTTP_BAD_REQUEST)\n\n ###############\n obj = self.init_endpoint()\n if obj.username not in restricted_users:\n return self.send_errors(error, code=hcodes.HTTP_BAD_REQUEST)\n\n ###############\n # irods copy\n label = \"%s_%s.%s\" % (obj.username, '123', 'zip')\n ipath = self.complete_path(order_path, label)\n self.stream_to_irods(imain, ipath)\n log.verbose(\"Uploaded: %s\", ipath)\n\n ###############\n # define zip final path\n from utilities import path\n filename = 'order_%s' % order_id\n # zip_file_name = path.append_compress_extension(filename)\n zip_ipath = path.join(order_path, filename, return_str=True)\n\n ###############\n # launch container\n self.ingest_restricted_zip(imain, order_id, zip_ipath, ipath)\n\n ###############\n response = {\n 'order_id': order_id,\n 'status': 'filled',\n }\n return self.force_response(response)", "def test_update_order_by_user(client):\n resp = put_json(client, '/v1/orders/1/', {\n \"status\": \"Complete\", },\n headers={'Authorization': 'Bearer ' + user_two(client)})\n assert resp.status_code == 401\n assert b'Not authorized' in resp.data", "def finish_order(self, order_id):\n request_name = \"get_order_info\"\n\n orders = self.make_request(request_name, url_id=order_id)\n if orders is None:\n print(\"Unsuccessful updating order\")\n return\n order = orders[0]\n update_dict = dict()\n for key in order:\n if str(key)[0] == \"_\":\n continue\n try:\n update_dict[key.encode('utf-8')] = order[key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = order[key]\n\n update_dict['status'] = 'Complete'\n resp = self.make_request('set_inventory_order', url_id=order_id, arguments=update_dict)", "def test_update_an_order(self):\n order = PizzaOrder.objects.get(id=self.response.data['id'])\n updated_order = {\"customer\": {\n \"first_name\": \"Lara\",\n \"last_name\": \"Tanbari\",\n \"address\": \"Coppistr22, 10365 Berlin\"\n },\n \"size\": \"SMALL\"\n }\n res = self.client.put(\n reverse('order_details',\n kwargs={'order_id': order.id}),\n updated_order,\n format='json'\n )\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def put(self):\n request_data = 
json.loads(request.data)\n print(request_data)\n order_id = request_data['order_id']\n status = request_data['status']\n MM.update_order_status(ObjectId(order_id), status)\n return {\"message\": \"Order Status Updated\"}, 200", "def test_update_order(self):\n # create a order to update\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # update the order\n new_order = resp.get_json()\n new_order['product_id'] = 2\n resp = self.app.put('/orders/{}'.format(new_order['id']),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n updated_order = resp.get_json()\n self.assertEqual(updated_order['product_id'], 2)", "def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')", "def place_order(id):\n data = services.validate_order(id, request.json, current_user)\n if \"error\" in data:\n return jsonify({\"message\": data[\"message\"]}), HTTPStatus.BAD_REQUEST\n order_key = f\"PENDING_ORDER#{current_user.id}#{data['event'].id}\"\n session[order_key] = [\n {\n \"package_id\": package.id,\n \"event_id\": data[\"event\"].id,\n \"sponsor_id\": current_user.id,\n \"price\": float(package.price),\n \"name\": package.name\n }\n for package in data[\"packages\"]\n ]\n session[\"user\"] = \"Facebook\"\n return jsonify({\"url\": url_for(\"payments.checkout\", event_id=data[\"event\"].id)})", "def get_order_by_id(access_token,order_ID):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers)\r\n return orders_data_json.json()", "def test_update_order_with_no_status(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. 
Missing required param')", "def test_update_order_failure(self):\n # create a order to update\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # update the order\n new_order = resp.get_json()\n new_order['product_id'] = 2\n resp = self.app.put('/orders/{}'.format(5),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def replace_item(self, order_id, item_obj):\n for order in self.order_lst:\n if int(order.get_orderId()) == int(order_id):\n order.replace_item(item_obj)", "def modify_order(self, order, price, size):\r\n request_params = {\r\n \"price\": str(price),\r\n \"size\": str(size)\r\n }\r\n\r\n method = self.private_endpoints['modify_order']['method']\r\n url = self.base_url + self.private_endpoints['modify_order']['url'].format(orderId=order)\r\n req = requests.request(method, url, headers=self.get_auth_headers(nonce=True), json=request_params)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return True\r\n else:\r\n return res", "def test_admin_change_order_status(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v1/parcels/35420', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v1/parcels/35uh420', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})", "def cancel_aws_order(order_id):\n mws_shipments = mws.OutboundShipments(\n access_key=MWS_ACCESS_KEY, secret_key=MWS_SECRET_KEY,\n account_id=MWS_MERCHANT_ID, region=\"FR\")\n\n data = dict(Action=\"CancelFulfillmentOrder\", SellerFulfillmentOrderId=order_id)\n return mws_shipments.make_request(data, \"POST\")", "def test_make_similar_order(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json=self.ORDER, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Order already exists')\n self.assertTrue(response_as_json(\n response)['order'])", "async def handle_cancel_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:", "def post_order(self, order):\n url = self.build_url(\"orders/\")\n res = post(url, json=order)\n if res.ok:\n return res.json()[\"id\"]\n return None", "async def handle_new_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:", "async def handle_cancel_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n #3. Call PayPal to get the transaction\n response = self.client.execute(request)\n return response\n #4. Save the transaction in your database. 
Implement logic to save transaction to your database for future reference.", "def update_order(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n self.context.model.reorder_agenda_items(\n json.loads(self.request.get('sortOrder')))\n\n return JSONResponse(self.request).info(\n _('agenda_item_order_updated',\n default=u\"Agenda Item order updated.\")).dump()", "def editShipment(order_id, comment, appendComment, notify):\n generate_request = oAuth_magento()\n\n payload = {\"searchCriteria[filter_groups][0][filters][0][field]\": \"increment_id\",\n \"searchCriteria[filter_groups][0][filters][0][value]\": order_id,\n \"searchCriteria[filter_groups][0][filters][0][condition_type]\": \"eq\",\n \"fields\": \"items[entity_id]\"}\n\n response = requests.request(\"GET\", url=generate_request[0], headers=generate_request[1], params=payload)\n json_response = json.loads(response.text)\n entity_id = json_response['items'][0]['entity_id']\n\n if appendComment == \"true\": \n payload = {\"appendComment\": \"true\",\n \"notify\": notify,\n \"comment\": {\n \"extension_attributes\": {},\n \"comment\": comment,\n \"is_visible_on_front\": 1\n }\n }\n \n else:\n payload = {\"notify\": notify}\n\n shipment_response = requests.request(\"POST\", url=\"https://www.amsbio.com/index.php/rest/V1/order/\" + str(entity_id) + \"/ship\", headers=generate_request[1], data=json.dumps(payload))\n return json.loads(shipment_response.text)" ]
[ "0.66815823", "0.6566408", "0.6400565", "0.5917457", "0.58600324", "0.5854894", "0.5815817", "0.5705267", "0.56687856", "0.56081367", "0.5593533", "0.55626035", "0.55596524", "0.55464923", "0.5544397", "0.5501006", "0.5493626", "0.5448207", "0.5365709", "0.5352007", "0.5303004", "0.52779055", "0.5277444", "0.52763134", "0.5263312", "0.5260022", "0.52324045", "0.52169067", "0.5201724", "0.5185061" ]
0.91279715
0
Build a request to the TD Ameritrade API. session: trading session, 'Normal', 'AM', 'PM', or 'SEAMLESS' for extended hours and day hours. duration: 'DAY' or 'GOOD_TO_CANCEL'. orderType: 'MARKET' or 'LIMIT'. orderLegCollection: contains instruction ('BUY' or 'SELL'), symbol, assetType, quantity, etc. orderStrategy: 'SINGLE', 'TRIGGER', or 'OCO' (one cancels the other). price: if limit order, the price to buy or sell the order. Returns an order request.
def build_order_request(session,duration,orderType,orderLegCollection,orderStrategy,price=''):
    #Build order request based on parameters
    order_request = {
        'session':session,
        'duration':duration,
        'orderType':orderType,
        'orderLegCollection':orderLegCollection,
        'orderStrategyType':orderStrategy}

    if len(price)>0:
        order_request['price']=price

    return order_request
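A hedged sketch of how the document above might be combined with post_order(), which appears among the negatives in this dump: the symbol, quantity, and price are placeholder values, and the exact enum strings ('NORMAL', 'DAY', etc.) are assumed to follow TD Ameritrade's order schema.

# Illustrative sketch: build a single-leg limit order and submit it with
# post_order(), defined elsewhere in this codebase. All leg values are
# placeholders.
leg = [{
    'instruction': 'BUY',
    'quantity': 10,
    'instrument': {'symbol': 'MSFT', 'assetType': 'EQUITY'}
}]

order_request = build_order_request(
    session='NORMAL',
    duration='DAY',
    orderType='LIMIT',
    orderLegCollection=leg,
    orderStrategy='SINGLE',
    price='250.00')

response = post_order(access_token, order_request)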
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_get_active_orders_request(self, symbol: Optional[Symbol] = None) -> Request:\n\n method = \"GET\"\n path = \"/order\"\n params: Params = {}\n if symbol is not None:\n params[\"symbol\"] = symbol\n\n url = URL(self._create_url(path))\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)", "def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")", "async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {}\n market = None\n response = None\n if symbol is None:\n response = await self.privatePostAuthROrders(self.extend(request, params))\n else:\n market = self.market(symbol)\n request['symbol'] = market['id']\n response = await self.privatePostAuthROrdersSymbol(self.extend(request, params))\n #\n # [\n # [\n # 95408916206, # Order ID\n # null, # Group Order ID\n # 1653322349926, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653322349926, # Created Timestamp in milliseconds\n # 1653322349927, # Updated Timestamp in milliseconds\n # -10, # Amount remaining(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Order type\n # null, # Previous Order Type\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.11, # Price\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ],\n # ]\n #\n return self.parse_orders(response, market, since, limit)", "def make_order_request(self, page):\n return api_methods.Orders(\n page=page,\n per_page=self.PER_PAGE,\n from_date=self.from_date,\n start_date=self.start_date,\n end_date=self.end_date,\n deal_id=self.deal_id,\n ).call()", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "def create_get_trades_request(self, symbol: str,\n limit: Optional[int] = None\n ) -> Request:", "def order(\n self,\n order_class: str,\n symbol: str,\n order_type: str,\n duration: str,\n quantity: Optional[int],\n side: Optional[str],\n limit_price: float = None,\n stop_price: float = None,\n tag: str = None,\n account_id: str = None,\n option_symbol: str 
= None,\n option_symbol_0: str = None,\n side_0: str = None,\n quantity_0: int = None,\n option_symbol_1: str = None,\n side_1: str = None,\n quantity_1: int = None,\n option_symbol_2: str = None,\n side_2: str = None,\n quantity_2: int = None,\n option_symbol_3: str = None,\n side_3: str = None,\n quantity_3: int = None,\n ) -> OrderDetails:\n if account_id is None:\n account_id = self.default_account_id\n url = f\"/v1/accounts/{account_id}/orders\"\n params = {\n \"class\": order_class,\n \"symbol\": symbol,\n \"option_symbol\": option_symbol,\n \"side\": side,\n \"quantity\": quantity,\n \"type\": order_type,\n \"duration\": duration,\n \"price\": limit_price,\n \"stop\": stop_price,\n \"tag\": tag,\n \"option_symbol[0]\": option_symbol_0,\n \"side[0]\": side_0,\n \"quantity[0]\": quantity_0,\n \"option_symbol[1]\": option_symbol_1,\n \"side[1]\": side_1,\n \"quantity[1]\": quantity_1,\n \"option_symbol[2]\": option_symbol_2,\n \"side[2]\": side_2,\n \"quantity[2]\": quantity_2,\n \"option_symbol[3]\": option_symbol_3,\n \"side[3]\": side_3,\n \"quantity[3]\": quantity_3,\n }\n params = {k: v for k, v in params.items() if v is not None}\n data = self.post(url, params)\n res = OrderAPIResponse(**data)\n if res.errors:\n raise TradierOrderError(res.errors.error_list)\n return res.order", "def create_get_order_book_ticker_request(self, symbol: Optional[str] = None) -> Request:", "def create_get_aggregate_trades_request(self, symbol: str,\n from_id: Optional[int] = None,\n start_time: Optional[int] = None,\n end_time: Optional[int] = None,\n limit: Optional[int] = None\n ) -> Request:", "async def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n # returns the most recent closed or canceled orders up to circa two weeks ago\n await self.load_markets()\n request = {}\n if since is not None:\n request['start'] = since\n if limit is not None:\n request['limit'] = limit # default 25, max 2500\n market = None\n response = None\n if symbol is None:\n response = await self.privatePostAuthROrdersHist(self.extend(request, params))\n else:\n market = self.market(symbol)\n request['symbol'] = market['id']\n response = await self.privatePostAuthROrdersSymbolHist(self.extend(request, params))\n #\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group Order ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325122000, # Created Timestamp in milliseconds\n # 1653325122000, # Updated Timestamp in milliseconds\n # -10, # Amount remaining(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Order type\n # null, # Previous Order Type\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # \"4096\", # Flags, see parseOrderFlags()\n # \"POSTONLY CANCELED\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Notify(0 if False, 1 if True)\n # 0, # Hidden(0 if False, 1 if True)\n # null, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 
{\"_$F7\":1} # additional meta information about the order( _$F7 = IS_POST_ONLY(0 if False, 1 if True), _$F33 = Leverage(int))\n # ]\n # ]\n #\n return self.parse_orders(response, market, since, limit)", "def post_order(access_token,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(TDAuth_Info.account_num)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n post_order_response=requests.post(url=orders_url,headers=headers,json=json_request)\r\n\r\n return post_order_response", "async def fetch_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = None\n request = {}\n if symbol is not None:\n market = self.market(symbol)\n request['symbol'] = market['id']\n if since is not None:\n request['startTime'] = self.iso8601(since)\n if limit is not None:\n request['count'] = limit\n request = self.deep_extend(request, params)\n # why the hassle? urlencode in python is kinda broken for nested dicts.\n # E.g. self.urlencode({\"filter\": {\"open\": True}}) will return \"filter={'open':+True}\"\n # Bitmex doesn't like that. Hence resorting to self hack.\n if 'filter' in request:\n request['filter'] = self.json(request['filter'])\n response = await self.privateGetOrder(request)\n return self.parse_orders(response, market, since, limit)", "def create_order(order_type, quantity, action, price = None):\n order = Order()\n order.m_orderType = order_type\n order.m_totalQuantity = quantity\n order.m_action = action\n order.m_account = ConfigMgr.get_ib_config()['account_code']\n if order_type == 'LMT':\n order.m_lmtPrice = price\n elif order_type == 'STP':\n order.m_auxPrice = price\n return order", "async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n # order types \"limit\" and \"market\" immediatley parsed \"EXCHANGE LIMIT\" and \"EXCHANGE MARKET\"\n # note: same order types exist for margin orders without the EXCHANGE prefix\n orderTypes = self.safe_value(self.options, 'orderTypes', {})\n orderType = type.upper()\n if market['spot']:\n # although they claim that type needs to be 'exchange limit' or 'exchange market'\n # in fact that's not the case for swap markets\n orderType = self.safe_string_upper(orderTypes, type, type)\n stopPrice = self.safe_string_2(params, 'stopPrice', 'triggerPrice')\n timeInForce = self.safe_string(params, 'timeInForce')\n postOnlyParam = self.safe_value(params, 'postOnly', False)\n reduceOnly = self.safe_value(params, 'reduceOnly', False)\n clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')\n params = self.omit(params, ['triggerPrice', 'stopPrice', 'timeInForce', 'postOnly', 'reduceOnly', 'price_aux_limit'])\n amountString = self.amount_to_precision(symbol, amount)\n amountString = amountString if (side == 'buy') else Precise.string_neg(amountString)\n request = {\n # 'gid': 0123456789, # int32, optional group id for the order\n # 'cid': 0123456789, # int32 client order id\n 'type': orderType,\n 'symbol': market['id'],\n # 'price': self.number_to_string(price),\n 'amount': amountString,\n # 'flags': 0, # int32, https://docs.bitfinex.com/v2/docs/flag-values\n # 'lev': 10, # leverage for a derivative orders, the value 
should be between 1 and 100 inclusive, optional, 10 by default\n # 'price_trailing': self.number_to_string(priceTrailing),\n # 'price_aux_limit': self.number_to_string(stopPrice),\n # 'price_oco_stop': self.number_to_string(ocoStopPrice),\n # 'tif': '2020-01-01 10:45:23', # datetime for automatic order cancellation\n # 'meta': {\n # 'aff_code': 'AFF_CODE_HERE'\n # },\n }\n stopLimit = ((orderType == 'EXCHANGE STOP LIMIT') or ((orderType == 'EXCHANGE LIMIT') and (stopPrice is not None)))\n exchangeStop = (orderType == 'EXCHANGE STOP')\n exchangeMarket = (orderType == 'EXCHANGE MARKET')\n stopMarket = (exchangeStop or (exchangeMarket and (stopPrice is not None)))\n ioc = ((orderType == 'EXCHANGE IOC') or (timeInForce == 'IOC'))\n fok = ((orderType == 'EXCHANGE FOK') or (timeInForce == 'FOK'))\n postOnly = (postOnlyParam or (timeInForce == 'PO'))\n if (ioc or fok) and (price is None):\n raise InvalidOrder(self.id + ' createOrder() requires a price argument with IOC and FOK orders')\n if (ioc or fok) and exchangeMarket:\n raise InvalidOrder(self.id + ' createOrder() does not allow market IOC and FOK orders')\n if (orderType != 'MARKET') and (not exchangeMarket) and (not exchangeStop):\n request['price'] = self.price_to_precision(symbol, price)\n if stopLimit or stopMarket:\n # request['price'] is taken for stop orders\n request['price'] = self.price_to_precision(symbol, stopPrice)\n if stopMarket:\n request['type'] = 'EXCHANGE STOP'\n elif stopLimit:\n request['type'] = 'EXCHANGE STOP LIMIT'\n request['price_aux_limit'] = self.price_to_precision(symbol, price)\n if ioc:\n request['type'] = 'EXCHANGE IOC'\n elif fok:\n request['type'] = 'EXCHANGE FOK'\n # flag values may be summed to combine flags\n flags = 0\n if postOnly:\n flags = self.sum(flags, 4096)\n if reduceOnly:\n flags = self.sum(flags, 1024)\n if flags != 0:\n request['flags'] = flags\n if clientOrderId is not None:\n request['cid'] = clientOrderId\n params = self.omit(params, ['cid', 'clientOrderId'])\n response = await self.privatePostAuthWOrderSubmit(self.extend(request, params))\n #\n # [\n # 1653325121, # Timestamp in milliseconds\n # \"on-req\", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')\n # null, # unique ID of the message\n # null,\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325121798, # Millisecond timestamp of creation\n # 1653325121798, # Millisecond timestamp of update\n # -10, # Amount(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Type of the order: LIMIT, EXCHANGE LIMIT, MARKET, EXCHANGE MARKET, STOP, EXCHANGE STOP, STOP LIMIT, EXCHANGE STOP LIMIT, TRAILING STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC.\n # null, # Previous order type(stop-limit orders are converted to limit orders so for them previous type is always STOP)\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # 4096, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price(Stop Price for stop-limit orders, Limit Price for limit orders)\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) 
self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ]\n # ],\n # null, # CODE(work in progress)\n # \"SUCCESS\", # Status of the request\n # \"Submitting 1 orders.\" # Message\n # ]\n #\n status = self.safe_string(response, 6)\n if status != 'SUCCESS':\n errorCode = response[5]\n errorText = response[7]\n raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')\n orders = self.safe_value(response, 4, [])\n order = self.safe_value(orders, 0)\n return self.parse_order(order, market)", "def query_orders(self):\n return self._call_txtrader_api('query_orders', {})", "def _new_order_parameters(self, create_order_action):\n parameters = {}\n parameters['client_order_id'] = str(id(create_order_action))\n parameters['amount'] = str(create_order_action.amount)\n parameters['symbol'] = \"btcusd\"\n parameters['side'] = 'buy' if create_order_action.side == \\\n exchanges.Side.BID else 'sell'\n # The only supported type is a limit order.\n parameters['type'] = 'exchange limit'\n # A market order needs to be carried out as a limit order.\n if create_order_action.type == exchanges.Order.Type.MARKET:\n parameters['options'] = [\"immediate-or-cancel\"]\n # TODO: there is an opportunity to provide extra safety.\n temp_max_price = \"1000000\" # $1 million\n temp_min_price = \"0\"\n if create_order_action.side == exchanges.Side.BID:\n parameters['price'] = temp_max_price\n else:\n parameters['price'] = temp_min_price\n else:\n parameters['price'] = str(create_order_action.price)\n return parameters", "async def handle_get_active_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def request_active_orders(self, custom_id=None, **params):\n self.conn.send('getOrders', custom_id=custom_id, **params)", "async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n if limit is not None:\n request['count'] = limit\n if since is not None:\n request['since'] = self.parse_to_int(since / 1000)\n response = await self.privateGetUserSpotActiveOrders(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n orders = self.safe_value(data, 'orders', [])\n return self.parse_orders(orders, market, since, limit)", "def query_order(self, symbol, orderId):\n payload = {\n 'symbol': symbol,\n 'orderId': orderId\n }\n return self.signed_request('GET', '/api/v3/order', **payload)", "def create_get_price_ticker_request(self, symbol: Optional[str] = None) -> Request:", "def create_cancel_orders_request(self, symbol: Optional[Symbol] = None) -> Request:\n\n method = \"DELETE\"\n path = f\"/order\"\n url = URL(self._create_url(path))\n\n params: Params = {}\n if symbol is not None:\n params[\"symbol\"] = symbol\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)", "def create_get_order_book_request(self, symbol: str,\n limit: Optional[int] = None\n ) -> Request:", "async def get_order(cls, session, account, order_id):\n if not 
session.logged_in:\n raise Exception('Tastyworks session not logged in.')\n\n url = '{}/accounts/{}/orders/{}'.format(\n session.API_url,\n account.account_number,\n order_id\n )\n\n async with aiohttp.request('GET', url, headers=session.get_request_headers()) as resp:\n if resp.status != 200:\n raise Exception('Could not retreive the order')\n data = (await resp.json())['data']\n order = cls.from_dict(data)\n return order", "def get_orders():\n\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\n\t# Checks if the reader exists in the database\n\treader = Reader.query.filter_by(email=email).first()\n\tif not reader:\n\t\treturn bad_request(\"Reader does not exist.\")\n\n\t# Gets a list of all the users requested rooms\n\troom_relation = RoomRequest.query \\\n\t\t.filter_by(reader_id=reader.id) \\\n\t\t.join(Room, Room.id == RoomRequest.room_id) \\\n\t\t.join(ApprovesRoomRequest, ApprovesRoomRequest.room_request_id == RoomRequest.id) \\\n\t\t.join(Reader, Reader.id == ApprovesRoomRequest.approver_id) \\\n\t\t.all()\n\troom_orders = [\n\t\t{\"room_id\": x.room_id, \"name\": x.room.name.capitalize(), \"approver\": x.request_approver.approver.email,\n\t\t\t\"date\": x.datetime_requested, \"type\": \"Room\"} for x in room_relation]\n\n\t# Gets a list of all the users requested access groups\n\tag_relation = AccessGroupRequest.query \\\n\t\t.filter_by(reader_id=reader.id) \\\n\t\t.join(AccessGroup, AccessGroup.id == AccessGroupRequest.ag_id) \\\n\t\t.join(ApprovesAgRequest, ApprovesAgRequest.ag_request_id == AccessGroupRequest.id) \\\n\t\t.join(Reader, Reader.id == ApprovesAgRequest.approver_id) \\\n\t\t.all()\n\tag_orders = [\n\t\t{\"ag_id\": x.ag_id, \"name\": x.ag.name.capitalize(), \"approver\": x.request_approver.approver.email,\n\t\t\"date\": x.datetime_requested, \"type\": \"Access group\"} for x in ag_relation\n\t]\n\n\treturn ok({\"orders\": room_orders + ag_orders})", "def map_to_order(self, raw_order: HitbtcRawOrderModel) -> HitbtcOrderModel:\n\n id_ = raw_order[\"id\"]\n client_order_id = raw_order[\"clientOrderId\"]\n symbol = raw_order[\"symbol\"]\n side = raw_order[\"side\"]\n status = raw_order[\"status\"]\n type_ = raw_order[\"type\"]\n time_in_force = raw_order[\"timeInForce\"]\n quantity = Decimal(raw_order[\"quantity\"])\n price = Decimal(raw_order[\"price\"])\n cum_quantity = Decimal(raw_order[\"cumQuantity\"])\n created_at = raw_order[\"createdAt\"]\n updated_at = raw_order[\"updatedAt\"]\n post_only = raw_order[\"postOnly\"]\n raw_avg_price = raw_order.get(\"avgPrice\")\n avg_price = Decimal(\n raw_avg_price) if raw_avg_price is not None else raw_avg_price\n raw_stop_price = raw_order.get(\"stopPrice\")\n stop_price = Decimal(\n raw_stop_price) if raw_stop_price is not None else raw_stop_price\n expire_time = raw_order.get(\"expireTime\")\n raw_trades_report = raw_order.get(\"tradesReport\")\n trades_report = (self.map_to_symbol_trades(\n raw_trades_report) if raw_trades_report is not None else raw_trades_report)\n\n res = HitbtcOrderModel(\n id=id_,\n client_order_id=client_order_id,\n symbol=symbol,\n side=side,\n status=status,\n type=type_,\n time_in_force=time_in_force,\n quantity=quantity,\n price=price,\n cum_quantity=cum_quantity,\n created_at=created_at,\n updated_at=updated_at,\n post_only=post_only,\n avg_price=avg_price,\n stop_price=stop_price,\n expire_time=expire_time,\n trades_report=trades_report)\n\n return res", "def open_orders(self, **params):\n return self._get('option/openOrders', signed=True, params=params, 
version=None)", "def create_get_active_order_request(self, client_order_id: str,\n wait: Optional[int] = None\n ) -> Request:\n\n method = \"GET\"\n path = f\"/order/{client_order_id}\"\n url = URL(self._create_url(path))\n\n params: Params = {}\n if wait is not None:\n params[\"wait\"] = str(wait)\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)", "def new_order(self, symbol: Symbol, side: OrderSide, order_type: OrderType, quantity: str,\n price: Optional[str] = None,\n receive_window: Optional[int] = None):\n api_params = {\n 'symbol': symbol.value,\n 'side': side.value,\n 'type': order_type.value,\n 'quantity': quantity,\n 'timestamp': get_current_time_milliseconds()\n }\n\n if price is not None:\n api_params['price'] = price\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.post(path='/order', json_data=api_params)", "async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n orderType = self.capitalize(type)\n reduceOnly = self.safe_value(params, 'reduceOnly')\n if reduceOnly is not None:\n if (market['type'] != 'swap') and (market['type'] != 'future'):\n raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + market['type'] + ' orders, reduceOnly orders are supported for swap and future markets only')\n brokerId = self.safe_string(self.options, 'brokerId', 'CCXT')\n qty = self.parse_to_int(self.amount_to_precision(symbol, amount))\n request = {\n 'symbol': market['id'],\n 'side': self.capitalize(side),\n 'orderQty': qty, # lot size multiplied by the number of contracts\n 'ordType': orderType,\n 'text': brokerId,\n }\n if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):\n stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')\n if stopPrice is None:\n raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')\n else:\n request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))\n params = self.omit(params, ['stopPx', 'stopPrice'])\n if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):\n request['price'] = float(self.price_to_precision(symbol, price))\n clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')\n if clientOrderId is not None:\n request['clOrdID'] = clientOrderId\n params = self.omit(params, ['clOrdID', 'clientOrderId'])\n response = await self.privatePostOrder(self.extend(request, params))\n return self.parse_order(response, market)" ]
[ "0.6393432", "0.6084401", "0.6051327", "0.59973747", "0.5689987", "0.5622757", "0.5583636", "0.5567413", "0.5521866", "0.5508778", "0.5501757", "0.5360733", "0.5360709", "0.5330701", "0.53168505", "0.5225455", "0.5218088", "0.5207039", "0.5206021", "0.51971817", "0.5187848", "0.51797736", "0.51721895", "0.5170066", "0.51446235", "0.51443434", "0.51398206", "0.513237", "0.513148", "0.5122608" ]
0.72301
0
Get quote/price information for a stock. access_token: token used to access the TD Ameritrade site. ticker: the stock ticker symbol.
def get_quote(access_token,ticker):
    quote_url = 'https://api.tdameritrade.com/v1/marketdata/{}/quotes'.format(ticker)

    #The header for getting a quote needs to define the input type (json)
    headers = {'Authorization':'Bearer {}'.format(access_token),
               'Content-Type':'application/json'}

    #Make the get request to TD Ameritrade
    quote_data_json = requests.get(url=quote_url,headers=headers)
    return quote_data_json.json()
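A small usage sketch for the document above; 'AAPL' is an example symbol, and the response shape (a dict keyed by symbol with a lastPrice field) is an assumption about TD Ameritrade's quote payload rather than something stated in this row.

# Hypothetical usage; the keys below reflect an assumed response shape.
quote = get_quote(access_token, 'AAPL')
last_price = quote.get('AAPL', {}).get('lastPrice')
print(last_price)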
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_multi_quotes(access_token,tickers):\r\n quote_url = 'https://api.tdameritrade.com/v1/marketdata/quotes'\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Pass in the symbols as parameters\r\n params = {'symbol':tickers}\r\n\r\n #Make the get request to TD Ameritrade\r\n quote_data_json = requests.get(url=quote_url,headers=headers,params=params)\r\n return quote_data_json.json()", "def get_quote(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n quote_url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + ticker.upper() + '&apikey=' + key\r\n key_metrics_url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker.upper() + '&apikey=' + key\r\n\r\n quote_response = requests.get(quote_url)\r\n string = quote_response.json()\r\n\r\n key_metrics_response= requests.get(key_metrics_url)\r\n metrics_str = key_metrics_response.json()\r\n color_tag = None\r\n\r\n if quote_response and 'Global Quote' in string:\r\n\r\n current_price = round(float(string['Global Quote']['05. price']), 2)\r\n change = round(float(string['Global Quote']['09. change']), 2)\r\n change_pct = string['Global Quote']['10. change percent'][:5] + \"%\"\r\n previous_price = round(float(string['Global Quote']['08. previous close']), 2)\r\n\r\n yearly_high = metrics_str['52WeekHigh']\r\n mark_cap = round(int(metrics_str['MarketCapitalization'])/10E8, 2)\r\n mark_cap_str = str(mark_cap) + \"B\"\r\n\r\n if ticker not in self.holdings:\r\n self.holdings[ticker] = current_price\r\n tuples = [ticker, current_price, change, change_pct, yearly_high, mark_cap_str]\r\n\r\n if current_price > previous_price:\r\n color_tag = 'green'\r\n else:\r\n color_tag = 'red'\r\n self.treeview.insert(parent='', index='end', values=tuples, tags=(color_tag,))\r\n return current_price\r\n else:\r\n return None", "def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n symbol = urllib.parse.quote_plus(symbol)\n url = f\"https://cloud-sse.iexapis.com/stable/stock/{symbol}/quote?token={api_key}\"\n response = requests.get(url)\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"]\n }\n except (KeyError, TypeError, ValueError):\n return None", "def fetchPrice(self, token):\n i = 0\n cache = self.cache\n cacheLen = len(self.cache)\n stamp = time.time()\n minStamp = stamp - self.maxCacheAge\n data = None\n while True:\n if i >= cacheLen:\n break\n cacheToken, cacheStamp, cacheData = cache[i]\n if cacheStamp < minStamp:\n 
print(\"CMClient: expired cache data for %s\" % cacheToken)\n cache.pop(i)\n cacheLen -= 1\n continue\n if token == cacheToken:\n data = cacheData\n i += 1\n if data:\n print(\"CMClient: returning cached data for %s\" % token)\n return data\n data = helpers.getUriAsJson(self.tickerTemplate % token)\n cache.insert(0, (token, stamp, data))\n self.saveSettings()\n print(\"CMClient: returning new data for %s\" % token)\n return data", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n response = requests.get(f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(str(symbol))}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n flash(\"Please set API_KEY\", 'danger')\n return None\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"change\": quote[\"change\"],\n \"changePercent\": quote[\"changePercent\"],\n \"volume\": quote[\"volume\"],\n \"week52High\": quote[\"week52High\"],\n \"week52Low\": quote[\"week52Low\"],\n \"open\" :quote[\"open\"],\n \"high\" :quote['high'],\n \"low\" : quote[\"low\"]\n }\n except (KeyError, TypeError, ValueError):\n return None", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = app.config[\"API_KEY\"]\n response = requests.get(\n f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(symbol)}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Parse response\n try:\n\n quote = response.json()\n\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"isotime\": datetime.datetime.utcnow().isoformat()\n }\n\n except (KeyError, TypeError, ValueError):\n return None", "async def get_trade(self, symbol, limit=500):\n uri = \"/fapi/v1/trades\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit\n }\n success, error = await self.request(\"GET\", uri, params)\n return success, error", "def get_price_history_dates(access_token,ticker,start_date,end_date,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'startDate':start_date,\r\n 'endDate': end_date,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def get_put_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n print(url)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"table\")[1].findAll(\"td\")\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def gen_quote(token):\n\tmpk = global_storage.get_collector_master_public_key()\n\n\tif not sanetoken(token):\n\t\traise rpc_lib.RPCException(\"Token not sane.\")\n\n\t# FIXME: Should we be worried about race conditions here?\n\tdbentry = SellerDB.get(token=token)\n\tif dbentry != None: # This token is already in the database.\n\t\tindex, address, price = dbentry['index'], dbentry['address'], dbentry['price']\n\t\tassert address == bitcoin.electrum_address(mpk, index) # Index is the index used to generate address.(deterministic key generation)\n\telse:\n\t\t# Index is a large random number that combines with the master public key to yield the address. This combination takes constant time -- it doesn't hurt us to use a very large index. 
An attacker which knows index, mpk, address, and the _private_ key for address can get the private key for _any_ public key generated using mpk. To limit the damage if one private key gets leaked, we'll make index cryptographically securely random, even though it's probably unnecessary.\n\t\tindex = random.SystemRandom().getrandbits(128)\n\t\taddress = bitcoin.electrum_address(mpk, index)\n\t\t# Price is the price to buy a bond, in satoshi. (We don't use BTC because we don't want floating point errors.)\n\t\tprice = global_storage.bond_price\n\t\tSellerDB.put(token=token, index=index, address=address, price=price)\n\treturn (address, price)", "def __get(self, ticker_symbol):\n\n # n = name\n # l1 = last trade\n # c1 = change\n # p2 = change percent\n url = \"http://finance.yahoo.com/d/quotes.csv?s=%s&f=nl1c1p2\" % ticker_symbol\n req = Request(url)\n resp = urlopen(req) \n csv_str = resp.read().decode().strip()\n\n elems = csv_str.split(',')\n\n return dict(name=elems[0].strip('\"'), ask_price=elems[1], change=elems[2], changep=elems[3].strip('\"'))", "def quotes_request(ticker):\n api_key = conf['TD']['key']\n url = conf['TD']['quotesURL']\n\n params = {\n 'apikey': api_key,\n 'symbol': ticker\n }\n\n request = requests.get(\n url=url,\n params=params\n ).json()\n\n time.sleep(1)\n\n return pd.DataFrame.from_dict(\n request,\n orient='index'\n ).reset_index(drop=True)", "def get_mytrade(self, symbol):\n payload = {'symbol': symbol}\n return self.signed_request('GET', '/api/v3/myTrades', **payload)", "async def stock(self, ctx, ticker: str):\n symbols = await self.bot.aiojson(\"https://api.robinhood.com/quotes/\"\\\n f\"?symbols={ticker.upper()}\")\n if not symbols:\n await ctx.send(\"Stock not found. This stock is probably not tradeable on robinhood.\")\n return\n symbols_result = symbols[\"results\"][0]\n instrument = await self.bot.aiojson(symbols_result[\"instrument\"])\n fundamentals = await self.bot.aiojson(\n f\"https://api.robinhood.com/fundamentals/{ticker.upper()}/\")\n\n current_price = (symbols_result[\"last_trade_price\"] if\n \"last_extended_hours_trade_price\" in symbols_result\n else symbols_result[\"last_extended_hours_trade_price\"])\n diff = Decimal(Decimal(current_price) -\n Decimal(symbols_result[\"previous_close\"]))\n percentage = str(100 * diff / Decimal(current_price))[:6]\n\n if not percentage.startswith(\"-\"):\n percentage = \"+\" + percentage\n\n current_price_string = self.format_currency(current_price)\n diff_string = self.format_currency(diff)\n bid_price_string = self.format_currency(Decimal(symbols_result[\"bid_price\"]))\n ask_price_string = self.format_currency(Decimal(symbols_result[\"ask_price\"]))\n tradeable_string = (\n \":white_check_mark:\" if instrument[\"tradeable\"] else \":x:\")\n\n update_timestamp = parser.parse(symbols_result[\"updated_at\"])\n\n symbol = symbols_result[\"symbol\"]\n change_color = await self.get_stock_change_color(symbol)\n\n embed = discord.Embed(title=f\"{symbol}'s stocks info\",\n color=change_color,\n timestamp=update_timestamp)\n\n embed.add_field(name=\"Name\", value=instrument[\"name\"])\n embed.add_field(name=\"Current Price\", value=current_price_string)\n embed.add_field(name=\"Change from yesterday\", value=f\"{diff_string} ({percentage}%)\")\n embed.add_field(name=\"Bid size\", value=f\"{symbols_result['bid_size']} ({bid_price_string})\")\n embed.add_field(name=\"Ask size\", value=f\"{symbols_result['ask_size']} ({ask_price_string})\")\n embed.add_field(name=\"Current Volume\", value=fundamentals[\"volume\"])\n 
embed.add_field(name=\"Average Volume\", value=fundamentals[\"average_volume\"])\n embed.add_field(name=\"Tradeable on Robinhood\", value=tradeable_string)\n embed.add_field(name=\"Country\", value=f\":flag_{instrument['country'].lower()}:\")\n\n await ctx.send(embed=embed)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def lookup_symbol(symbol):\n\n try:\n res = requests.get(\"https://cloud.iexapis.com/stable/stock/\" +\n f\"{urllib.parse.quote_plus(symbol)}/quote?token={Config.API_KEY}\")\n res.raise_for_status()\n except requests.RequestException:\n return None\n\n try:\n quote = res.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n }\n except (KeyError, TypeError, ValueError):\n return None", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def get_call_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"td\" )\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def local_stocks_prices():\n url1 = \"https://api.invertironline.com/token\"\n\n data = {\n \"username\": usuario,\n \"password\": password,\n \"grant_type\": \"password\" \n }\n response = requests.post(url1, data=data)\n if response.status_code == 200:\n content = response.text\n access_key = token_key(content)\n\n url2 = f'https://api.invertironline.com/api/v2/Cotizaciones/Acciones/Merval/argentina'\n datos = requests.get(url2, headers={\n 'Authorization': 'Bearer '+access_key\n })\n datos = json.loads(datos.text)\n datos = datos['titulos']\n datos = clean_assets(datos)\n return datos", "def get_stock_price(stock):\n pass", "def fetch_price():\n\n url = \"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}", "def get_crypto_quote(symbol, info=None):\n id = get_crypto_info(symbol, info='id')\n url = urls.crypto_quote(id)\n data = helper.request_get(url)\n return(helper.filter(data, info))", "def get_quote(symbol):\n\t\n\t# For Step C: Replace CODE HERE to get the stock\n\t# prices from the Yahoo Finance website using\n\t# requests and Beautiful Soup\n\tprices = ['20', '25', '30', '30', '30', '20']\n\tprice = prices[0]\n\tprev_price = '10'\n\n\ttext = \"Start watching \" + symbol + \": Price: \" + price\n\tprint(text)\n\tlogging.info(text)\n\n\ti = 0 # not needed with Step C (remove)\n\n\t# Start watching and continue until CTRL-Break\n\twhile 
True:\n\t\n\t\t# Get Price with Steps A and B only\n\t\t# Step C use requests and Beautiful Soup\n\t\tprice = prices[i%6]\n\n\t\t# Send price for symbol to log\n\t\tlogging.info(symbol + \"\\t\" + price)\n\n\t\ti = i + 1 # not needed with Step C (remove)\n\n\t\t# Check for price difference and send email,\n\t\t# if different\n\t\tif price != prev_price:\n\t\t\ttext = symbol + \" now at \" + price + \\\n\t\t\t\t \"; was \" + prev_price\n\t\t\tprint(text)\n\t\t\tsend_email(text)\n\t\t\tprev_price = price\n\n\t\ttime.sleep(WAIT_INTERVAL)", "def fetch_mytrades(self, symbol):\r\n param = {}\r\n param['symbol'] = self.__transfer_symbol(symbol)\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/history-orders', param, self.timeout)", "def test_batch_quotes_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=BATCH_STOCK_QUOTES&symbols=MSFT,FB,AAPL&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))\n self.assertIsInstance(\n data[0], dict, 'Result Data must be a json dictionary')", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except Exception, e:\n print \"Could not download %s\" % symbol\n print e", "def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice" ]
[ "0.724852", "0.6970904", "0.6869229", "0.6441689", "0.6396739", "0.63900095", "0.6348864", "0.63472736", "0.63055825", "0.6273491", "0.62635237", "0.62429434", "0.6217562", "0.62126017", "0.618903", "0.6181409", "0.6145811", "0.6087003", "0.60634583", "0.6036593", "0.6017648", "0.6007338", "0.597357", "0.5958284", "0.5925145", "0.5910741", "0.59060246", "0.589271", "0.58759826", "0.58592767" ]
0.8138068
0
Gets quotes for multiple ticker symbols access_token token used to access the TD Ameritrade site
def get_multi_quotes(access_token,tickers): quote_url = 'https://api.tdameritrade.com/v1/marketdata/quotes' #The header for getting a quote needs to define the input type (json) headers = {'Authorization':'Bearer {}'.format(access_token), 'Content-Type':'application/json'} #Pass in the symbols as parameters params = {'symbol':tickers} #Make the get request to TD Ameritrade quote_data_json = requests.get(url=quote_url,headers=headers,params=params) return quote_data_json.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_quote(access_token,ticker):\r\n quote_url = 'https://api.tdameritrade.com/v1/marketdata/{}/quotes'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Make the get request to TD Ameritrade\r\n quote_data_json = requests.get(url=quote_url,headers=headers)\r\n return quote_data_json.json()", "def quotes_request(ticker):\n api_key = conf['TD']['key']\n url = conf['TD']['quotesURL']\n\n params = {\n 'apikey': api_key,\n 'symbol': ticker\n }\n\n request = requests.get(\n url=url,\n params=params\n ).json()\n\n time.sleep(1)\n\n return pd.DataFrame.from_dict(\n request,\n orient='index'\n ).reset_index(drop=True)", "def _get_quotes(symbols):\n # have to format symbols list to from (\"SYM1\", \"SYM2\", .... ,\"SYMN\")\n symbols = \"(\" + \",\".join(['\\\"' + s.upper() + '\"' for s in symbols]) + \")\"\n query = 'SELECT * FROM yahoo.finance.quote WHERE symbol in {0}'.format(symbols)\n payload = {\n \"q\": query, 'format':'json', \"env\":'store://datatables.org/alltableswithkeys'\n }\n try:\n resp = requests.get('http://query.yahooapis.com/v1/public/yql?', params=payload)\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(e)\n return\n return json.loads(resp.text)[\"query\"][\"results\"][\"quote\"]", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def get_quotes(self, symbols: str, greeks: bool = False) -> List[Quote]:\n url = \"/v1/markets/quotes\"\n params = {\"symbols\": symbols, \"greeks\": greeks}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**ensure_list(data, \"quotes\"))\n return res.quotes.quotes", "def test_batch_quotes_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=BATCH_STOCK_QUOTES&symbols=MSFT,FB,AAPL&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))\n self.assertIsInstance(\n data[0], dict, 'Result Data must be a json dictionary')", "def test_batch_quotes(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST)\n url = \"http://www.alphavantage.co/query?function=BATCH_STOCK_QUOTES&symbols=MSFT,FB,AAPL&apikey=test\"\n path_file = self.get_file_from_url(\"mock_batch_quotes\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))\n self.assertIsInstance(\n data[0], dict, 'Result Data must be a json dictionary')", "def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def test_batch_quotes_python3(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=BATCH_STOCK_QUOTES&symbols=MSFT,FB,AAPL&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))\n self.assertIsInstance(\n data[0], dict, 'Result Data must be a json dictionary')", "def get_all_quotes() -> List[Dict]:\n return [quote for quote in quote_dict[\"quotes\"]]", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "async 
def get_trade(self, symbol, limit=500):\n uri = \"/fapi/v1/trades\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit\n }\n success, error = await self.request(\"GET\", uri, params)\n return success, error", "async def get_trades(self, symbol, limit=100):\n uri = \"/v3/trades\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit\n }\n success, error = await self.request(\"GET\", uri, params)\n return success, error", "def get_quote(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n quote_url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + ticker.upper() + '&apikey=' + key\r\n key_metrics_url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker.upper() + '&apikey=' + key\r\n\r\n quote_response = requests.get(quote_url)\r\n string = quote_response.json()\r\n\r\n key_metrics_response= requests.get(key_metrics_url)\r\n metrics_str = key_metrics_response.json()\r\n color_tag = None\r\n\r\n if quote_response and 'Global Quote' in string:\r\n\r\n current_price = round(float(string['Global Quote']['05. price']), 2)\r\n change = round(float(string['Global Quote']['09. change']), 2)\r\n change_pct = string['Global Quote']['10. change percent'][:5] + \"%\"\r\n previous_price = round(float(string['Global Quote']['08. previous close']), 2)\r\n\r\n yearly_high = metrics_str['52WeekHigh']\r\n mark_cap = round(int(metrics_str['MarketCapitalization'])/10E8, 2)\r\n mark_cap_str = str(mark_cap) + \"B\"\r\n\r\n if ticker not in self.holdings:\r\n self.holdings[ticker] = current_price\r\n tuples = [ticker, current_price, change, change_pct, yearly_high, mark_cap_str]\r\n\r\n if current_price > previous_price:\r\n color_tag = 'green'\r\n else:\r\n color_tag = 'red'\r\n self.treeview.insert(parent='', index='end', values=tuples, tags=(color_tag,))\r\n return current_price\r\n else:\r\n return None", "def get_price_history_dates(access_token,ticker,start_date,end_date,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'startDate':start_date,\r\n 'endDate': end_date,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def query_symbols(self):\n return self._call_txtrader_api('query_symbols', {'data': False})", "def fetch_mytrades(self, symbol):\r\n param = {}\r\n param['symbol'] = self.__transfer_symbol(symbol)\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/history-orders', param, self.timeout)", "def get_all_stocks():\n url = r\"https://brapi.ga/api/quote/list\"\n response = requests.get(url)\n return [stock[\"stock\"] for stock in response.json()[\"stocks\"]]", "def get_quotes(scanner: SkyScanner, start, end, start_date, entire_month=\"false\"):\n all_quotes = []\n if entire_month == \"false\":\n quotes, airports = scanner.get_quotes_oneway(start, end, start_date)\n else:\n start_date = start_date.split(\"-\")[0] + \"-\" + start_date.split(\"-\")[1]\n quotes, airports = scanner.get_quotes_oneway(start, end, start_date)\n for quote in quotes[0]:\n 
price = quote['MinPrice']\n start_airport = airports[quote['OutboundLeg'][\"OriginId\"]]\n end_airport = airports[quote['OutboundLeg'][\"DestinationId\"]]\n start_time = quote['OutboundLeg'][\"DepartureDate\"].split(\"T\")[0]\n company = scanner.carriers[quote['OutboundLeg']['CarrierIds'][0]]\n all_quotes.append(Quote(start_time, start_airport, end_airport, price, company))\n cheapest_price = 9999999999999\n cheapest_quote = None\n cheapest_index = None\n\n for i, quote in enumerate(all_quotes):\n if quote.price < cheapest_price:\n cheapest_price = quote.price\n cheapest_quote = quote\n cheapest_index = i\n\n if cheapest_index is not None:\n del all_quotes[cheapest_index]\n scanner.cheapest = cheapest_quote\n scanner.all_quotes = all_quotes\n return cheapest_quote, all_quotes, airports", "def getTradedTickers(self):\n jsonResponse = self.getJson(\"https://api.kraken.com/0/public/Assets\")\n availableTickers = []\n for asset in jsonResponse[\"result\"]:\n if asset[0]==\"X\" or asset[0]==\"Z\":\n asset = asset[1:]\n availableTickers.append(asset)\n return availableTickers", "async def get_active_exchange_markets(cls) -> pd.DataFrame:\n async with aiohttp.ClientSession() as client:\n\n trading_pairs_response = await client.get(ASSET_PAIRS_URL)\n trading_pairs_response: aiohttp.ClientResponse = trading_pairs_response\n\n if trading_pairs_response.status != 200:\n raise IOError(f\"Error fetching Kraken trading pairs. \"\n f\"HTTP status is {trading_pairs_response.status}.\")\n\n trading_pairs_data: Dict[str, Any] = await trading_pairs_response.json()\n trading_pairs_data[\"result\"] = {\n pair: details for pair, details in trading_pairs_data[\"result\"].items() if \".\" not in pair}\n\n wsname_dict: Dict[str, str] = {pair: details[\"wsname\"]\n for pair, details in trading_pairs_data[\"result\"].items()}\n trading_pairs: Dict[str, Any] = {pair: {\"baseAsset\": wsname_dict[pair].split(\"/\")[0],\n \"quoteAsset\": wsname_dict[pair].split(\"/\")[1],\n \"wsname\": wsname_dict[pair]}\n for pair in trading_pairs_data[\"result\"]}\n\n trading_pairs_str: str = ','.join(trading_pairs.keys())\n\n market_response = await client.get(f\"{TICKER_URL}?pair={trading_pairs_str}\")\n market_response: aiohttp.ClientResponse = market_response\n\n if market_response.status != 200:\n raise IOError(f\"Error fetching Kraken markets information. 
\"\n f\"HTTP status is {market_response.status}.\")\n\n market_data = await market_response.json()\n\n market_data: List[Dict[str, Any]] = [{\"pair\": pair, **market_data[\"result\"][pair], **trading_pairs[pair]}\n for pair in market_data[\"result\"]\n if pair in trading_pairs]\n\n # Build the data frame.\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=market_data, index=\"pair\")\n all_markets[\"lastPrice\"] = all_markets.c.map(lambda x: x[0]).astype(\"float\")\n all_markets.loc[:, \"volume\"] = all_markets.v.map(lambda x: x[1]).astype(\"float\")\n\n price_dict: Dict[str, float] = await cls.get_prices_from_df(all_markets)\n\n usd_volume: List[float] = [\n (\n baseVolume * price_dict[baseAsset] if baseAsset in price_dict else -1\n )\n for baseAsset, baseVolume in zip(all_markets.baseAsset,\n all_markets.volume)]\n all_markets.loc[:, \"USDVolume\"] = usd_volume\n\n return all_markets.sort_values(\"USDVolume\", ascending=False)", "def list_quotes(\n self,\n ticker: str,\n timestamp: Optional[Union[str, int, datetime, date]] = None,\n timestamp_lt: Optional[Union[str, int, datetime, date]] = None,\n timestamp_lte: Optional[Union[str, int, datetime, date]] = None,\n timestamp_gt: Optional[Union[str, int, datetime, date]] = None,\n timestamp_gte: Optional[Union[str, int, datetime, date]] = None,\n limit: Optional[int] = None,\n sort: Optional[Union[str, Sort]] = None,\n order: Optional[Union[str, Order]] = None,\n params: Optional[Dict[str, Any]] = None,\n raw: bool = False,\n options: Optional[RequestOptionBuilder] = None,\n ) -> Union[Iterator[Quote], HTTPResponse]:\n url = f\"/v3/quotes/{ticker}\"\n\n return self._paginate(\n path=url,\n params=self._get_params(self.list_quotes, locals()),\n raw=raw,\n deserializer=Quote.from_dict,\n options=options,\n )", "def get_quote (self, symbols, fields=[]):\n \n # Ensure correctly-typed input\n if not utils.check(symbols):\n return {}\n \n # Correctly format Symbols, also store split up symbols\n if type(symbols) == type([]):\n # We were passed list\n fmt_symbols = ','.join(symbols)\n else:\n # We were passed string\n fmt_symbols = symbols\n symbols = symbols.split(',')\n \n \n # Correctly format Fields, also store split up fields\n if type(fields) == type([]):\n # We were passed list\n fmt_fields = ','.join(fields)\n else:\n # We were passed string\n fmt_fields = fields\n fields = fmt_fields.split(',')\n \n # For aesthetics...\n fmt_symbols = fmt_symbols.upper()\n \n \n # Assemble URL\n url = self.endpoints['base'] + 'market/ext/quotes.json'\n \n # Authenticate\n auth = self.create_auth()\n \n # Create request paramters according to how we need them\n req_params = { 'symbols':symbols }\n if fields != None:\n req_params['fids'] = fmt_fields\n \n # Create request \n auth = self.create_auth()\n results = requests.post(\\\n url,\n data=req_params,\n auth=auth\n ).json()\\\n ['response']['quotes']['quote']\n \n \n # Add symbols to output\n # ...why tf doesn't Ally include this in the quote? 
they usually send way too much\n if len(symbols) > 1:\n for i,sym in enumerate(symbols):\n results[i]['symbol'] = sym\n else:\n results['symbol'] = symbols[0]\n \n \n return results", "def query_all_symbols(self):\n return self._call_txtrader_api('query_symbols', {'data': True})", "def list_quotas(self, **_params):\r\n return self.get(self.quotas_path, params=_params)", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def test_batch_quotes_pandas_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=BATCH_STOCK_QUOTES&symbols=MSFT,FB,AAPL&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas dataframe')", "async def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):\n await self.load_markets()\n symbols = self.market_symbols(symbols)\n request = {}\n if symbols is not None:\n ids = self.market_ids(symbols)\n request['symbols'] = ','.join(ids)\n else:\n request['symbols'] = 'ALL'\n tickers = await self.publicGetTickers(self.extend(request, params))\n #\n # [\n # # on trading pairs(ex. tBTCUSD)\n # [\n # SYMBOL,\n # BID,\n # BID_SIZE,\n # ASK,\n # ASK_SIZE,\n # DAILY_CHANGE,\n # DAILY_CHANGE_RELATIVE,\n # LAST_PRICE,\n # VOLUME,\n # HIGH,\n # LOW\n # ],\n # # on funding currencies(ex. fUSD)\n # [\n # SYMBOL,\n # FRR,\n # BID,\n # BID_PERIOD,\n # BID_SIZE,\n # ASK,\n # ASK_PERIOD,\n # ASK_SIZE,\n # DAILY_CHANGE,\n # DAILY_CHANGE_RELATIVE,\n # LAST_PRICE,\n # VOLUME,\n # HIGH,\n # LOW,\n # _PLACEHOLDER,\n # _PLACEHOLDER,\n # FRR_AMOUNT_AVAILABLE\n # ],\n # ...\n # ]\n #\n result = {}\n for i in range(0, len(tickers)):\n ticker = tickers[i]\n marketId = self.safe_string(ticker, 0)\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = self.parse_ticker(ticker, market)\n return self.filter_by_array(result, 'symbol', symbols)", "def fetch_tickers(self, symbol):\r\n symbol = self.__transfer_symbol(symbol)\r\n return self.__public_request('GET', '/api/v1/ticker/%s' % symbol)", "def gen_quote(token):\n\tmpk = global_storage.get_collector_master_public_key()\n\n\tif not sanetoken(token):\n\t\traise rpc_lib.RPCException(\"Token not sane.\")\n\n\t# FIXME: Should we be worried about race conditions here?\n\tdbentry = SellerDB.get(token=token)\n\tif dbentry != None: # This token is already in the database.\n\t\tindex, address, price = dbentry['index'], dbentry['address'], dbentry['price']\n\t\tassert address == bitcoin.electrum_address(mpk, index) # Index is the index used to generate address.(deterministic key generation)\n\telse:\n\t\t# Index is a large random number that combines with the master public key to yield the address. This combination takes constant time -- it doesn't hurt us to use a very large index. An attacker which knows index, mpk, address, and the _private_ key for address can get the private key for _any_ public key generated using mpk. 
To limit the damage if one private key gets leaked, we'll make index cryptographically securely random, even though it's probably unnecessary.\n\t\tindex = random.SystemRandom().getrandbits(128)\n\t\taddress = bitcoin.electrum_address(mpk, index)\n\t\t# Price is the price to buy a bond, in satoshi. (We don't use BTC because we don't want floating point errors.)\n\t\tprice = global_storage.bond_price\n\t\tSellerDB.put(token=token, index=index, address=address, price=price)\n\treturn (address, price)" ]
[ "0.7891363", "0.6873732", "0.67089283", "0.65867734", "0.65348876", "0.6282515", "0.62716085", "0.6097361", "0.60793877", "0.6064195", "0.60637885", "0.6047419", "0.60030806", "0.6001138", "0.599085", "0.59795886", "0.59471214", "0.59236175", "0.59121275", "0.58546513", "0.5845866", "0.5839773", "0.58242834", "0.57778394", "0.5717897", "0.5707988", "0.57030135", "0.5686273", "0.5686206", "0.5672801" ]
0.8782872
0
Get price history of a stock looking back from today access_token token used to access the TD Ameritrade site ticker the stock ticker symbol periodType day, month, year, or ytd (default day) period the number of periods to show (default 10 days, 1 month, 1 year, 1 ytd) frequencyType the frequency of data to return; minute (110 days only), daily, weekly, and monthly frequency the number of frequency to be included in each candle (granularity of data) 1 is default 1, 5, 10, 15, and 30 available for minute (only 1 for other types)
def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency): price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker) #The header for getting a quote needs to define the input type (json) headers = {'Authorization':'Bearer {}'.format(access_token), 'Content-Type':'application/json'} #Parameters for period of time and frequency of data to get params = {'periodType':periodType, 'period': period, 'frequencyType': frequencyType, 'frequency': frequency} #Make the get request to TD Ameritrade price_history_json = requests.get(url=price_url,headers=headers,params=params) return price_history_json.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_price_history_dates(access_token,ticker,start_date,end_date,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'startDate':start_date,\r\n 'endDate': end_date,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def get_full_history(symbol):\n to_date = int(datetime.datetime.timestamp(datetime.datetime.now()))\n from_date = int(datetime.datetime.timestamp(datetime.datetime(1990, 1, 1, 1, 0, 0)))\n url_base = \"https://query1.finance.yahoo.com/v7/finance/download/\"\n url_params = f\"{symbol}.NS?period1={from_date}&period2={to_date}&interval=1d&events=history\"\n resp = requests.get(url_base + url_params)\n a = csv_to_list(resp)[1:]\n return create_price(symbol, a)", "def get_history_for(symbol: str):\n baseurl = \"https://www.alphavantage.co/query\"\n params = {\n \"function\": \"TIME_SERIES_DAILY_ADJUSTED\", # account for split/dividend\n \"symbol\": symbol,\n \"outputsize\": \"full\", # if \"full\", get 20 years of data\n \"apikey\": ALPHA_API_KEY\n }\n return make_request(baseurl=baseurl, params=params)", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. 
\"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def get_data(symbol_id='BTC', period_id='1DAY', request_limit=1000, tdelta=30):\n now = datetime.utcnow()\n month = timedelta(days=tdelta)\n past_month = (now - month).isoformat()\n\n parameters = {'symbol_id': symbol_id, 'period_id': period_id, 'time_start': past_month[:-3], 'limit':request_limit}\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n\n while response.status_code != 200:\n time.sleep(5)\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n \n data = response.json()\n \n # this is a commnet\n csv_headers = ['time_period_start', 'time_period_end', 'price_high', 'price_low', 'price_close', 'price_open', 'trades_count', \n 'volume_traded', 'time_open', 'time_close']\n\n\n with open(str(datafolder / f'{symbol_id}_{tdelta}_day.csv'), 'w', newline='') as f:\n writer = csv.DictWriter(f, csv_headers)\n writer.writeheader()\n for item in data:\n writer.writerow(item)", "def get_historical_quotes(\n self, symbol: str, interval: str = None, start: date = None, end: date = None\n ) -> List[HistoricQuote]:\n url = \"/v1/markets/history\"\n params = {\"symbol\": symbol, \"interval\": interval, \"start\": start, \"end\": end}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.history.day", "def fetch_price_history(**kwargs):\r\n\r\n\r\n url = \"\"\"https://api.tdameritrade.com/v1/marketdata/\r\n {}/pricehistory\"\"\".format(kwargs.get('symbol'))\r\n\r\n params = {}\r\n params.update({'apikey': API_KEY})\r\n\r\n # CREATE FOR LOOP TO INSERT KEY:VALUE PAIRS THAT\r\n # ARE PASSED AS PARAMS TO THE GET CALL INTO\r\n # THE PARAMS DICTIONARY SO THEY CAN BE INSERTED\r\n # INTO THE url VARIABLE ABOVE.\r\n\r\n for arg in kwargs:\r\n parameter = {arg: kwargs.get(arg)}\r\n params.update(parameter)\r\n\r\n # NOW RETURN THE DESIRED params\r\n return requests.get(url, params=params).json()", "async def fetch_funding_rate_history(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {}\n market = None\n if symbol in self.currencies:\n code = self.currency(symbol)\n request['symbol'] = code['id']\n elif symbol is not None:\n splitSymbol = symbol.split(':')\n splitSymbolLength = len(splitSymbol)\n timeframes = ['nearest', 'daily', 'weekly', 'monthly', 'quarterly', 'biquarterly', 'perpetual']\n if (splitSymbolLength > 1) and self.in_array(splitSymbol[1], timeframes):\n code = self.currency(splitSymbol[0])\n symbol = code['id'] + ':' + splitSymbol[1]\n request['symbol'] = symbol\n else:\n market = self.market(symbol)\n request['symbol'] = market['id']\n if since is not None:\n request['startTime'] = self.iso8601(since)\n if limit is not None:\n request['count'] = limit\n until = self.safe_integer_2(params, 'until', 'till')\n params = self.omit(params, ['until', 'till'])\n if until is not None:\n 
request['endTime'] = self.iso8601(until)\n response = await self.publicGetFunding(self.extend(request, params))\n #\n # [\n # {\n # \"timestamp\": \"2016-05-07T12:00:00.000Z\",\n # \"symbol\": \"ETHXBT\",\n # \"fundingInterval\": \"2000-01-02T00:00:00.000Z\",\n # \"fundingRate\": 0.0010890000000000001,\n # \"fundingRateDaily\": 0.0010890000000000001\n # }\n # ]\n #\n return self.parse_funding_rate_histories(response, market, since, limit)", "def getFullPriceHistory(self, stockSymbol, stockExchange):\n response = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}:{}&outputsize=full&apikey={}\".format(\n stockExchange, stockSymbol, self.ALPHA_VANTAGE_SECRET_KEY))\n data = response.json()\n timestamps, aClose = [], []\n for key in data['Time Series (Daily)']:\n timestamps.append(key)\n dates = [datetime.strptime(\n ts, \"%Y-%m-%d\") for ts in timestamps]\n dates.sort()\n dates.reverse()\n Dates = [datetime.strftime(ts, \"%Y-%m-%d\") for ts in dates]\n for date in Dates:\n aClose.append(\n float(data['Time Series (Daily)'][date]['5. adjusted close']))\n return (Dates, aClose)", "def get_stock_time_history(name):\n # Get stock data\n url = r\"https://brapi.ga/api/quote/\"\n response = requests.get(url + name, params=dict(interval=\"1d\", range=\"1y\"))\n results = response.json()[\"results\"][0]\n\n time_history = results[\"historicalDataPrice\"]\n\n for day in time_history:\n day[\"date\"] = datetime.datetime.fromtimestamp(day[\"date\"])\n\n return pd.DataFrame(time_history)", "def get_stock(symbol, interval):\n \n try:\n \n time_interval = TIME_INTERVALS[interval]\n \n if(time_interval == TIME_INTERVALS['Intraday']):\n json_data = requests.request('GET', 'https://www.alphavantage.co'+\n '/query?function=TIME_SERIES_INTRADAY&symbol='+symbol+\n '&interval=1min&apikey='+API_KEY).json()\n data_frame = pd.DataFrame.from_records(json_data['Time Series (1min)'])\n \n else:\n json_data = requests.request('GET', 'https://www.alphavantage.co'+\n '/query?function='+time_interval+'&symbol='+symbol+\n '&apikey='+API_KEY).json()\n \n data_key = ''\n \n if(time_interval == TIME_INTERVALS['Daily']):\n data_key = 'Time Series (Daily)'\n elif(time_interval == TIME_INTERVALS['Weekly']):\n data_key = 'Weekly Time Series'\n else:\n data_key = 'Monthly Time Series'\n \n data_frame = pd.DataFrame.from_records(json_data[data_key])\n \n data_frame = data_frame.transpose()\n data_frame.columns = ['Open', 'High', 'Low', 'Close', 'Volume']\n return data_frame\n \n except:\n print(\"Error while loading data\")\n return None", "def get_ticker(self, ticker=\"ACN\", refresh=False):\n \n # Build financials\n logger.debug(\"Request for {0} forcing_refresh {1}\".format(ticker, refresh))\n # financial = {}\n\n financial = None\n stockdate = None\n ratios = None\n alt_ticker = None\n\n try:\n financial_item = self._get_financial(ticker, refresh)\n financial = financial_item.dump()\n \n today = datetime.today().date()\n if financial_item.updated.year < today.year or financial_item.updated.month < today.month:\n refresh = True\n # Get Key Ratios (incl health)\n logger.debug(\n f\"This is the financial {financial} REFRESH {refresh}\"\n )\n \n ratios = [val.dump() for val in self._get_key_ratios(ticker, refresh)]\n \n # Get Valuation\n valuations = [val.dump() for val in self._get_valuation_history(ticker, refresh)]\n logger.debug(valuations)\n \n\n financial[\"ratios\"] = ratios\n financial[\"valuations\"] = valuations\n \n # TODO: Get Dividend history\n dividend_history = [val.dump() for 
val in self._get_dividend_history(ticker, refresh)]\n \n if (refresh):\n financial_item.updated = today\n db.session.commit()\n \n # Assemble stock data\n stockdata = {\n \"symbol\": ticker,\n \"name\": financial[\"company_name\"],\n \"financials\": financial,\n \"dividend_history\": dividend_history,\n }\n \n return jsonify(stockdata)\n \n except Exception as e:\n logger.exception(\n \"Failed to retrieve data for {ticker}\".format(ticker=ticker)\n )\n return \"Not found\", 404", "def get_daily_historic_data(self, ticker, start_date, end_date):\n av_url = self._construct_alpha_vantage_symbol_call(ticker)\n\n try:\n av_data_js = requests.get(av_url)\n data = json.loads(av_data_js.text)['Time Series (Daily)']\n except Exception as e:\n print(\n \"Could not download AlphaVantage data for %s ticker \"\n \"(%s)...stopping.\" % (ticker, e)\n )\n return pd.DataFrame(columns=COLUMNS).set_index('Date')\n else:\n prices = []\n for date_str in sorted(data.keys()):\n date = dt.strptime(date_str, '%Y-%m-%d')\n if date < start_date or date > end_date:\n continue\n\n bar = data[date_str]\n prices.append(\n (\n date, \n float(bar['1. open']),\n float(bar['2. high']),\n float(bar['3. low']),\n float(bar['4. close']),\n int(bar['6. volume']),\n float(bar['5. adjusted close'])\n )\n )\n price_df = pd.DataFrame(prices, columns=COLUMNS).set_index('Date').sort_index()\n self._correct_back_adjusted_prices(price_df)\n return price_df", "def query_history(self, req: HistoryRequest) -> List[BarData]:\n history = []\n\n start_time = generate_datetime3(req.start)\n end_time = generate_datetime3(req.end)\n\n mt5_req = {\n \"type\": FUNCTION_QUERYHISTORY,\n \"symbol\": req.symbol.replace('-', '.'),\n \"interval\": INTERVAL_VT2MT[req.interval],\n \"start_time\": start_time,\n \"end_time\": end_time,\n }\n packet = self.client.send_request(mt5_req)\n\n if packet[\"result\"] == -1:\n self.write_log(\"获取历史数据失败\")\n else:\n for d in packet[\"data\"]:\n bar = BarData(\n symbol=req.symbol.replace('.', '-'),\n exchange=Exchange.OTC,\n datetime=generate_datetime2(d[\"time\"]),\n interval=req.interval,\n volume=d[\"real_volume\"],\n open_price=d[\"open\"],\n high_price=d[\"high\"],\n low_price=d[\"low\"],\n close_price=d[\"close\"],\n gateway_name=self.gateway_name\n )\n history.append(bar)\n\n data = packet[\"data\"]\n begin = generate_datetime2(data[0][\"time\"])\n end = generate_datetime2(data[-1][\"time\"])\n\n msg = f\"获取历史数据成功,{req.symbol.replace('.','-')} - {req.interval.value},{begin} - {end}\"\n self.write_log(msg)\n\n return history", "def get_stock_info(request):\n if request.method == 'GET':\n\n dailyParams = {\n 'symbol': request.query_params.get('symbol'),\n 'function': 'TIME_SERIES_INTRADAY',\n 'interval': '30min',\n 'apikey': request.query_params.get('apikey'),\n 'outputsize': 'full',\n }\n\n historicParams = {\n 'symbol': request.query_params.get('symbol'), # request.query_params.symbol\n 'function': 'TIME_SERIES_DAILY',\n 'apikey': request.query_params.get('apikey'),\n 'outputsize': 'full',\n }\n\n dailyData = requests.get(\n 'https://www.alphavantage.co/query?',\n params=dailyParams,\n )\n dailyFormated = format_data(\n json.loads(dailyData.content.decode('utf-8')),\n \"Time Series (30min)\",\n '%H:%M:%S'\n )\n\n historicData = requests.get(\n 'https://www.alphavantage.co/query?',\n params=historicParams,\n )\n historicFormated = format_data(\n json.loads(historicData.content.decode('utf-8')),\n \"Time Series (Daily)\",\n '%Y-%m-%d'\n )\n\n # historicOrdered = arr.array('i', historicFormated)\n # Make calcs, 
categorize time data into slices,\n # add kpis, and package all together\n\n # Response = {\n # 'data': {\n # 'daily': dailyFormated,\n # 'historic': historicFormated,\n # },\n # 'kpis': {\n # 'PE': 5,\n # },\n # 'request': {'method': request.method,\n # 'path': request.path,\n # 'params': request.query_params,\n # },\n #\n # }\n\n if historicData.status_code == 200 and dailyData.status_code == 200:\n return Response({\n 'daily': dailyFormated,\n 'historic': {\n 'fiveDays': historicFormated[:5],\n 'month': historicFormated[:30],\n 'sixMonths': historicFormated[:180],\n 'year': historicFormated[:365],\n 'fiveYears': historicFormated[:1825],\n 'max': historicFormated,\n },\n 'kpis': {\n 'open': 120,\n 'close': dailyFormated[-1],\n 'PE': 5,\n },\n 'request': {'method': request.method,\n 'path': request.path,\n 'params': request.query_params,\n },\n\n })\n else:\n return None", "def get_data(ticker, interval, start_date, end_date):\r\n # Display indication\r\n print('[INFO] {} - Retrieving {}_{} historical data'.format(get_now(), ticker, interval))\r\n # Download ticker's ohlcv\r\n ohlcv = yf.download(tickers=ticker, start=start_date, end=end_date, interval=interval)\r\n # Modify dataframe\r\n ohlcv.drop(columns=['Adj Close'], inplace=True)\r\n ohlcv.sort_index(axis=0, ascending=False, inplace=True)\r\n ohlcv.reset_index(inplace=True)\r\n if \"Datetime\" in ohlcv.columns:\r\n ohlcv['Datetime'] = ohlcv['Datetime'].astype(str).str[:-9]\r\n return ohlcv", "def get_historic_data(end_date = datetime.now(), \n start_date = datetime.now() + timedelta(-365),\n ticker=[],\n close_only=True):\n #checks if the parameters provided through \"ticker\" is not an empty list\n #if it is, the function won't go forward after this point. returns explanatory message.\n if ticker == []:\n return \"Empty list of tickers\"\n \n #if a string is provided as \"ticker\" parameter, then it splits the string by \n #spaces and store the outcome in a list.\n elif type(ticker) is str:\n ticker = ticker.split(\" \")\n \n iex_token = os.getenv(\"IEX_TOKEN\")#not necessary anymore.\n if type(iex_token) == str: print(\"IEX Key found successfully ...getting data\")\n else: return \"Error: IEX Key NOT found\"\n \n \n #Gets historical data with the parameters provided.\n #Gets only \"close\" and \"volume\" value for efficiency.\n prices = get_historical_data(ticker, start_date, end_date,\n output_format='pandas', \n token=iex_token, \n close_only=close_only\n )\n \n #If only one ticker is provided, then it adds another indexing level to the column\n #with the ticker. This is done for two reasons: 1) To visualize the ticker downloaded \n #as a confirmation that I am working with correct data. 2) To mimic the format of the\n #dataframe obtained when getting 2 or more tickers data (2-level column indexing).\n if len(ticker) == 1:\n new_columns = pd.MultiIndex.from_product([ [ticker[0]],prices.columns ] )\n prices.columns = new_columns\n \n return prices", "def graph_data(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n\r\n url = 'https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=IBM&apikey=demo'\r\n response = requests.get(url)\r\n string = response.json()\r\n\r\n ticker = string['Meta Data']['2. 
Symbol']\r\n dic = string['Monthly Time Series']\r\n keys = string['Monthly Time Series'].keys()\r\n key_list = list(keys)\r\n\r\n key_data = []\r\n date_list = []\r\n open_list = []\r\n high_list = []\r\n low_list = []\r\n close_list = []\r\n volume_list = []\r\n\r\n for x in range(len(key_list)-1, 0, -1):\r\n\r\n date = key_list[x]\r\n Open = dic[key_list[x]]['1. open']\r\n High = dic[key_list[x]]['2. high']\r\n Low = dic[key_list[x]]['3. low']\r\n Close = dic[key_list[x]]['4. close']\r\n Volume = dic[key_list[x]]['5. volume']\r\n\r\n entry = date + \",\" + Open\r\n key_data.append(entry)\r\n date_list.append(date)\r\n open_list.append(float(Open))\r\n high_list.append(float(High))\r\n low_list.append(float(Low))\r\n close_list.append(float(Close))\r\n volume_list.append(float(Volume))\r\n\r\n date, price = np.loadtxt(reversed(key_data), delimiter=',', unpack=True, converters={0: self.bytes_to_dates})\r\n\r\n # datelist_strs = []\r\n #\r\n # for date in date_list:\r\n # new_date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\r\n # datelist_strs.append(new_date)\r\n\r\n date_objects = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in date_list]\r\n\r\n dictionary = {'Date': date_objects, 'Open': open_list, 'High': high_list, 'Low': low_list, 'Close': close_list,\r\n 'Volume': volume_list}\r\n\r\n df = pd.DataFrame.from_dict(dictionary)\r\n df.set_index('Date', inplace=True)\r\n\r\n self.df = df\r\n self.date = date\r\n self.price = price\r\n self.date_list = date_list\r\n self.generate_graph(ticker)", "def test_get_historical_prices(self, mock_requests_get):\n from grand_exchanger.resources.graph import Graph\n\n result = resources.get_historical_prices(1)\n\n assert result == Graph(\n daily={\n datetime(2020, 7, 27, 0, 0): 100,\n datetime(2020, 7, 26, 0, 0): 120,\n datetime(2020, 7, 25, 0, 0): 110,\n },\n average={\n datetime(2020, 7, 27, 0, 0): 100,\n datetime(2020, 7, 26, 0, 0): 110,\n datetime(2020, 7, 25, 0, 0): 104,\n },\n )", "def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df", "def download_stock_data(symbol, interval, period):\n\tprices = yf.Ticker(symbol).history(period=period, interval=interval)\n\tif len(prices) > 0:\n\t\tresult_file = '../data/' + symbol + '.csv'\n\t\tprices.to_csv(result_file)\n\t\tprint(f\"Downloaded stock data of {stocks[s]}, data shape {prices.shape}, saved as {result_file}\")", "def get_price_history(self):\n # Connect to the database and return cursor\n database = DatabaseMySQL()\n\n # Query database.\n sql = \"Select published_at, `close` from company_price_volume_history \\\n where company_id =\" + str(self.company_id)\n df = database.get_query_df(sql)\n\n return df", "def historical(self, date, base='USD'):\n try:\n resp = self.client.get(self.ENDPOINT_HISTORICAL %\n date.strftime(\"%Y-%m-%d\"),\n params={'base': base})\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n return resp.json(parse_int=decimal.Decimal,\n parse_float=decimal.Decimal)", "def getHistoricalPrices(symbol, startDate, endDate):\n params = urlencode({\n 's': symbol,\n 'a': startDate.month - 1,\n 'b': startDate.day - 1,\n 'c': startDate.year,\n 'd': endDate.month - 1,\n 'e': endDate.day - 1,\n 'f': endDate.year,\n 'g': 'd',\n 'ignore': '.csv',\n })\n url = 'http://ichart.yahoo.com/table.csv?%s' % params\n req = Request(url)\n resp = urlopen(req)\n content = str(resp.read().decode('utf-8').strip())\n daily_data = content.splitlines()\n 
hist_dict = dict()\n keys = daily_data[0].split(',')\n for day in daily_data[1:]:\n day_data = day.split(',')\n date = day_data[0]\n hist_dict[date] = \\\n {keys[1]: day_data[1],\n keys[2]: day_data[2],\n keys[3]: day_data[3],\n keys[4]: day_data[4],\n keys[5]: day_data[5],\n keys[6]: day_data[6]}\n return hist_dict", "def stocks_history(request):\n\n symbol = request.args.get('symbol')\n\n if symbol is None:\n return jsonify([])\n\n client = bigquery.Client()\n qry = client.query(\"\"\"\n SELECT \n date,\n adj_close,\n symbol,\n sma_20,\n std_20,\n sma_50,\n sma_200,\n bb_perc_20\n FROM `ticker-224822.ticker_test_120718.analytics_view`\n where \n symbol = '{symbol}'\n and extract(year from date) >= 2010\n \"\"\".format(symbol=symbol))\n\n results = qry.result()\n results = [dict(row.items()) for row in results]\n resp = custom_jsonify(results)\n resp.headers.add('Access-Control-Allow-Origin', '*')\n resp.headers.add('Access-Control-Allow-Methods', 'GET')\n return resp", "def YahooFinancials_Data(Ticker=[],Start='',End ='',Frequency ='daily'):\n\n\n \n import pandas as pd\n from yahoofinancials import YahooFinancials\n import datetime as dt \n \n Ticker = Ticker or input(\"Enter Tcikers separated by',': \").split(',')\n Start = Start or input(\"Enter Start Date separated by '-': \") or (dt.date.today()-\n dt.timedelta(1825)).strftime(\"%Y-%m-%d\")\n End = End or input(\"Enter End Date separated by '-': \") or (dt.date.today()).strftime(\"%Y-%m-%d\")\n Frequency = Frequency or input(\"Enter Frequency like 'daily','weekly': \") or 'daily'\n \n data = pd.DataFrame()\n for i in range(len(Ticker)):\n try:\n yahoo_financials = YahooFinancials(Ticker[i])\n Json_obj = yahoo_financials.get_historical_price_data(Start, End, Frequency)\n Ohlv = Json_obj[Ticker[i]]['prices']\n temp = pd.DataFrame(Ohlv)[[\"formatted_date\",\"adjclose\"]]\n temp.set_index(\"formatted_date\", inplace = True)\n temp = temp[~temp.index.duplicated(keep = 'first')]\n data[Ticker[i]] = temp['adjclose']\n \n except:\n print(f\"Unable to get the Data for: {Ticker[i]}\")\n continue\n \n return data", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. 
Symbol'])\n counter += 1\n\n return info, symbols", "async def stocks(request: Request, ticker, period=\"1d\", interval=\"1d\"):\n logger.info(F\"NewApiRequest: /stocks/{ticker} - Client: {request.client.host}\")\n result = req_yfinance_data(ticker, period, interval)\n return result", "def get_stock_prices(ticker, start_date, end_date=None):\n if end_date is None:\n end_date = dt.date.today()\n\n shares = Share(ticker)\n df = pd.DataFrame(shares.get_historical(start_date.isoformat(),\n end_date.isoformat()))\n return df.set_index(\"Date\", drop=True) \\\n .drop(\"Symbol\", axis=1) \\\n .astype(float) \\\n .sort_index()", "def LoadingData(self, ticker, FullHistory=False):\r\n if FullHistory == False:\r\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&apikey={}\"\r\n else:\r\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&outputsize=full&apikey={}\"\r\n\r\n try:\r\n response = requests.get(url.format(ticker, self.key))\r\n response.raise_for_status()\r\n except requests.exceptions.RequestException as e:\r\n raise SystemExit(e)\r\n\r\n # The API returns 200 status even after you have a typo\r\n try:\r\n outputjson = response.json()['Time Series (Daily)']\r\n except:\r\n print(\"Please check ticker for typos or mismatches\")\r\n outputjson = None\r\n\r\n return outputjson, ticker" ]
[ "0.78879863", "0.72872126", "0.69083875", "0.66606575", "0.6602232", "0.6473682", "0.6446148", "0.6428861", "0.6426803", "0.6383207", "0.6333592", "0.63018507", "0.62857276", "0.62840873", "0.62560195", "0.6174368", "0.61729586", "0.6156546", "0.61404", "0.61305135", "0.610676", "0.6103648", "0.609658", "0.60956955", "0.60895586", "0.60836804", "0.6071506", "0.6048713", "0.60482013", "0.60454184" ]
0.83570635
0
Sets the prev link for the node.
def setPrev(self, prev): self.prev = prev
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prev(self, prev):\n\n self._prev = prev", "def set_prev(self, p) -> None:\n self.prev = p", "def setPrev(self, prev_half_edge):\n self.prev = prev_half_edge", "def prev_page_url(self, prev_page_url):\n\n self._prev_page_url = prev_page_url", "def goToPrevHistory(self: Self, event: Event = None) -> None:\n c = self\n c.nodeHistory.goPrev()", "def prev(self,p):\n assert type(p) is type(self)\n self.__prev = p", "def setPrev(self, edge):\n self.half1.setPrev(edge.half1)\n self.half2.setPrev(edge.half2)\n self.prev = edge", "def set_previous(self, new_previous):\n self.previous = new_previous", "def prevSibling(self):\n raise NotImplementedError(\"method must be implemented by subclass\")", "def prev(self):\n return self.__prev", "def previous(self, node) -> NoReturn:\n if node is None:\n self.my_previous = None\n else:\n if type(node) == type(_QueueNode(0)):\n self.my_previous = node\n else:\n raise ValueError(\"Invalid type\")", "def previous(self):\n\n pass", "def previous(self, item) -> LinkedListNode:\n node = self.head\n while node is not self._nil:\n if node.next.item is item:\n return node\n node = node.next\n return node", "def movePrev(self):\n parentNode = self.parentNode\n index = parentNode.idevices.index(self)\n if index > 0:\n temp = parentNode.idevices[index - 1]\n parentNode.idevices[index - 1] = self\n parentNode.idevices[index] = temp", "def addPrevSibling(self, elem):\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlAddPrevSibling(self._o, elem__o)\n if ret is None:raise treeError('xmlAddPrevSibling() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp", "def __init__(self, prev=None):\n self.prev = prev", "def previous(self, result, **kwargs):\n if result[\"previous\"]:\n return self._get(result[\"previous\"], **kwargs)\n\n return None", "def getPrev(self):\n\t\t\treturn self.prev", "def _populate_prev_and_next_links(self):\n self._prev_link = self._next_link = None\n links = self._response.headers.get('Link', None)\n if links:\n for link, rel in (link.split('; ') for link in links.split(', ')):\n link = link[1:-1]\n rel = rel.split('\"')[1]\n if rel == 'prev':\n self._prev_link = link\n elif rel == 'next':\n self._next_link = link", "def setup(self, parent=None, previous=None):\r\n self.parent = parent\r\n self.previous = previous\r\n self.next = None\r\n self.previousSibling = None\r\n self.nextSibling = None\r\n if self.parent and self.parent.contents:\r\n self.previousSibling = self.parent.contents[-1]\r\n self.previousSibling.nextSibling = self", "def get_prev(self):\n return self.prev", "def previous(self):\n if self.current and self.current.prev:\n self.current = self.current.prev\n return True\n return False", "def prev(self):\n if self.signbit.dec_value == 0:\n method = 'prev'\n else:\n method = 'next'\n return self._step(method)", "def prev_page(self):\n if self._start == 0:\n raise ValueError('Already at the first page.')\n self._start = (self._start - self._num) if self._start > self._num else 0", "def goToPrevLink():\n if wikiPageStackTrace[-2].getUrl() != \"\":\n oldpage = wikiPageStackTrace[-2]\n print(\"going back to \", oldpage.getUrl())\n titleStackTrace.append(oldpage.getTitle())\n urlStackTrace.append(oldpage.getUrl())\n del wikiPageStackTrace[-1]\n update()\n else:\n update()", "def previous(self, _event):\n self.set_val(self.val - 1)", "async def prev_page(self):\n if self.page_num == 1:\n self.page_num = len(self.pages) # Loop around to the last item\n else:\n self.page_num -= 1\n return await self.update()", 
"def prev(self, delta=1):\n return Prufer.unrank(self.rank -delta, self.nodes)", "def on_btPagePrev_clicked(self, widget, data=None):\n\n if self.page > 1:\n self.page -= 1\n self.part = 1\n self.refresh()", "def nav_prev_sibling(self):\r\n siblings = self.nav_siblings()\r\n prev_sibling = None\r\n for i, sibling in enumerate(siblings):\r\n if sibling == self and i > 0:\r\n prev_sibling = siblings[i-1]\r\n return prev_sibling" ]
[ "0.7465448", "0.7228299", "0.7093982", "0.7074755", "0.6893062", "0.68524987", "0.67959595", "0.668916", "0.6375687", "0.6178108", "0.61040765", "0.60961574", "0.6089415", "0.6055914", "0.5996824", "0.5947542", "0.59411544", "0.5934818", "0.5934224", "0.59309167", "0.5916836", "0.5914551", "0.5885099", "0.5829563", "0.5810482", "0.58000344", "0.57168597", "0.56827587", "0.566834", "0.56574315" ]
0.7875985
0
Fills the list with information from a File; assumes newline-separated.
def populate(self, fileName): #assuming a newline seperated with open(fileName, 'r') as inFile: for line in inFile: self.append(line.strip())#strip the \n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_list_file(list, file):\n # Local Variables\n # data = data from the file\n \n data = open(file)\n for line in data:\n for word in line.split():\n list.insert(word)", "def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])", "def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst", "def readfile(self):\n try:\n with open(filename, mode=\"r\") as fileobject:\n for line in fileobject:\n line = line.rstrip()\n self.__domainlist.append(line)\n\n fileobject.close()\n except:\n print(\"Error when reading file\")", "def readAllfromFile(self):\n with open(self._fname, 'r') as f:\n lines = f.readlines()\n readList = []\n for line in lines:\n line = line.strip()\n if len(line) > 1:\n gra = self._readGrafromLine(line)\n readList.append(gra)\n f.close()\n return readList", "def make_list(list_file):\r\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]", "def loadList(file_name):\n with open(file_name) as f:\n l = [line.strip() for line in f]\n return l", "def fill(fname):\n return [[float(line.split()[-3]), float(line.split()[-1])]\\\n for line in open(fname).readlines()\\\n if FITRESRE1.match(line)]", "def make_list(list_file):\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]", "def populate_weight_list(file_object):\n new_list = []\n\n for line in file_object:\n new_list.append(line.split())\n \n return new_list", "def FileToList(FilePath):\r\n List = []\r\n with open(FilePath) as f:\r\n for line in f:\r\n List.append(line.rstrip())\r\n return List", "def fill_list(path):\n my_list = []\n for i in range(len(path)):\n my_file = open(path[i], 'r')\n line = my_file.readline()\n line = my_file.readline()\n while len(line) > 3:\n my_list.append(line)\n line = my_file.readline()\n my_file.close()\n return my_list", "def parse_file(input_file):\n \n all_lines = input_file.split('\\n')\n all_info_list = []\n for line in all_lines:\n line = line.split('\\t')\n info_per_row_list = []\n for value in line:\n my_string = \"\"\n value = value.strip('\\'\"')\n if len(value) == 0:\n value = \"NA\"\n my_string += value\n info_per_row_list += [my_string]\n all_info_list += [info_per_row_list]\n return all_info_list", "def afisare_filme(self,list):\n with open(\"filme.txt\", 'r') as f: # modificam si in fisier\n lines = f.readlines()\n for line in lines:\n line_sep = line.split('/')\n list.append(line_sep[0]+\" \"+line_sep[1]+\" \"+line_sep[2]+\" \"+str(line_sep[3])+\" \"+line_sep[4]+\"\\n\")\n return list", "def loadListFromFile (filename):\n retval = []\n filename = os.path.expanduser (filename)\n if not os.path.exists (filename):\n print(\"Error: file '%s' does not exist.\"%(filename))\n raise RuntimeError(\"Bad filename\")\n source = open (filename, 'r') \n for line in source.readlines():\n line = re.sub (r'#.+$', '', line) # remove comment characters\n line = line.strip()\n if len (line):\n retval.append (line)\n source.close()\n return retval", "def load_items(self, filename):\n with open(filename, \"r\") as f:\n itemss = []\n for line in f:\n line = line.strip()\n # Add name, description and initial location to each item object\n if line.upper():\n name = line\n line = f.readline()\n line = line.strip()\n description = line\n line = f.readline()\n line = line.strip()\n initial_room_id = line\n item = Item(name, 
description, initial_room_id)\n itemss.append(item)\n line = f.readline()\n return itemss", "def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n linkedlist_content = []\n linkedlist_content = self.display_content()\n\n for i in linkedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.append(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n linkedlist_content = []\n linkedlist_content = self.display_content()\n\n for i in linkedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()", "def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.add(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()", "def read_list(fname):\n with open(fname) as handle:\n items = [line.strip() for line in handle]\n return items", "def __build_list(self, word_list, file):\n \n for word in open(file):\n word = str.strip(word)\n \n if word != '' and not word.startswith(';'):\n word_list.append(word)", "def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]", "def read_file(file=\"input4.txt\"):\n res = []\n with open(file, encoding='utf-8') as f:\n for i in f.readlines():\n res.append(i.split())\n for i in res:\n t = i.pop(1).split(':')\n i.insert(1, t[0])\n return res", "def read_file():\r\n fp = open_file() \r\n csv_fp = csv.reader(fp) #Csv reader because splitting cannont be done on commas \r\n L = [] \r\n for line in csv_fp:\r\n data_lst = line\r\n race = data_lst[15]\r\n gender = data_lst[16]\r\n victim_info = data_lst[27]\r\n T = (race, gender, victim_info)\r\n L.append(T) #add the information to our list that began as empty\r\n \r\n return (L[1:]) #this is so that victim info is off by one.\r", "def FileList(file):\n with open(file,\"r\") as f:\n list1 = [r.split()[1] for r in f]\n list1 = [int(i) for i in list1]\n return list1", "def File2List(filename):\n items = []\n if IsFile(filename):\n readlist = open(filename, 'r')\n for line in readlist:\n if not line.startswith('#') and len(line) > 1:\n items.append(line.rstrip())\n return items", "def load_linelist(filename):\n linelist = []\n infile = open(filename)\n for row in infile:\n row = row.strip()\n if len(row)==0 or row[0] in '#%!@':\n continue\n g = row.split()\n wl = float(g[0])\n if len(g)>1:\n species = g[1]\n else:\n species = ''\n linelist.append((wl, species))\n infile.close()\n return linelist", "def parse(self, f):\n \n for line in f:\n self.parse_line(line)", "def read_file_list(filename):\n file 
= open(filename)\n data = file.read()\n lines = data.replace(\",\", \" \").replace(\"\\t\", \" \").split(\"\\n\")\n #strip() function can be used to remove space located in the head or the tail places of a string\n # v.strip('0')remove 0\n \n # list = [[v.strip() for v in line.split(\" \") if v.strip() != \"\"] for line in lines if\n # len(line) > 0 and line[0] != \"#\"]\n # list = [(float(l[0]), l[1:]) for l in list if len(l) > 1]\n \n list= []\n listResult = []\n for line in lines:\n tmpList = []\n if len(line) > 0 and line[0] != \"#\":\n for v in line.split(\" \"):\n if v.strip() != \"\":\n tmpList.append(v.strip())\n list.append(tmpList)\n \n for l in list:\n if len(l) > 1:\n listResult.append((float(l[0]), l[1:]))\n \n return dict(listResult)", "def read_data_from_file(file_name):\r\n file = open(file_name, 'r')\r\n for line in file:\r\n data = line.split(\",\")\r\n newProduct = Product(data[0].strip(), data[1].strip())\r\n lstOfProductObjects.append(newProduct)\r\n file.close()\r\n return lstOfProductObjects", "def parseInputFileList (self):\n filelist = []\n try:\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"@@@\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: label cfg file \" , self.cfgName , \" not found\"\n return" ]
[ "0.7336654", "0.6782013", "0.66367686", "0.6446616", "0.63737994", "0.6366721", "0.63587606", "0.62649655", "0.6230008", "0.61351603", "0.60994554", "0.6090269", "0.6088071", "0.60788745", "0.60174197", "0.6000459", "0.59552413", "0.5948468", "0.5942839", "0.59296787", "0.59274", "0.59220064", "0.5918294", "0.5901058", "0.5900752", "0.5890794", "0.58862126", "0.5871837", "0.58663464", "0.5852752" ]
0.7674582
0
Open and read the cache file if it exists
def __read_cache_file_if_exists(self) -> None: if os.path.exists(self.__cache_file): self.__config.open_file(self.__cache_file, "r", self.__process_cache)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)", "def read_cached_file(self, path):\n if self.config.get('do_caching', False):\n ext = path.split('.')[-1]\n\n if ext == 'cache':\n with open(path, 'r') as fd:\n try:\n return fd.read()\n except UnicodeDecodeError as e:\n self.logger.warning(str(e))\n else:\n raise Exception('\"{}\" is a invalid cache file.'.format(path))", "def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)", "def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None", "def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return", "def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data", "def _read_cache_file(self) -> bytes:\n with open(self.cache_file, 'rb') as file:\n return file.read()", "def get_output_from_cache(name, filename):\n cache_filename = _get_cache_filename(name, filename)\n if (os.path.exists(cache_filename) and\n os.path.getmtime(filename) < os.path.getmtime(cache_filename)):\n with io.open(cache_filename) as f:\n return f.read()\n\n return None", "def open(self):\n super(NoneCache, self).open()", "def loadcache(self, cachepath):\n loadfunc = json.load if self.serializer == 'json' else pickle.load\n try:\n # check for recency\n if self.expiration > 0:\n elapsed = time.time() - os.stat(cachepath).st_mtime\n #print >>sys.stderr, '%s exp, %s elapsed' % (self.expiration, elapsed)\n if elapsed > self.expiration:\n if self.expirepolicy == 'archive':\n os.rename(cachepath, self.archivepath(cachepath))\n raise IOError\n return loadfunc(open(cachepath))\n except Exception, e:\n #print >>sys.stderr, 'Could not load cache file %s: %s' % (cachepath, e)\n raise IOError('Could not load cache file %s: %s' % (cachepath, e))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def get_cachefile(filename):\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n return os.path.join(cachedir, filename)", "def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n 
LOG.debug(_(\"Reloading cached file %s\") % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']", "def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(\"Reloading cached file %s\" % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']", "def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)", "def auth_from_cache(self, rfid):\n \n #can we open the file\n base = os.path.dirname(os.path.abspath(__file__))\n fname = \"{}/db/{}.json\".format(base, rfid)\n \n try:\n mtime = os.path.getmtime(fname)\n delta_t = time.time() - mtime\n if delta_t > self.CACHE_STALE_T:\n return False\n \n db_file = open(fname, 'r')\n print('Opened file in cache [{}]'.format(fname))\n data = db_file.read()\n db_file.close()\n return self.json_has_access_now(data)\n except FileNotFoundError:\n print('Could not open [{}]'.format(fname))\n return False", "def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j", "def safe_read(self, name):\n full_path = self.path(name)\n content = None\n if self.exists(name):\n with caches['default'].lock('{}_{}'.format(full_path, 'reader')):\n fd = self._volume.open(full_path)\n try:\n content = fd.read()\n finally:\n fd.close()\n return content", "def loadGameFromCache(self, theKey):\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n theLine = None\n try:\n theLine = br.readLine()\n br.close()\n ir.close()\n gIn.close()\n fIn.close()\n except Exception as e:\n if theLine == None:\n return None\n return Game.loadFromJSON(theLine)", "def use_cached_files(self, cache_key):\r\n pass", "def get_cached(self, keyword, search_engine, scrapemode, page_number):\n if self.config.get('do_caching', False):\n file_name = self.cached_file_name(\n keyword,\n search_engine,\n scrapemode,\n page_number\n )\n cache_dir = self.config.get('cachedir', self.CACHEDIR)\n if file_name in os.listdir(cache_dir):\n try:\n modtime = os.path.getmtime(\n os.path.join(cache_dir, file_name)\n )\n except FileNotFoundError:\n return False\n modtime = (time.time() - modtime) / 60 / 60\n if (modtime > int(self.config('clean_cache_after', 48))):\n return False\n path = os.path.join(cache_dir, file_name)\n return self.read_cached_file(path)\n else:\n return False", "def read_cache():\n try:\n cache_file = open(CACHE_FILENAME, 'r', encoding=\"utf-8\")\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n return cache_dict\n except:\n cache_dict = {}\n return cache_dict", "def cache_get(item: str) -> object:\n\titem = str(item)\n\tcache = cache_find(item)\n\n\t# cache_find() will return none if the cache does not exist\n\t# the returned location is guaranteed to exist, so no point checking again.\n\n\tif cache is not None:\n\t\ttry:\n\t\t\tcached = pickle.load(open(cache, \"rb\"))\n\t\texcept EOFError as ex:\n\t\t\t# Cache file is corrupted, so print an error and act like it does\n\t\t\t# not exist. 
We do not delete the cache file incase the user wants\n\t\t\t# to recover the file.\n\t\t\tuux.show_error(\"Error when loading file from cache: \" + str(ex))\n\t\t\treturn None\n\t\texcept Exception as ex:\n\t\t\traise ex\n\t\tuux.show_debug(\"Cache hit for \" + item)\n\t\treturn cached\n\n\treturn None", "def _check_cache(self):\n return os.path.exists(self._cache_key)", "def cached_load(filepath: str) -> io.BytesIO:\n with open(filepath, 'rb') as f:\n return io.BytesIO(f.read())", "def getFile(self, path):\n\t\ttry:\n\t\t\tlogger.info('getFile(%s)' % (path))\n\n\t\t\t# Check if file is in cache\n\t\t\tif self.cache_files.has_key(path):\n\t\t\t\tlogger.info('* Retrieving tmpfile name from the cache')\n\t\t\t\treturn self.cache_files[path]['tmpfile']\n\t\t\telse:\n\t\t\t\tlogger.info('* Needs to download the file')\n\n\t\t\t\t# Generate temp file; tmp has a file descriptor, tmp_name the name of the file\n\t\t\t\ttmp, tmp_name = mkstemp()\n\t\t\t\t\n\t\t\t\tlogger.info('* Generated name = %s' % (tmp_name))\n\n\t\t\t\t# Download file from dropbox\n\t\t\t\tif self.downloadFile(path, tmp) == True:\n\t\t\t\t\tlogger.info('* File downloaded')\n\n\t\t\t\t\t# Add to cache\n\t\t\t\t\tself.cache_files[path] = {} \n\t\t\t\t\tself.cache_files[path]['tmpfile'] = tmp_name\n\t\t\t\t\tlogger.info('* Added to cache, file %s is actually %s' % (path, tmp_name))\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\n\t\t\t\treturn tmp_name\n\t\texcept Exception, e:\n\t\t\tinfo = sys.exc_info()\n\t\t\tlogger.error(\"Exception %s at getFile(%s)\" % (info[0],path))\n\t\t\tlogger.error(pformat(info))\n\t\t\treturn False", "def get_json_from_cache(file_name):\n result = None\n path = clean_path(file_name)\n cached_file_name = get_cached_file_name(path)\n if os.path.exists(cached_file_name):\n time = os.path.getmtime(path)\n cached_time = os.path.getmtime(cached_file_name)\n if cached_time > time:\n try:\n source = open(cached_file_name, \"r\")\n try:\n result = json.load(source)\n except ValueError:\n pass\n source.close()\n except OSError:\n # Includes IOError\n pass\n return result" ]
[ "0.75952566", "0.73163617", "0.72234976", "0.71021736", "0.70085645", "0.69819254", "0.69125575", "0.69095606", "0.6903061", "0.689758", "0.6829601", "0.67801297", "0.67788893", "0.67788893", "0.6751688", "0.6725002", "0.67220324", "0.67138314", "0.6657382", "0.66273785", "0.65604734", "0.6443998", "0.64288324", "0.64285004", "0.64209944", "0.6374369", "0.6349324", "0.6346097", "0.63149285", "0.63101566" ]
0.8685231
0
Given a `node`, returns its target typename. For "call_method" node, return node.target which is the name of that method being called. This could potentially lead to conflict but should be okay because normally it's on a tensor. For "call_function" node, return typename of node.target. For "call_module" node, return typename of the module that node.target points to. If seeing "_VariableFunctionsClass" in the target name string, it will be replaced by "torch". e.g. _VariableFunctionsClass.relu would become torch.relu.
def get_node_target(submodules: Mapping[str, torch.nn.Module], node: pippy.fx.Node) -> str: assert node.op in CALLABLE_NODE_OPS, ( "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}" ) if node.op == "call_module": assert isinstance(node.target, str) submod = submodules[node.target] submod_type = getattr(submod, "_base_class_origin", type(submod)) return get_acc_ops_name(submod_type) elif node.op == "call_function": target: Any = node.target return ( f"acc_ops.{target.__name__}" if target.__module__ is not None and "acc_ops" in target.__module__ else _get_qualified_name(target) ) else: assert isinstance(node.target, str) return node.target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _targetof(node):\r\n if node is None: return None\r\n return node.target", "def _determine_function_name_type(\n node: nodes.FunctionDef, config: argparse.Namespace\n) -> str:\n property_classes, property_names = _get_properties(config)\n if not node.is_method():\n return \"function\"\n\n if is_property_setter(node) or is_property_deleter(node):\n # If the function is decorated using the prop_method.{setter,getter}\n # form, treat it like an attribute as well.\n return \"attr\"\n\n decorators = node.decorators.nodes if node.decorators else []\n for decorator in decorators:\n # If the function is a property (decorated with @property\n # or @abc.abstractproperty), the name type is 'attr'.\n if isinstance(decorator, nodes.Name) or (\n isinstance(decorator, nodes.Attribute)\n and decorator.attrname in property_names\n ):\n inferred = utils.safe_infer(decorator)\n if (\n inferred\n and hasattr(inferred, \"qname\")\n and inferred.qname() in property_classes\n ):\n return \"attr\"\n return \"method\"", "def get_type(node):\n # Assume there is only one type inferred\n # If there are multiple types inferred we have to\n # choose which one to pick\n try:\n if len(node.inferred()) > 0:\n ty_infer = node.inferred()[0]\n if isinstance(ty_infer, Module):\n ty = ty_infer.name\n elif isinstance(ty_infer, ClassDef):\n ty = ty_infer.name\n elif isinstance(ty_infer, type(Uninferable)):\n ty = None\n else:\n ty = ty_infer.pytype().replace(\"builtins.\", \"\").lstrip(\".\")\n else:\n ty = None\n except Exception as err:\n ty = None\n\n return ty", "def target_entity_type_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_entity_type_name\")", "def node_name(self) -> str:\n op_name = f\"{self.name.name}_{self.name.overload_name}\".lower()\n return \"\".join(word.capitalize() or \"\" for word in op_name.split(\"_\"))", "def test_get_node_type_name(self):\n pass", "def target_type(self) -> str:\n return pulumi.get(self, \"target_type\")", "def target_entity_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_entity_type\")", "def identifyTargetType(self, target):\n ipAddress = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}')\n ipFind = re.findall(ipAddress, target)\n if ipFind is not None and len(ipFind) > 0:\n return \"ip\"\n\n md5 = re.compile('[a-fA-F0-9]{32}', re.IGNORECASE)\n md5Find = re.findall(md5,target)\n if md5Find is not None and len(md5Find) > 0:\n return \"md5\"\n\n return \"hostname\"", "def get_node_type(self, node):\n raise NotImplementedError()", "def infer_type(node):\n mod = tvm.IRModule.from_expr(node)\n mod = relay.transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(node, relay.Function) else entry.body", "def get_name_for(node: Union[str, cst.CSTNode]) -> Optional[str]:\n if isinstance(node, cst.Name):\n return node.value\n elif isinstance(node, str):\n return node\n elif isinstance(node, cst.Call):\n return _NameUtil.get_name_for(node.func)\n elif isinstance(node, cst.Subscript):\n return _NameUtil.get_name_for(node.value)\n elif isinstance(node, (cst.FunctionDef, cst.ClassDef)):\n return _NameUtil.get_name_for(node.name)\n return None", "def target_type(self) -> Optional[str]:\n return pulumi.get(self, \"target_type\")", "def node_type(self) -> str:\n return pulumi.get(self, \"node_type\")", "def infer_type(node, mod=None):\n new_mod = _module.Module.from_expr(node)\n if mod is not None:\n new_mod.update(mod)\n new_mod = _transform.InferType()(new_mod)\n entry = new_mod[\"main\"]\n return entry if 
isinstance(node, _expr.Function) else entry.body", "def _getNodeType(node): # {{{\n if node.nodeType == node.ELEMENT_NODE : return \"ELEMENT_NODE\"\n elif node.nodeType == node.ATTRIBUTE_NODE : return \"ATTRIBUTE_NODE\"\n elif node.nodeType == node.TEXT_NODE : return \"TEXT_NODE\"\n elif node.nodeType == node.CDATA_SECTION_NODE : return \"CDATA_SECTION_NODE\"\n elif node.nodeType == node.ENTITY_NODE : return \"ENTITY_NODE\"\n elif node.nodeType == node.PROCESSING_INSTRUCTION_NODE : return \"PROCESSING_INSTRUCTION_NODE\"\n elif node.nodeType == node.COMMENT_NODE : return \"COMMENT_NODE\"\n elif node.nodeType == node.DOCUMENT_NODE : return \"DOCUMENT_NODE\"\n elif node.nodeType == node.DOCUMENT_TYPE_NODE : return \"DOCUMENT_TYPE_NODE\"\n elif node.nodeType == node.NOTATION_NODE : return \"NOTATION_NODE\"\n return \"UKNOWN NODE\"", "def getTargetType(self):\n return self.target_type", "def node_type(self):\n return self._node_type", "def typename ( o ) :\n return type ( o ) .__name__", "def node_type( fdt, node_offset, verbose=0 ):\n rt = \"\"\n try:\n node = fdt.get_node( node_offset )\n rt = node.props[\"compatible\"].to_string()\n except Exception as e:\n pass\n\n return rt", "def call_node_infer_type(node):\n infer_out = infer_type(node)\n out_type = infer_out._checked_type_\n if isinstance(out_type, TensorType):\n types = [out_type]\n elif isinstance(out_type, TupleType):\n types = list(out_type.fields)\n else:\n raise RuntimeError(f\"Unsupported output type {type(out_type)} in operator {node.op.name}\")\n\n return types", "def target_type(self):\n\n return self._target_type", "def node_type(self) -> Optional[str]:\n return pulumi.get(self, \"node_type\")", "def get_nodeName(taxonomy, nodeId):", "def getnodetype(self, node_p):\n node_p = self.getnodenamed(node_p) # Verify pointer.\n\n # (const node_bn* node)\n cnetica.GetNodeType_bn.argtypes = [c_void_p]\n cnetica.GetNodeType_bn.restype = c_int\n return cnetica.GetNodeType_bn(node_p) # node_type", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def gen_type_string(self, node):\n return self._gen_table[node.node_type()](self, node)", "def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)", "def typename(obj):\n return obj.__name__ if hasattr(obj, '__name__') else type(obj).__qualname__", "def typename(atype):\n if not isinstance(atype, type):\n raise Exception('Argument is not a type')\n\n modulename = atype.__module__\n typename = atype.__name__\n\n if modulename != '__builtin__':\n typename = modulename + '.' + typename\n\n return typename" ]
[ "0.63309675", "0.6100639", "0.60816646", "0.59453785", "0.58337826", "0.571037", "0.5704307", "0.5654451", "0.56432414", "0.5600983", "0.5590909", "0.5473086", "0.5463391", "0.5450949", "0.54132587", "0.5390162", "0.53383803", "0.5285833", "0.5279004", "0.5275299", "0.5263115", "0.5255802", "0.52515846", "0.52498335", "0.5244101", "0.5228004", "0.5197141", "0.5164247", "0.5151675", "0.5132773" ]
0.7547294
0
Checks if the node output produces a Tensor or not.
def is_node_output_tensor(node: pippy.fx.Node) -> bool: type_ = node.meta.get("type", None) return type_ is not None and issubclass(type_, torch.Tensor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_tensor(x: Any) -> bool:\n if has_tensorflow and isinstance(x, _TfTensor):\n return True\n if has_pytorch and isinstance(x, _PtTensor):\n return True\n if isinstance(x, np.ndarray):\n return True\n return False", "def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\r\n raise NotImplementedError()", "def is_tensor(self):\n return not self.is_scalar", "def is_tensor(obj):\n raise NotImplementedError", "def is_keras_tensor(x):\n if not is_tensor(x):\n raise ValueError('Unexpectedly found an instance of type `' +\n str(type(x)) + '`. '\n 'Expected a symbolic tensor instance.')\n return hasattr(x, '_keras_history')", "def is_tensor(x: Any, backend=None) -> bool:\r\n module = get_module(backend)\r\n return module.is_tensor(x)", "def check_tensor(tens: Tensorable, device='cpu') -> torch.Tensor:\n if isinstance(tens, torch.Tensor):\n return tens\n return torch.Tensor(tens).to(device)", "def check_is_tensor(obj):\n if not isinstance(obj, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(obj)))", "def check_output(self, out, target):\n size_out = out.size()\n size_target = target.size()\n assert size_target\n assert size_out[0] == size_target[0], \\\n (\"Output and target have different batch sizes (first dimension): \"\n f\"{size_out} != {size_target}\")\n # Transform the output into classes.\n out = self._check_output_helper(out)\n size_out = out.size()\n assert size_out == size_target, \\\n f\"Output and target sizes do not match: {size_out} != {size_target}\"\n # eq(): Compare the outputs to the labels.\n # type(): Cast the resulting bools to ints.\n # sum(): Sum them up to get the total number of correct predictions.\n return out.eq(target).type(torch.int).sum().item()", "def is_tensor(x):\n if is_torch_available():\n import torch\n\n if isinstance(x, torch.Tensor):\n return True\n\n return isinstance(x, np.ndarray)", "def is_tensor_spec(self) -> bool:\n return self.inputs and isinstance(self.inputs[0], TensorSpec)", "def HasTensor(tensor):\n return _C.HasTensor(_stringify_tensor(tensor))", "def HasTensor(tensor):\n return HasTensorCC(_stringify_tensor(tensor))", "def IsOutputNode(self) -> bool:\n return self._isoutput", "def _check_output_is_scalar(cls, module: Module) -> None:\n if module.output.numel() != 1:\n raise ValueError(\n \"Output must be scalar. 
Got {}\".format(module.output.shape)\n )", "def is_not_sink_tf(self) -> tf.Tensor:\n return tf.not_equal(self.out_degrees_tf_vector, 0)", "def has_tensor(obj) -> bool:\n if isinstance(obj, torch.Tensor):\n return True\n elif isinstance(obj, dict):\n return any(has_tensor(value) for value in obj.values())\n elif isinstance(obj, (list, tuple)):\n return any(has_tensor(item) for item in obj)\n else:\n return False", "def is_tcns(x):\n return type(x) is T.TensorConstant", "def check_device_type(self, tensor, device_input, prev_device):\n if None is device_input:\n assert tensor.device.type == prev_device\n\n else:\n assert tensor.device.type == device_input", "def __is_tree_node(self, node):\n if not node.input:\n if len(node.output) > 1:\n return False\n\n if len(node.output) > 1:\n return False\n\n for input_node in node.input:\n cls = self.__is_tree_node(input_node)\n if not cls:\n return False\n return True", "def check_nodes(self) -> bool:\n # check the input-output consistency\n for op_name in self.__ops:\n op = cast(Operator, self.__ops[op_name])\n inputs: Dict[str, Operator] = op.input_ops\n for i in inputs.values():\n if op not in i.output_op_list:\n return False\n\n return True", "def _skip_tensor(self, op_id, out_tensor, report_handler):\n\n # Skips a tensor if the tensor has a non-numeric type.\n # Note: we cannot use check_ops.is_numeric_tensor(out_tensor)\n # because it also excludes tensors with dtypes, bool, and\n # float32_ref, which we actually want to trace.\n non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,\n dtypes.string])\n if out_tensor.dtype in non_numeric_tensor_types:\n\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))\n return True\n # Skip a tensor if it feeds a special while loop op.\n if [consumer for consumer in out_tensor.consumers() if\n TensorTracer.while_loop_op(consumer)]:\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))\n return True\n if self._is_user_included_op(out_tensor.op):\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))\n if tensor_tracer_flags.TT_CHECK_FILTER.value:\n logging.info('USER_INCLUDED tensor %s', out_tensor.name)\n return False\n if self._is_user_excluded_op(out_tensor.op):\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))\n if tensor_tracer_flags.TT_CHECK_FILTER.value:\n logging.info('USER_EXCLUDED tensor %s', out_tensor.name)\n return True\n if not out_tensor.get_shape().is_fully_defined():\n # If trace mode is nan-inf, norm or max, then the tensor will be reduced\n # to a scalar before the outside compilation call.\n if self._parameters.trace_mode in (\n tensor_tracer_flags.TRACE_MODE_NAN_INF,\n tensor_tracer_flags.TRACE_MODE_NORM,\n tensor_tracer_flags.TRACE_MODE_HISTORY,\n tensor_tracer_flags.TRACE_MODE_MAX_ABS,\n tensor_tracer_flags.TRACE_MODE_SUMMARY\n ):\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))\n return False\n else:\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))\n return True\n rank = len(out_tensor.shape)\n if rank < 1:\n # scalar\n if self._parameters.trace_scalar_ops:\n if TensorTracer.unsafe_scalar_trace(out_tensor.op):\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))\n return True\n else:\n report_handler.instrument_tensor(\n 
out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))\n return False\n else:\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))\n return True\n else:\n # tensor\n report_handler.instrument_tensor(\n out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))\n return False", "def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, GpuArrayType):\r\n raise NotImplementedError()", "def _inspect_tensor(tensor):\n if (self._parameters.trace_mode ==\n tensor_tracer_flags.TRACE_MODE_NAN_INF):\n return cond.cond(\n math_ops.greater(tensor, 0.0),\n lambda: 'has NaNs/Infs!',\n lambda: 'has no NaNs or Infs.')\n else:\n return tensor", "def is_encrypted_tensor(obj):\n return isinstance(obj, CrypTensor)", "def _tensors_defined(self):\n tensors = [self.tensor_u, self.tensor_v, self.tensor_w]\n return all((tensor is not None for tensor in tensors))", "def _is_sink() -> bool:\n\n def _is_inplace(n: Node):\n \"\"\"Get the inplace argument from ``torch.fx.Node``\n \"\"\"\n inplace = False\n if n.op == \"call_function\":\n inplace = n.kwargs.get(\"inplace\", False)\n elif n.op == \"call_module\":\n inplace = getattr(n.graph.owning_module.get_submodule(n.target), \"inplace\", False)\n return inplace\n\n def _is_shape_consistency(n: Node):\n \"\"\"Check if this node is shape-consistency node (i.e. ``runtime_apply`` or ``runtime_apply_for_iterable_object``)\n \"\"\"\n return n.target in [runtime_apply, runtime_apply_for_iterable_object, runtime_comm_spec_apply]\n\n return not sum([v for _, v in deps.items()]) and not any(map(_is_inplace, n.users)) and not any(\n map(_is_shape_consistency, n.users))", "def run_on_device(self, tensor: torch.Tensor) -> DeviceOutputs:\n if not isinstance(tensor, torch.Tensor):\n raise ValueError(\n f\"Expected a torch Tensor for tensor summary {self.name}, \"\n f\"got {type(tensor)}\"\n )\n return super().run_on_device(tensor)", "def test1():\n x = [0, 1, 2]\n print(\"whether x is tensor :\", torch.is_tensor(x)) # check whether is tensor -> False\n print(\"whether x is storage :\", torch.is_storage(x)) # check whether is stored -> False\n\n y = torch.randn(3,2) # shape=(3, 2) / torch.zeros(3, 2)\n print(\"whether y is tensor :\", torch.is_tensor(y)) # check whether is tensor -> True\n print(\"whether y is storage :\", torch.is_storage(y)) # check whether is stored -> False\n\n print(\"the total number of elements in the input Tensor is : {}\".format(torch.numel(y)))", "def is_tvar(x):\n return type(x) is T.TensorVariable" ]
[ "0.7124051", "0.71176755", "0.70057577", "0.6854042", "0.6773735", "0.6654675", "0.66518706", "0.6620085", "0.65847135", "0.65643764", "0.64473575", "0.6427913", "0.6383607", "0.6345796", "0.62348086", "0.6162581", "0.607644", "0.604991", "0.5989577", "0.5882552", "0.5872916", "0.58355314", "0.5820423", "0.57816315", "0.5765823", "0.57590896", "0.5749717", "0.57423013", "0.5716794", "0.5707028" ]
0.8322501
0
Replace the graph of the given GraphModule with one that contains the same nodes as the original, but in topologically sorted order. This is used by the merge_matmul transformation below, which disturbs the topologically sorted order of its input GraphModule, so that this order is restored before further transformation.
def legalize_graph(gm: pippy.fx.GraphModule) -> pippy.fx.GraphModule: indeg = {node: 0 for node in gm.graph.nodes} new_graph = pippy.fx.Graph() # Track how many unfulfilled dependencies each node has for node in gm.graph.nodes: for user in node.users: indeg[user] += 1 queue: collections.deque = collections.deque() # Add all nodes with no dependencies to the queue for node in gm.graph.nodes: if indeg[node] == 0: queue.append(node) env: Dict[pippy.fx.Node, pippy.fx.Node] = {} # Pop nodes from the queue, and add nodes that have had all their # dependencies fulfilled while len(queue) > 0: cur = queue.popleft() env[cur] = new_graph.node_copy(cur, lambda x: env[x]) for user in cur.users: indeg[user] -= 1 if indeg[user] == 0: queue.append(user) # If the new graph's size is not as large as the old one, then there must be # a cycle (i.e. some node's dependencies were not satisfied.) if len(new_graph.nodes) < len(gm.graph.nodes): raise RuntimeError(f"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}") gm.graph = new_graph return gm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False", "def transform(g):\n min_nidx = min(g.nodes)\n max_nidx = max(g.nodes)\n\n if min_nidx == 0 and max_nidx == g.number_of_nodes() - 1: # Everything already labeled as wanted\n return g\n\n nodes = sorted(g.nodes) # Get the sorted nodes\n # Relabel the nodes by their index in the list\n relabel_dict = {nidx: idx for idx, nidx in enumerate(nodes)}\n\n # Also shift node labels (important for saucy)\n if 'label' in g.nodes[g.nodes.keys()[0]]:\n for n_id in g.nodes:\n g.nodes[n_id]['label'] = relabel_dict[g.nodes[n_id]['label']]\n\n g = nx.relabel_nodes(g, relabel_dict)\n assert min(g.nodes) == 0 and max(g.nodes) == g.number_of_nodes() - 1\n\n return g", "def dag_topology_sort(self):\n mlist = []\n mod_wrapper = self.mod_wrapper.copy()\n while mod_wrapper:\n temp_list = []\n for mod, wrapper in mod_wrapper.items():\n if wrapper.is_root_mod():\n temp_list.append(mod)\n wrapper.remove_self_from_bindings()\n\n for mod in temp_list:\n mod_wrapper.pop(mod, None)\n\n mlist += temp_list\n\n mod_wrapper_sort = {}\n for mod, i in zip(mlist, range(len(mlist))):\n self.mod_wrapper[mod].set_idx_name(i)\n mod_wrapper_sort[mod] = self.mod_wrapper[mod]\n\n self.mod_wrapper = mod_wrapper_sort", "def reshuffle_graph(g, node_part=None):\n # In this case, we don't need to reshuffle node IDs and edge IDs.\n if node_part is None:\n g.ndata[\"orig_id\"] = F.arange(0, g.num_nodes())\n g.edata[\"orig_id\"] = F.arange(0, g.num_edges())\n return g, None\n\n start = time.time()\n if node_part is not None:\n node_part = utils.toindex(node_part)\n node_part = node_part.tousertensor()\n if NTYPE in g.ndata:\n is_hetero = len(F.unique(g.ndata[NTYPE])) > 1\n else:\n is_hetero = False\n if is_hetero:\n num_node_types = F.max(g.ndata[NTYPE], 0) + 1\n if node_part is not None:\n sorted_part, new2old_map = F.sort_1d(\n node_part * num_node_types + g.ndata[NTYPE]\n )\n else:\n sorted_part, new2old_map = F.sort_1d(g.ndata[NTYPE])\n sorted_part = F.floor_div(sorted_part, num_node_types)\n elif node_part is not None:\n sorted_part, new2old_map = F.sort_1d(node_part)\n else:\n g.ndata[\"orig_id\"] = g.ndata[NID]\n g.edata[\"orig_id\"] = g.edata[EID]\n return g, None\n\n new_node_ids = np.zeros((g.num_nodes(),), dtype=np.int64)\n new_node_ids[F.asnumpy(new2old_map)] = np.arange(0, g.num_nodes())\n # If the input graph is homogneous, we only need to create an empty array, so that\n # _CAPI_DGLReassignEdges_Hetero knows how to handle it.\n etype = (\n g.edata[ETYPE]\n if ETYPE in g.edata\n else F.zeros((0), F.dtype(sorted_part), F.cpu())\n )\n g = reorder_nodes(g, new_node_ids)\n node_part = utils.toindex(sorted_part)\n # We reassign edges in in-CSR. 
In this way, after partitioning, we can ensure\n # that all edges in a partition are in the contiguous ID space.\n etype_idx = utils.toindex(etype)\n orig_eids = _CAPI_DGLReassignEdges_Hetero(\n g._graph, etype_idx.todgltensor(), node_part.todgltensor(), True\n )\n orig_eids = utils.toindex(orig_eids)\n orig_eids = orig_eids.tousertensor()\n g.edata[\"orig_id\"] = orig_eids\n\n print(\n \"Reshuffle nodes and edges: {:.3f} seconds\".format(time.time() - start)\n )\n return g, node_part.tousertensor()", "def reorder_nodes(g, new_node_ids):\n assert (\n len(new_node_ids) == g.num_nodes()\n ), \"The number of new node ids must match #nodes in the graph.\"\n new_node_ids = utils.toindex(new_node_ids)\n sorted_ids, idx = F.sort_1d(new_node_ids.tousertensor())\n assert (\n F.asnumpy(sorted_ids[0]) == 0\n and F.asnumpy(sorted_ids[-1]) == g.num_nodes() - 1\n ), \"The new node IDs are incorrect.\"\n new_gidx = _CAPI_DGLReorderGraph_Hetero(\n g._graph, new_node_ids.todgltensor()\n )\n new_g = DGLGraph(gidx=new_gidx, ntypes=[\"_N\"], etypes=[\"_E\"])\n new_g.ndata[\"orig_id\"] = idx\n return new_g", "def swap(newGraph):\n\n oldGraph = globals()[\"currentGraph\"]\n globals()[\"currentGraph\"] = newGraph\n return oldGraph", "def reset_graph(self):\n self.graph = OrderedDict()", "def _reorder_nodes(orient, nodes, flip_matrix, unflip=False):\n # reorder nodes (Code adapted from\n # meshmode.mesh.processing.flip_simplex_element_group)\n\n # ( round to int bc applying on integers)\n flip_mat = np.rint(flip_matrix)\n if unflip:\n flip_mat = flip_mat.T\n\n # flipping twice should be identity\n assert la.norm(\n np.dot(flip_mat, flip_mat)\n - np.eye(len(flip_mat))) < 1e-13\n\n # flip nodes that need to be flipped\n flipped_nodes = np.copy(nodes)\n flipped_nodes[orient < 0] = np.einsum(\n \"ij,ej->ei\",\n flip_mat, nodes[orient < 0])\n\n return flipped_nodes", "def cloneGraph2(self, node):\n if not node:\n return None\n cloned = {}\n queue = [node]\n for original_node in queue:\n if original_node.label not in cloned:\n cloned[original_node.label] = UndirectedGraphNode(original_node.label)\n for neighbor in original_node.neighbors:\n if neighbor.label not in cloned:\n cloned[neighbor.label] = UndirectedGraphNode(neighbor.label)\n queue.append(neighbor)\n cloned[original_node.label].neighbors.append(cloned[neighbor.label])\n return cloned[node.label]", "def flip(graph, node):\n old_neighs = graph.neighbors(node)\n new_node = np.asarray(old_neighs[0]) + np.asarray(old_neighs[1]) + np.asarray(old_neighs[2]) - 2*np.asarray(node)\n new_node = tuple(new_node)\n old_vecs = -np.asarray(node) + old_neighs\n new_neighs = [(new_node, tuple(new_node - v)) for v in old_vecs if tuple(new_node - v) in graph.nodes()]\n \n # remove the node\n graph.remove_node(node)\n # add the new node and connect it to its neighbors\n graph.add_edges_from(new_neighs)\n \n # recompute the projections and change them\n graph.node[new_node]['para'] = np.dot(P, new_node)\n graph.node[new_node]['perp'] = np.dot(Pi, new_node)", "def get_graph(self):\n graph = copy.deepcopy(self.G)\n for source, dests in graph.items():\n for dest in dests:\n constraint = graph[source][dest]['constraint']\n new_constraint = self.preprocess_constraint(constraint)\n graph[source][dest]['constraint'] = new_constraint\n return graph", "def relabel_nodes(graph: BaseGraph, mapping: Dict) -> None:\n relabel_nodes(graph.graph, mapping, copy=False)", "def replace_node(old_node: Node, new_node: Node):\n assert old_node.graph is new_node.graph\n graph = old_node.graph\n # save 
output edges and reconnect them to new node\n for i in range(len(old_node.out_nodes())):\n graph.add_edge(new_node.id, old_node.out_node(i).id, **old_node.out_edge(i))\n # TODO Need to check if there are other users for this node\n graph.remove_node(old_node.id)", "def robust_topological_sort(graph):\n \n components = strongly_connected_components(graph)\n \n node_component = { }\n for component in components:\n for node in component:\n node_component[node] = component\n \n component_graph = { }\n for component in components:\n component_graph[component] = [ ]\n \n for node in graph:\n node_c = node_component[node]\n for successor in graph[node]:\n successor_c = node_component[successor]\n if node_c != successor_c:\n component_graph[node_c].append(successor_c) \n \n return topological_sort(component_graph)", "def topological_sort(self):\n in_degree = {}\n for node in self.graph:\n in_degree[node] = 0\n\n for from_node in self.graph:\n for to_node in self.graph[from_node]:\n in_degree[to_node] += 1\n\n queue = deque()\n for node in in_degree:\n if in_degree[node] == 0:\n queue.appendleft(node)\n\n sorted_nodes = []\n while queue:\n independent_node = queue.pop()\n sorted_nodes.append(independent_node)\n for next_node in self.graph[independent_node]:\n in_degree[next_node] -= 1\n if in_degree[next_node] == 0:\n queue.appendleft(next_node)\n\n if len(sorted_nodes) == len(self.graph):\n return sorted_nodes\n else:\n raise ValueError('graph is not acyclic')", "def topological_sort(self, graph=None):\n if graph is None:\n graph = self.graph\n\n in_degree = {}\n for u in graph:\n in_degree[u] = 0\n\n for u in graph:\n for v in graph[u]:\n in_degree[v] += 1\n\n queue = deque()\n for u in in_degree:\n if in_degree[u] == 0:\n queue.appendleft(u)\n\n l = []\n while queue:\n u = queue.pop()\n l.append(u)\n for v in graph[u]:\n in_degree[v] -= 1\n if in_degree[v] == 0:\n queue.appendleft(v)\n\n if len(l) == len(graph):\n return l\n else:\n raise ValueError(\"graph is not acyclic\")", "def copy_graph(g):\n return copy.deepcopy(g)", "def partitioner(graph: GraphModule) -> GraphModule:\n shape_adjustment_ops = {\n aten._unsafe_view.default: 1,\n aten.expand.default: 1,\n aten.new_zeros.default: 1,\n aten.ones.default: 0,\n aten.reshape.default: 1,\n aten.view.default: 1,\n aten.zeros.default: 0,\n }\n # partition the graph to distributed\n for node in graph.graph.nodes:\n node_sharding = node.meta[\"sharding\"]\n # None sharding means this node don't need sharding\n if node_sharding is None:\n continue\n\n if node.op == \"placeholder\":\n out_spec = node_sharding.output_spec\n if not hasattr(out_spec, \"from_local\"):\n local_val = _partition_val(node.meta[\"val\"], out_spec)\n # update node value\n node.meta[\"val\"] = local_val\n elif node.op == \"call_function\":\n out_spec = node_sharding.output_spec\n\n # check if there's misaligned sharding, insert reshard if there is\n expected_input_specs = node_sharding.input_specs\n for idx, input_arg in enumerate(node.all_input_nodes):\n input_arg_sharding = input_arg.meta[\"sharding\"]\n\n input_arg_spec = input_arg_sharding.output_spec\n desired_spec = (\n out_spec\n if expected_input_specs is None\n else expected_input_specs[idx]\n )\n if input_arg_spec != desired_spec:\n input_full_shape = input_arg.meta[\"tensor_meta\"].shape\n input_arg_tensor = input_arg.meta[\"val\"]\n\n # insert reshard operation\n def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor:\n return _redistribute_with_local_tensor(\n local_tensor,\n input_full_shape,\n out_spec.mesh,\n 
input_arg_spec.placements,\n desired_spec.placements,\n )\n\n reshard_gm = make_fx(reshard_fn)(input_arg_tensor)\n reshard_gm_nodes = list(reshard_gm.graph.nodes)\n input_node = reshard_gm_nodes[0]\n with graph.graph.inserting_before(node):\n output_node = graph.graph.graph_copy(\n reshard_gm.graph,\n val_map={\n input_node: input_arg,\n },\n )\n node.replace_input_with(input_arg, output_node)\n\n output_val = node.meta[\"val\"]\n\n if node.target == torch.ops.aten.repeat.default:\n # for repeat op, we need to infer the repeat sizes\n assert isinstance(output_val, torch.Tensor)\n local_shape = compute_local_shape(\n output_val.shape, out_spec.mesh, out_spec.placements\n )\n input_shape = node.args[0].meta[\"val\"].shape\n\n def infer_repeat_sizes(repeated_shape, input_shape):\n repeated_size = [1] * len(repeated_shape)\n padded_length = len(repeated_shape) - len(input_shape)\n for i in range(len(repeated_shape)):\n if i < padded_length:\n repeated_size[i] = repeated_shape[i]\n else:\n repeated_size[i] = (\n repeated_shape[i] // input_shape[i - padded_length]\n )\n\n return repeated_size\n\n node.update_arg(1, infer_repeat_sizes(local_shape, input_shape))\n\n elif node.target in shape_adjustment_ops:\n # for view related op that needs shape, adjust shape to local shape if needed\n assert isinstance(output_val, torch.Tensor)\n local_shape = compute_local_shape(\n output_val.shape, out_spec.mesh, out_spec.placements\n )\n shape_arg_num = shape_adjustment_ops[node.target]\n node.update_arg(shape_arg_num, local_shape)\n\n # convert output val to its local component\n node.meta[\"val\"] = _partition_val(output_val, out_spec)\n\n elif node.op == \"output\":\n break\n else:\n raise RuntimeError(f\"op code {node} not supported\")\n\n # clean up the graph by removing sharding and partitioning related metadata\n for node in graph.graph.nodes:\n if \"sharding\" in node.meta:\n del node.meta[\"sharding\"]\n if \"val\" in node.meta and isinstance(node.meta[\"val\"], torch.Tensor):\n local_tensor_meta = _extract_tensor_metadata(node.meta[\"val\"])\n node.meta[\"tensor_meta\"] = local_tensor_meta\n\n graph.graph.lint()\n graph.recompile()\n return graph", "def permuteEdges(self):\n\t\tpermuted_graph = copy.copy(self)\n\t\t# swap about half the edges\n\t\ti = len(self.graph)/2\n\t\twhile i > 0:\n\t\t\t# swap edge targets\n\t\t\tsourceA, targetA = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeA, emA = permuted_graph.graph[(sourceA, targetA)]\n\t\t\tsourceB, targetB = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeB, emB = permuted_graph.graph[(sourceB, targetB)]\n\n\t\t\t# can't be the same random choice, obviously...\n\t\t\tif sourceA == sourceB or targetA == targetB:\n\t\t\t\tcontinue\n\n\t\t\t# add edges\n\t\t\tpermuted_graph.graph[(sourceA, targetB)] = (iTypeA, emA)\n\t\t\tpermuted_graph.graph[(sourceB, targetA)] = (iTypeB, emB)\n\n\t\t\tdel permuted_graph.graph[(sourceA, targetA)]\n\t\t\tdel permuted_graph.graph[(sourceB, targetB)]\n\n\t\t\ti -= 1\n\n\t\t# return a new graph object\t\t\n\t\treturn permuted_graph", "def robust_topological_sort(graph):\n\n components = strongly_connected_components(graph)\n\n node_component = {}\n for component in components:\n for node in component:\n node_component[node] = component\n\n component_graph = {}\n for component in components:\n component_graph[component] = []\n for node in graph:\n node_c = node_component[node]\n for successor in graph[node]:\n successor_c = node_component[successor]\n if node_c != successor_c:\n 
component_graph[node_c].append(successor_c)\n\n return topological_sort(component_graph)", "def topological_sort(graph_unsorted):\n graph_sorted = []\n graph_unsorted = dict(graph_unsorted)\n while graph_unsorted:\n acyclic = False\n for node, edges in list(graph_unsorted.items()):\n if edges is None:\n edges = []\n for edge in edges:\n if edge in graph_unsorted:\n break\n else:\n acyclic = True\n del graph_unsorted[node]\n graph_sorted.append((node, edges))\n if not acyclic:\n raise RuntimeError('A cyclic dependency occurred')\n return graph_sorted", "def copy_graph(graph):\r\n new_graph = {}\r\n for node in graph:\r\n new_graph[node] = set(graph[node])\r\n return new_graph", "def copy_graph(graph):\r\n new_graph = {}\r\n for node in graph:\r\n new_graph[node] = set(graph[node])\r\n return new_graph", "def reset_graph(self):\n raise NotImplementedError", "def ordering_graph(self):\n\n g = nx.DiGraph()\n\n # add times\n for t in self.nodes_iter():\n g.add_node(t)\n\n # add existing edges\n for t1, t2 in self.edges_iter():\n g.add_edge(t1, t2)\n\n # connect every pair of anchored times\n anchored = sorted(self.anchored())\n for t1, t2 in itertools.combinations(anchored, 2):\n g.add_edge(t1, t2)\n\n # connect every time with its sucessors\n _g = g.copy()\n for t1 in _g:\n for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):\n g.add_edge(t1, t2)\n\n return g", "def copy_graph(graph):\n new_graph = {}\n for node in graph:\n new_graph[node] = set(graph[node])\n return new_graph", "def copy_graph(graph):\n new_graph = {}\n for node in graph:\n new_graph[node] = set(graph[node])\n return new_graph", "def copy_graph(graph):\n new_graph = {}\n for node in graph:\n new_graph[node] = set(graph[node])\n return new_graph", "def copy_graph(graph):\n new_graph = {}\n for node in graph:\n new_graph[node] = set(graph[node])\n return new_graph", "def copy_graph(graph):\n new_graph = {}\n for node in graph:\n new_graph[node] = set(graph[node])\n return new_graph" ]
[ "0.6281253", "0.6020051", "0.5968213", "0.5939152", "0.5891148", "0.587402", "0.57739174", "0.57242393", "0.5655821", "0.56404704", "0.56368893", "0.56269574", "0.56102455", "0.5603213", "0.55983084", "0.5588909", "0.5579634", "0.5571751", "0.55631703", "0.5544777", "0.5541431", "0.554", "0.554", "0.5536057", "0.55269474", "0.55260044", "0.55260044", "0.55260044", "0.55260044", "0.55260044" ]
0.62657577
1
Set the progress trait from the iteration.
def _setProgress(self): self.progress = (self.iteration, self.iterationCount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_progress(self, progress: float):", "def set_progress(self, progress: int) -> None:\n self.update(progress % len(self.frames)) # prevent IndexError if progress >= len(frames)", "def progress(self, progress):\n\n self._progress = progress", "def progress(self, progress):\n\n self._progress = progress", "def _setProgress(self, progress):\n # print \"Progress set %.2f --------------------------------\" % progress\n\n if progress > 100.0:\n progress = 100.0\n\n self._progress = progress\n self._progressChangedNotifier.notify(self)", "def setProgress(self, prog):\n\t\tself.progress = prog", "def setProgress(self, progress):\n\t\tself.config.PROGRESS = [progress]", "def set_progress_value(self, value):\r\n\r\n pass", "def _set_progress(self, value: float) -> None:\n\n self._progress = round(value, 4)\n self._render(self._old_value, self._value, value)", "def set_progress(self, step):\n if self._max and step > self._max:\n self._max = step\n elif step < 0:\n step = 0\n\n prev_period = int(self._step / self.redraw_freq)\n curr_period = int(step / self.redraw_freq)\n\n self._step = step\n\n if self._max:\n self._percent = self._step / self._max\n else:\n self._percent = 0.0\n\n if prev_period != curr_period or self._max == step:\n self.display()", "def iteration(self, iteration):\n self._iteration = iteration", "def iteration(self, iteration):\n self._iteration = iteration", "def iteration(self, iteration):\n self._iteration = iteration", "def progress(self, progress: float):\n if progress is None:\n raise ValueError(\"Invalid value for `progress`, must not be `None`\") # noqa: E501\n \n self._progress = progress", "def progressions(self, progressions):\n\n self._progressions = progressions", "def progressions(self, progressions):\n\n self._progressions = progressions", "def progress(self, progress: float):\n if progress is None:\n raise ValueError(\"Invalid value for `progress`, must not be `None`\") # noqa: E501\n\n self._progress = progress", "def set_progress(self, current):\n self._current = current\n if self._last_time is None or (datetime.datetime.now() - self._last_time).seconds > 1:\n self._update_time()\n\n self._draw()\n if self._current == self._total:\n self.reset(0)", "def setProgress(self, value: int):\n self.ui.progress.setValue(value)", "def set_progress(self, value):\n self.gauge.SetValue(value)", "def current_progress(self, current_progress):\n\n self._current_progress = current_progress", "def set_progress(self, progress: Dict[str, str], is_add: bool):\n if is_add:\n self.progress.append(progress)\n else:\n self.progress[-1] = progress", "def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step", "def __init__(self: \"InProgress\", progress: int = 0) -> None:\n self.progress = max(0, min(progress, 100))", "def increase_progress(self, value):\r\n\r\n pass", "def setReportProgress(self, newReportProgress):\n \n pass", "def _progress(self, walker):\n\n raise NotImplementedError", "def progress(self, value):\n self.step = float(value)\n self._draw()", "def set_Progress(self,func):\n self.__obj.set_Progress(func)", "def update(self, iteration):\n pass" ]
[ "0.7724764", "0.7345636", "0.7262811", "0.7262811", "0.7089067", "0.70505095", "0.6872752", "0.67855513", "0.6774502", "0.67634153", "0.6579046", "0.6579046", "0.6579046", "0.651001", "0.6473924", "0.6473924", "0.64663595", "0.64363086", "0.6374182", "0.63731956", "0.63265216", "0.6313078", "0.63038635", "0.62230355", "0.6221291", "0.6219667", "0.62105995", "0.61861145", "0.6150602", "0.6073169" ]
0.8085796
0
Disable any regions that can't be run. Currently only looks for VectorFileEffectors whose outputFile parameter is equal to 'No output file specified'.
def _disableRegions(self):
    effectors = [e for e in _getElements(self.network) if _isEffector(e)]
    for e in effectors:
        if e.getParameter('outputFile') == 'No outputFile specified':
            _disable(self.network, e.getName())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disable_vae_slicing(self):\n self.vae.disable_slicing()", "def disable_vae_tiling(self):\n self.vae.disable_tiling()", "def primers_are_useless(self):\r\n #TODO: send a message telling these primers can be taken out.\r\n for feature in self.gt_seq_region:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.pcr_product:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.forward_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.reverse_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")", "def DisableByRunIf(self):\n self.run_if = 'False'", "def suppress_analyze(more_exclusions=None):\n return api.override_step_data(\n 'read filter exclusion spec',\n api.json.output({\n 'base': {\n 'exclusions': ['f.*'] + (more_exclusions or []),\n },\n 'chromium': {\n 'exclusions': [],\n },\n })\n )", "def no_events(self):\n for lane in self.lanes.values():\n lane.disabled = True\n for empty in lane.beers:\n empty.touchable = False", "def disable_detector():\n global enable_detector, enable_detection, detector\n\n detector = None\n\n if detector is None:\n print(\"Detector stopped...\")\n enable_detection = False\n enable_detector = ''\n\n return render_settings_view()", "def skip_experiment(conf):\n return (\n (conf.dataset == 'rfw' and conf.feature == 'arcface')\n or (conf.dataset == 'bfw' and conf.feature == 'facenet')\n )", "def is_vuln_mode_disabled(self):\n # Set this value if you want the vuln data to be collected in the S3 file.\n return os.environ.get('DISABLE_VULN_MODE', 'false').lower() in ('1', 'yes', 'true')", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def flagUntexturedObject(self, object):\n object.setShaderInput(\"texDisable\", 1, 1, 1, 1)", "def allOff():\n # Get/set special slice IDs\n root_xid = bwlimit.get_xid(\"root\")\n default_xid = bwlimit.get_xid(\"default\")\n kernelhtbs = gethtbs(root_xid, default_xid)\n if len(kernelhtbs):\n logger.log(\"bwmon: Disabling all running HTBs.\")\n for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)", "def disable(self) -> None:", "def no_vnodes():\n return unittest.skipIf(not DISABLE_VNODES, 'Test disabled for vnodes')", "def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")", "def disable_eye_dome_lighting(self):\n self._render_passes.disable_edl_pass()", "def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()", "def exclusions(ctx, excl_h5):\n ctx.obj['EXCL_H5'] = excl_h5", "def __disableControls(self):\n self.ignoreAll()", "def negate_file(input_file, output_file, iline=9, xline=21):\n if not is_segy_valid(input_file):\n error_message = f'Refusing to continue as the {input_file} is not SEG-Y.'\n raise RuntimeError(error_message)\n\n try:\n fast_copy(input_file, output_file)\n except OSError as o:\n # TODO\n raise o \n\n output_segy_file = segyio.open(output_file, mode='r+', ignore_geometry=True,\n strict=False, iline=iline, xline=xline)\n\n for it in range(output_segy_file.tracecount):\n output_segy_file.trace[it] *= -1\n \n output_segy_file.close()", "def 
actionEraseCheck(self):\n sys.stderr.write(\"Erase Check by file ...\\n\")\n sys.stderr.flush()\n if self.data is not None:\n self.programData(self.data, self.ACTION_ERASE_CHECK)\n else:\n raise BSLException(\"cannot do erase check against data with not knowing the actual data\")", "def disable(self):", "def suppress_output_before_run(app):\n if not hasattr(app.pargs, 'output_handler_override'):\n return\n elif app.pargs.output_handler_override == 'yaml':\n app._suppress_output()", "def disableAllInputImages(self):\n self.logger.debug('Disabling all input image types')\n self.inputImages = {}", "def disable(self):\n pass", "def setLogFileOff(self):\n self.edLogging.setLogFileOff()", "def disable(self):\n raise NotImplementedError", "def setVerboseOff(self):\n self.edLogging.setVerboseOff()", "def disable():\n Plotter.enable = False", "def turnOffAllEnvir(self, isCutoutChecked):\n #if cmds.objExists(\"ENVIR\"):\n collection = self.createAllEnvirCollection()\n if collection is not None:\n o_envirVisibility = collection.createOverride(\"EnvirVisibility\", override.AbsOverride.kTypeId)\n attribute = self.getListUptoFirstMeshNode(assets()[\"ENVIR\"][0])\n if isCutoutChecked == True:\n o_envirVisibility.finalize(attribute+\".aiMatte\")\n o_envirVisibility.setAttrValue(1)\n else:\n o_envirVisibility.finalize(attribute+\".primaryVisibility\")\n o_envirVisibility.setAttrValue(0)" ]
[ "0.56054944", "0.54844016", "0.5445084", "0.53977", "0.5381937", "0.5360015", "0.53208935", "0.51000714", "0.5042197", "0.50409454", "0.50250703", "0.5024363", "0.5021316", "0.50164264", "0.5013679", "0.50099427", "0.5003581", "0.49923182", "0.4961914", "0.49477282", "0.4932406", "0.49295154", "0.49216384", "0.49013135", "0.48896423", "0.48890424", "0.48809367", "0.4878339", "0.48690265", "0.48669013" ]
0.84394544
0
Start running the network, and start the mainloop if necessary.

numIterations  Number of iterations to run for (optional).
target  Run until this condition is met (used by the Vision Framework). This is distinct from the user-specified target in the GUI, which simply tells the network when to pause.
tier  Tier being trained. Used to check the target on it.
stop  Whether to stop immediately (used when training a tier for 0 iterations).
callback  Called right after initialization (optional).
def start(self, numIterations=0, target=None, tier=0, stop=False, callback=None):
    #title()
    self._registerCallbacks()
    self.iterationCount = numIterations
    self.iteration = 0
    self.stopTarget = target
    self.tier = tier
    self.pauseAtPhaseSetup = False
    if callback:
        callback()
    self._disableRegions()  # Disable regions which can't be run (e.g. effectors)
    # Re-enable this window (use this hack to get a reference to it), because
    # it may have been disabled at the end of the previous tier/dataset when
    # running from the Vision Framework
    wx.GetApp().GetTopWindow().Enable()
    # Insert a call into the event loop to run (or stop)
    #if stop:  # Immediately stop - used by training GUI with 0 iterations
    #  wx.CallLater(self.runInterval, self.stop)
    #elif self.running:
    #  wx.CallLater(self.runInterval, self._run)
    self.mainloopRunning = True
    if not wx.GetApp().IsMainLoopRunning():
        wx.GetApp().MainLoop()
    self.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def train(self, t_max):\n # Initialize session, model, variables.\n tf.global_variables_initializer().run()\n self.stat.load_model()\n self.target_network.run_copy()\n self.stat.t_start = self.stat.get_t()\n\n # Burn in.\n self.burn_in()\n\n print(\" [*] Training.\")\n\n # Progress display mechanism.\n if self.chtc:\n self.t_range = range(self.stat.t_start, t_max)\n else:\n self.t_range = trange(self.stat.t_start, t_max)\n\n # Start a new game.\n observation, reward, terminal = self.new_game()\n\n # Initialize history.\n for _ in range(self.history_length):\n self.history.add(observation)\n\n try:\n for self.t in self.t_range:\n # Linearly decaying exploration factor.\n epsilon = self.ep_end + max(0., (self.ep_start - self.ep_end) * (self.t_ep_end - max(0., self.t - self.t_learn_start)) / self.t_ep_end)\n\n # 1. Predict.\n action = self.predict(self.history.get(), epsilon)\n # 2. Act.\n observation, reward, terminal, _ = self.env.step(action, is_training=True)\n # 3. Observe.\n self.observe(observation, reward, action, terminal)\n # 4. Update.\n _, _, is_update = self.update()\n # 5. Test. \n terminal = self.test() or terminal\n\n # Notify the statistic module of the new iteration number, in case it needs to save the model.\n self.stat.on_step(self.t, is_update)\n \n # If the game has terminated, reset.\n if terminal:\n observation, reward, terminal = self.new_game()\n for _ in range(self.history_length):\n self.history.add(observation)\n\n except KeyboardInterrupt:\n print(\"\\n [!] Keyboard interrupt registered. Exiting!\")\n # The model is typically saved every t_test iterations, but if the training needs to be paused, we can save immediately before quitting.\n self.stat.save_model(self.t, self.stat.latest_saver)\n\n except Exception as e:\n print(\" [!] 
Unhandled exception encountered:\", e, \"\\nExiting!\")\n self.stat.save_model(self.t, self.stat.latest_saver)\n\n\n self.stat.zip_data(False)", "def setup(self, opt: argparse.Namespace) -> None:\n if not self.is_train or opt.continue_train:\n self.load_networks(opt.epoch)\n self.print_networks(opt.verbose)\n return", "def run_net(self,\n pre_trained_chckpnt_dir ='' #for resuming training, load the model from this directory\n ):\n\n _rd = _read_data(data=self.data)\n\n self.alpha_coeff=1\n\n #read path of the images for train, test, and validation\n train_CTs, train_GTVs, train_Torso, train_penalize, train_surface,\\\n validation_CTs, validation_GTVs, validation_Torso, validation_penalize, validation_surface,\\\n test_CTs, test_GTVs, test_Torso, test_penalize,test_surface=_rd.read_data_path(fold=self.fold)\n self.img_width = self.img_width\n self.img_height = self.img_height\n # ======================================\n #validation instances\n bunch_of_images_no=20\n _image_class_vl = image_class(validation_CTs, validation_GTVs, validation_Torso,validation_penalize,validation_surface\n , bunch_of_images_no=bunch_of_images_no, is_training=0,\n patch_window=self.patch_window)\n _patch_extractor_thread_vl = _patch_extractor_thread(_image_class=_image_class_vl,\n sample_no=self.sample_no, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=0,vl_sample_no=self.validation_samples\n )\n _fill_thread_vl = fill_thread(validation_CTs,\n validation_GTVs,\n validation_Torso,\n validation_penalize,\n validation_surface,\n _image_class_vl,\n sample_no=self.sample_no,\n total_sample_no=self.validation_samples,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width, img_height=self.img_height,\n mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=0,\n patch_extractor=_patch_extractor_thread_vl,\n fold=self.fold)\n\n\n _fill_thread_vl.start()\n _patch_extractor_thread_vl.start()\n _read_thread_vl = read_thread(_fill_thread_vl, mutex=settings.mutex,\n validation_sample_no=self.validation_samples, is_training=0)\n _read_thread_vl.start()\n # ======================================\n #training instances\n bunch_of_images_no = 24\n _image_class = image_class(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface\n , bunch_of_images_no=bunch_of_images_no,is_training=1,patch_window=self.patch_window\n )\n patch_extractor_thread = _patch_extractor_thread(_image_class=_image_class,\n sample_no=240, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=1)\n _fill_thread = fill_thread(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface,\n _image_class,\n sample_no=self.sample_no,total_sample_no=self.sample_no,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width,\n img_height=self.img_height,mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=1,\n patch_extractor=patch_extractor_thread,\n fold=self.fold)\n\n _fill_thread.start()\n patch_extractor_thread.start()\n\n _read_thread = read_thread(_fill_thread,mutex=settings.mutex,is_training=1)\n _read_thread.start()\n # ======================================\n\n image = tf.placeholder(tf.float32, shape=[None, None, None, None, 1])\n label = tf.placeholder(tf.float32, shape=[None, None, 
None, None, 2])\n penalize = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n surf_map = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n loss_coef = tf.placeholder(tf.float32, shape=[None, 2]) # shape: batchno * 2 values for each class\n alpha = tf.placeholder(tf.float32, name='alpha') # background coeff\n beta = tf.placeholder(tf.float32, name='beta') # tumor coeff\n\n ave_vali_acc=tf.placeholder(tf.float32)\n ave_loss_vali=tf.placeholder(tf.float32)\n ave_dsc_vali=tf.placeholder(tf.float32)\n\n dropout=tf.placeholder(tf.float32,name='dropout')\n is_training = tf.placeholder(tf.bool, name='is_training')\n is_training_bn = tf.placeholder(tf.bool, name='is_training_bn')\n dense_net_dim = tf.placeholder(tf.int32, name='dense_net_dim')\n\n _dn = _densenet_unet(self.densnet_unet_config,self.compression_coefficient,self.growth_rate) #create object\n y=_dn.dens_net(image=image,is_training=is_training,dropout_rate1=0,dropout_rate2=0,dim=dense_net_dim,is_training_bn=is_training_bn)\n # y = _dn.vgg(image)\n\n y_dirX = ((y[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis]))\n label_dirX = (label[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis])\n penalize_dirX = (penalize[:,16,:,:,0,np.newaxis])\n surf_map_dirX = (surf_map[:,16,:,:,0,np.newaxis])\n image_dirX = ((image[:, int(self.patch_window / 2), :, :, 0, np.newaxis]))\n\n show_img=tf.nn.softmax(y)[:, int(self.GTV_patchs_size / 2) , :, :, 0, np.newaxis]\n tf.summary.image('outprunut',show_img , 3)\n tf.summary.image('output without softmax',y_dirX ,3)\n tf.summary.image('groundtruth', label_dirX,3)\n tf.summary.image('penalize', penalize_dirX,3)\n tf.summary.image('surf_map', surf_map_dirX,3)\n tf.summary.image('image',image_dirX ,3)\n\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n devices = sess.list_devices()\n print(devices)\n\n print(device_lib.list_local_devices())\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n\n train_writer = tf.summary.FileWriter(self.LOGDIR + '/train' ,graph=tf.get_default_graph())\n validation_writer = tf.summary.FileWriter(self.LOGDIR + '/validation' , graph=sess.graph)\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n saver=tf.train.Saver(tf.global_variables(), max_to_keep=1000)\n\n\n\n #define the loss function\n with tf.name_scope('cost'):\n penalize_weight=0\n [ penalized_loss,\n soft_dice_coef,logt,lbl]=self.loss_instance.dice_plus_distance_penalize(logits=y, labels=label,penalize=penalize)\n surface_loss= self.loss_instance.surface_loss(logits=y, labels=label, surf_map=surf_map)\n cost = tf.reduce_mean((1.0 - soft_dice_coef[1])+penalize_weight*penalized_loss+surface_loss, name=\"cost\")\n\n #Setup the Tensorboard plots\n tf.summary.scalar(\"cost\", cost)\n f1_measure = self.loss_instance.f1_measure(logits=y, labels=label)\n tf.summary.scalar(\"dice_bakground\", f1_measure[0])\n tf.summary.scalar(\"dice_tumor\", f1_measure[1])\n\n pwc = self.loss_instance.PWC(y, label)\n tf.summary.scalar(\"pwc_bakground\", pwc[0])\n tf.summary.scalar(\"pwc_tumor\", pwc[1])\n\n recall = self.loss_instance.Recall(y, label)\n tf.summary.scalar(\"recall_bakground\", recall[0])\n tf.summary.scalar(\"recall_tumor\", recall[1])\n\n precision = self.loss_instance.Precision(y, label)\n 
tf.summary.scalar(\"precision_bakground\", precision[0])\n tf.summary.scalar(\"precision_tumor\", precision[1])\n\n fpr = self.loss_instance.FPR(y, label)\n tf.summary.scalar(\"FPR_bakground\", fpr[0])\n tf.summary.scalar(\"FPR_tumor\", fpr[1])\n\n fnr = self.loss_instance.FNR(y, label)\n tf.summary.scalar(\"FNR_bakground\", fnr[0])\n tf.summary.scalar(\"FNR_tumor\", fnr[1])\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n optimizer_tmp = tf.train.AdamOptimizer(self.learning_rate,epsilon=0.001)\n optimizer = optimizer_tmp.minimize(cost)\n\n with tf.name_scope('validation'):\n average_validation_accuracy=ave_vali_acc\n average_validation_loss=ave_loss_vali\n average_dsc_loss=ave_dsc_vali\n tf.summary.scalar(\"average_validation_accuracy\",average_validation_accuracy)\n tf.summary.scalar(\"average_validation_loss\",average_validation_loss)\n tf.summary.scalar(\"average_dsc_loss\",average_dsc_loss)\n\n with tf.name_scope('accuracy'):\n accuracy=self.loss_instance.accuracy_fn(y, label)\n\n tf.summary.scalar(\"accuracy\", accuracy)\n\n sess.run(tf.global_variables_initializer())\n logging.debug('total number of variables %s' % (\n np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))\n summ=tf.summary.merge_all()\n\n point = 0 # starting point, starts from a value > 0 if training is resumed\n itr1 = 0 # number of iterations\n if len(pre_trained_chckpnt_dir):\n ckpt = tf.train.get_checkpoint_state(pre_trained_chckpnt_dir)\n saver.restore(sess, ckpt.model_checkpoint_path)\n point=int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n itr1=point\n\n\n # patch_radius = 49\n '''loop for epochs'''\n\n for epoch in range(self.total_epochs):\n while self.no_sample_per_each_itr*int(point/self.no_sample_per_each_itr)<self.sample_no:\n print('0')\n print(\"epoch #: %d\" %(epoch))\n startTime = time.time()\n step = 0\n self.beta_coeff=1+1 * np.exp(-point/2000)\n # =============start validation================\n if itr1 % self.display_validation_step ==0:\n '''Validation: '''\n loss_validation = 0\n acc_validation = 0\n validation_step = 0\n dsc_validation=0\n while (validation_step * self.batch_no_validation <settings.validation_totalimg_patch):\n [validation_CT_image, validation_GTV_image,validation_Penalize_patch,validation_Surface_patch] = _image_class_vl.return_patches_validation( validation_step * self.batch_no_validation, (validation_step + 1) *self.batch_no_validation)\n if (len(validation_CT_image)<self.batch_no_validation) | (len(validation_GTV_image)<self.batch_no_validation) | (len(validation_Penalize_patch)<self.batch_no_validation) | (len(validation_Surface_patch)<self.batch_no_validation) :\n _read_thread_vl.resume()\n time.sleep(0.5)\n continue\n\n validation_CT_image_patchs = validation_CT_image\n validation_GTV_label = validation_GTV_image\n tic=time.time()\n\n [acc_vali, loss_vali,dsc_vali,surface_loss1] = sess.run([accuracy, cost,f1_measure,surface_loss],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali:-1,\n dense_net_dim: self.patch_window,\n is_training_bn:False,\n alpha:1,\n beta:1,\n surf_map:validation_Surface_patch,\n })\n elapsed=time.time()-tic\n\n acc_validation += acc_vali\n loss_validation += loss_vali\n dsc_validation+=dsc_vali[1]\n validation_step += 1\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or 
np.isnan(acc_validation):\n print('nan problem')\n process = psutil.Process(os.getpid())\n\n print(\n '%d - > %d: elapsed_time:%d acc_validation: %f, loss_validation: %f, memory_percent: %4s' % (\n validation_step,validation_step * self.batch_no_validation\n , elapsed, acc_vali, loss_vali, str(process.memory_percent()),\n ))\n\n settings.queue_isready_vl = False\n acc_validation = acc_validation / (validation_step)\n loss_validation = loss_validation / (validation_step)\n dsc_validation = dsc_validation / (validation_step)\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n _fill_thread_vl.kill_thread()\n print('******Validation, step: %d , accuracy: %.4f, loss: %f*******' % (\n itr1, acc_validation, loss_validation))\n\n [sum_validation] = sess.run([summ],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: acc_validation,\n ave_loss_vali: loss_validation,\n ave_dsc_vali:dsc_validation,\n dense_net_dim: self.patch_window,\n is_training_bn: False,\n alpha: 1,\n beta: 1,\n surf_map: validation_Surface_patch,\n\n })\n validation_writer.add_summary(sum_validation, point)\n print('end of validation---------%d' % (point))\n\n #loop for training batches\n while(step*self.batch_no<self.no_sample_per_each_itr):\n [train_CT_image_patchs, train_GTV_label, train_Penalize_patch,loss_coef_weights,train_Surface_patch] = _image_class.return_patches( self.batch_no)\n\n if (len(train_CT_image_patchs)<self.batch_no)|(len(train_GTV_label)<self.batch_no)\\\n |(len(train_Penalize_patch)<self.batch_no)|(len(train_Surface_patch)<self.batch_no):\n time.sleep(0.5)\n _read_thread.resume()\n continue\n\n tic=time.time()\n [acc_train1, loss_train1, optimizing,out,dsc_train11] = sess.run([accuracy, cost, optimizer,y,f1_measure],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n # loss_coef: loss_coef_weights,\n dropout: self.dropout_keep,\n is_training: True,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali: -1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n elapsed=time.time()-tic\n dsc_train1=dsc_train11[1]\n\n self.x_hist=self.x_hist+1\n # np.hstack((self.x_hist, [np.ceil(\n\n [sum_train] = sess.run([summ],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n dropout: self.dropout_keep, is_training: True,\n ave_vali_acc: acc_train1,\n ave_loss_vali: loss_train1,\n ave_dsc_vali: dsc_train1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n train_writer.add_summary(sum_train,point)\n step = step + 1\n\n process = psutil.Process(os.getpid())\n\n print(\n 'point: %d, elapsed_time:%d step*self.batch_no:%f , LR: %.15f, acc_train1:%f, loss_train1:%f,memory_percent: %4s' % (\n int((point)),elapsed,\n step * self.batch_no, self.learning_rate, acc_train1, loss_train1,\n str(process.memory_percent())))\n\n\n point=int((point))\n if point%100==0:\n '''saveing model inter epoch'''\n chckpnt_path = os.path.join(self.chckpnt_dir,\n ('densenet_unet_inter_epoch%d_point%d.ckpt' % (epoch, point)))\n saver.save(sess, chckpnt_path, global_step=point)\n itr1 = itr1 + 1\n point=point+1\n endTime = time.time()\n\n #==============\n '''saveing 
model after each epoch'''\n chckpnt_path = os.path.join(self.chckpnt_dir, 'densenet_unet.ckpt')\n saver.save(sess, chckpnt_path, global_step=epoch)\n print(\"End of epoch----> %d, elapsed time: %d\" % (epoch, endTime - startTime))", "def trainNet():", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train_tier(args: argparse.Namespace, hp: HParams, tier: int, extension_architecture: str,\n timestamp: str, tensorboardwriter: TensorboardWriter,\n logger: logging.Logger) -> None:\n logger.info(f\"Start training of tier {tier}/{hp.network.n_tiers}\")\n\n # Setup the data ready to be consumed\n train_dataloader, test_dataloader, num_samples = get_dataloader(hp)\n\n # Setup tier\n # Calculate size of FREQ dimension for this tier\n tier_freq = tierutil.get_size_freqdim_of_tier(n_mels=hp.audio.mel_channels,\n n_tiers=hp.network.n_tiers,\n tier=tier)\n\n if tier == 1:\n model = Tier1(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n else:\n model = Tier(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n model = model.to(hp.device)\n model.train()\n\n # Setup loss criterion and optimizer\n criterion = GMMLoss()\n optimizer = torch.optim.RMSprop(params=model.parameters(),\n lr=hp.training.lr,\n momentum=hp.training.momentum)\n\n # Check if training has to be resumed from previous checkpoint\n if args.checkpoint_path is not None:\n model, optimizer = resume_training(args, hp, tier, model, optimizer, logger)\n else:\n logger.info(f\"Starting new training on dataset {hp.data.dataset} with configuration file \"\n f\"name {hp.name}\")\n\n # Train the tier\n total_iterations = 0\n loss_logging = 0 # accumulated loss between logging iterations\n loss_save = 0 # accumulated loss between saving iterations\n prev_loss_onesample = 1e8 # used to compare between saving iterations and decide whether or not\n # to save the model\n\n for epoch in range(hp.training.epochs):\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - Starting\")\n for i, (waveform, utterance) in enumerate(train_dataloader):\n\n # 1.1 Transform waveform input to melspectrogram and apply preprocessing to normalize\n waveform = waveform.to(device=hp.device, non_blocking=True)\n spectrogram = transforms.wave_to_melspectrogram(waveform, hp)\n spectrogram = audio_normalizing.preprocessing(spectrogram, hp)\n # 1.2 Get input and output from the original spectrogram for this tier\n input_spectrogram, output_spectrogram = tierutil.split(spectrogram=spectrogram,\n tier=tier,\n n_tiers=hp.network.n_tiers)\n length_spectrogram = input_spectrogram.size(2)\n # 2. Clear the gradients\n optimizer.zero_grad()\n # 3. Compute the model output\n if tier == 1:\n # generation is unconditional so there is only one input\n mu_hat, std_hat, pi_hat = model(spectrogram=input_spectrogram)\n else:\n # generation is conditional on the spectrogram generated by previous tiers\n mu_hat, std_hat, pi_hat = model(spectrogram=output_spectrogram,\n spectrogram_prev_tier=input_spectrogram)\n # 4. 
Calculate the loss\n loss = criterion(mu=mu_hat, std=std_hat, pi=pi_hat, target=output_spectrogram)\n del spectrogram\n del mu_hat, std_hat, pi_hat\n\n # 4.1 Check if loss has exploded\n if torch.isnan(loss) or torch.isinf(loss):\n error_msg = f\"Loss exploded at Epoch: {epoch}/{hp.training.epochs} - \" \\\n f\"Iteration: {i * hp.training.batch_size}/{num_samples}\"\n logger.error(error_msg)\n raise Exception(error_msg)\n\n # 5. Perform backpropagation\n loss_cpu = loss.item()\n loss.backward()\n optimizer.step()\n\n # 6. Logging and saving model\n loss_oneframe = loss_cpu / (length_spectrogram * hp.training.batch_size)\n loss_logging += loss_oneframe # accumulated loss between logging iterations\n loss_save += loss_oneframe # accumulated loss between saving iterations\n\n # 6.1 Save model (if is better than previous tier)\n if (total_iterations + 1) % hp.training.save_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_save / hp.training.save_iterations)\n # if loss_onesample of these iterations is lower, the tier is better and we save it\n if loss_onesample < prev_loss_onesample:\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_loss{loss_onesample}.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': i,\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n prev_loss_onesample = loss_onesample\n loss_save = 0\n\n # 6.2 Logging\n if (total_iterations + 1) % hp.logging.log_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_logging / hp.logging.log_iterations)\n tensorboardwriter.log_training(hp, loss_onesample, total_iterations)\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - \"\n f\"Iteration: {i * hp.training.batch_size}/{num_samples} - \"\n f\"Loss: {loss_onesample}\")\n loss_logging = 0\n\n # 6.3 Evaluate\n if (total_iterations + 1) % hp.training.evaluation_iterations == 0:\n evaluation(hp, tier, test_dataloader, model, criterion, logger)\n total_iterations += 1\n\n # After finishing training: save model, hyperparameters and total loss\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_final.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': evaluation(hp, tier, test_dataloader, model, criterion,\n logger),\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n tensorboardwriter.log_end_training(hp=hp, loss=-1)\n logger.info(\"Finished training\")", "def setup(\n self,\n dim_data: int,\n neural_net: ModelBase,\n optimizer: optax.OptState,\n ):\n # neural network\n self.state_neural_net = neural_net.create_train_state(\n self.rng, optimizer, dim_data\n )\n\n # step function\n self.step_fn = self._get_step_fn()", "def train_net(solver_prototxt, roidb, output_dir,\n pretrained_model=None, max_iters=40000):\n sw = SolverWrapper(solver_prototxt, roidb, output_dir,\n pretrained_model=pretrained_model)\n\n print 'Solving...'\n sw.train_model(max_iters)\n print 'done solving'", "def start_training(self):\n self.training = True", "def run(self):\n # This should do nothing if the user has already configured\n # logging, and will it least enable error messages otherwise.\n logging.basicConfig()\n\n # If this is resumption from a checkpoint, it 
is crucial to\n # reset `profile.current`. Otherwise, it simply does not hurt.\n self.profile.current = []\n\n # Sanity check for the most common case\n if (self._model and isinstance(self._model, Model) and\n isinstance(self.algorithm, GradientDescent)):\n if not (set(self._model.get_parameter_dict().values()) ==\n set(self.algorithm.parameters)):\n logger.warning(\"different parameters for model and algorithm\")\n\n with change_recursion_limit(config.recursion_limit):\n self.original_sigint_handler = signal.signal(\n signal.SIGINT, self._handle_epoch_interrupt)\n self.original_sigterm_handler = signal.signal(\n signal.SIGTERM, self._handle_batch_interrupt)\n try:\n logger.info(\"Entered the main loop\")\n if not self.status['training_started']:\n for extension in self.extensions:\n extension.main_loop = self\n self._run_extensions('before_training')\n with Timer('initialization', self.profile):\n self.algorithm.initialize()\n self.status['training_started'] = True\n # We can not write \"else:\" here because extensions\n # called \"before_training\" could have changed the status\n # of the main loop.\n if self.log.status['iterations_done'] > 0:\n self.log.resume()\n self._run_extensions('on_resumption')\n self.status['epoch_interrupt_received'] = False\n self.status['batch_interrupt_received'] = False\n with Timer('training', self.profile):\n while self._run_epoch():\n pass\n except TrainingFinish:\n self.log.current_row['training_finished'] = True\n except Exception as e:\n self._restore_signal_handlers()\n self.log.current_row['got_exception'] = traceback.format_exc()\n logger.error(\"Error occured during training.\" + error_message)\n try:\n self._run_extensions('on_error')\n except Exception:\n logger.error(traceback.format_exc())\n logger.error(\"Error occured when running extensions.\" +\n error_in_error_handling_message)\n reraise_as(e)\n finally:\n self._restore_signal_handlers()\n if self.log.current_row.get('training_finished', False):\n self._run_extensions('after_training')\n if config.profile:\n self.profile.report()", "def train(self, num_iterations):\n for t in range(num_iterations):\n self._step()\n # Maybe print training loss\n if (t != 0) and (t % self.print_every == 0):\n print('(Iteration %d / %d) loss: %f' % (\n t + 1, num_iterations, self.loss_history[-1]))", "def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})", "def run_training(self, 
task, output_folder):\n # -- Update the self.output_folder, otherwise the data will always be in the same folder for every task -- #\n # -- and everything will be overwritten over and over again -- #\n self.output_folder = join(output_folder, \"fold_%s\" % str(self.fold))\n\n # -- Make the directory so there will no problems when trying to save some files -- #\n maybe_mkdir_p(self.output_folder)\n\n # -- Add the current task to the self.already_trained_on dict in case of restoring -- #\n self.update_save_trained_on_json(task, False) # Add task to start_training\n\n # -- Set self.trainer to None after this, since it will never be used afterwards. -- #\n # -- If a pre trained network is used to iitialize an extension network, this will only -- #\n # -- effect the first task, more or less at this point, the everything has been done with -- #\n # -- the trainer. So it was only needed here for the already_trained_on to set the correct -- #\n # -- previous_trainer in case of a restoring. From now on, the prev_trainer is always this -- #\n # -- current network from the extension and self.trainer has no use when it is trained using -- #\n # -- one of the extensions. Thus just set it to None, and the already_trained_on sets the prev-trainer -- #\n # -- correct and as expected as well. -- #\n self.trainer = None\n\n # -- Register the task if it does not exist in one of the heads -- #\n if task not in self.mh_network.heads.keys():\n # -- Add this task into heads -- #\n self.mh_network.add_new_task(task)\n\n # -- Activate the model based on task --> self.mh_network.active_task is now set to task as well -- #\n self.mh_network.assemble_model(task)\n \n # -- Run the training from parent class -- #\n ret = super().run_training()\n\n # -- Add task to finished_training -- #\n self.update_save_trained_on_json(task, True)\n\n # -- When model trained on second task and the self.new_trainer is still not updated, then update it -- #\n if self.new_trainer and len(self.already_trained_on) > 1:\n self.new_trainer = False\n\n # -- Before returning, reset the self.epoch variable, otherwise the following task will only be trained for the last epoch -- #\n self.epoch = 0\n\n # -- Empty the lists that are tracking losses etc., since this will lead to conflicts in additional tasks durig plotting -- #\n # -- Do not worry about it, the right data is stored during checkpoints and will be restored as well, but after -- #\n # -- a task is finished and before the next one starts, the data needs to be emptied otherwise its added to the lists. 
-- #\n self.all_tr_losses = []\n self.all_val_losses = []\n self.all_val_losses_tr_mode = []\n self.all_val_eval_metrics = []\n\n return ret # Finished with training for the specific task", "def main(configuration_path, mode):\n config = toml.load(configuration_path)\n train_conf = read_config(config)\n\n click.echo(\"\\n Train config:\")\n print(train_conf, \"\\n\")\n\n # create databunch\n data = create_databunch(\n data_path=train_conf[\"data_path\"],\n fourier=train_conf[\"fourier\"],\n batch_size=train_conf[\"batch_size\"],\n )\n\n # get image size\n train_conf[\"image_size\"] = data.train_ds[0][0][0].shape[1]\n\n # define architecture\n arch = define_arch(\n arch_name=train_conf[\"arch_name\"], img_size=train_conf[\"image_size\"]\n )\n\n if mode == \"train\":\n if train_conf[\"normalize\"] == \"mean\":\n train_conf[\"norm_factors\"] = get_normalisation_factors(data)\n # check out path and look for existing model files\n check_outpath(train_conf[\"model_path\"], train_conf)\n\n click.echo(\"Start training of the model.\\n\")\n\n # define_learner\n learn = define_learner(data, arch, train_conf)\n\n # load pretrained model\n if train_conf[\"pre_model\"] != \"none\":\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n # Train the model, except interrupt\n # train_conf[\"comet_ml\"] = True\n try:\n if train_conf[\"comet_ml\"]:\n learn.comet.experiment.log_parameters(train_conf)\n with learn.comet.experiment.train():\n learn.fit(train_conf[\"num_epochs\"])\n else:\n learn.fit(train_conf[\"num_epochs\"])\n except KeyboardInterrupt:\n pop_interrupt(learn, train_conf)\n\n end_training(learn, train_conf)\n\n if train_conf[\"inspection\"]:\n after_training_plots(train_conf, rand=True)\n\n if mode == \"fine_tune\":\n click.echo(\"Start fine tuning of the model.\\n\")\n\n # define_learner\n learn = define_learner(\n data,\n arch,\n train_conf,\n )\n\n # load pretrained model\n if train_conf[\"pre_model\"] == \"none\":\n click.echo(\"Need a pre-trained modle for fine tuning!\")\n return\n\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n # Train the model, except interrupt\n try:\n learn.fine_tune(train_conf[\"num_epochs\"])\n except KeyboardInterrupt:\n pop_interrupt(learn, train_conf)\n\n end_training(learn, train_conf)\n if train_conf[\"inspection\"]:\n after_training_plots(train_conf, rand=True)\n\n if mode == \"lr_find\":\n click.echo(\"Start lr_find.\\n\")\n if train_conf[\"normalize\"] == \"mean\":\n train_conf[\"norm_factors\"] = get_normalisation_factors(data)\n\n # define_learner\n learn = define_learner(data, arch, train_conf, lr_find=True)\n\n # load pretrained model\n if train_conf[\"pre_model\"] != \"none\":\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n learn.lr_find()\n\n # save loss plot\n plot_lr_loss(\n learn,\n train_conf[\"arch_name\"],\n Path(train_conf[\"model_path\"]).parent,\n skip_last=5,\n output_format=train_conf[\"format\"],\n )\n\n if mode == \"plot_loss\":\n click.echo(\"Start plotting loss.\\n\")\n\n # define_learner\n learn = define_learner(data, arch, train_conf, plot_loss=True)\n # load pretrained model\n if Path(train_conf[\"model_path\"]).exists:\n load_pre_model(learn, train_conf[\"model_path\"], plot_loss=True)\n else:\n click.echo(\"Selected model does not exist.\")\n click.echo(\"Exiting.\\n\")\n sys.exit()\n\n plot_lr(\n learn, Path(train_conf[\"model_path\"]), output_format=train_conf[\"format\"]\n )\n plot_loss(\n learn, Path(train_conf[\"model_path\"]), 
output_format=train_conf[\"format\"]\n )", "def run_model(\n self,\n load_week=None,\n run_start=None,\n state_builder=\"acis\",\n interrupt=False,\n override_limits=None,\n out_dir=None,\n ):\n if out_dir is None:\n if load_week is None:\n raise ValueError(\"Both 'out_dir' and 'load_week' cannot be None!\")\n out_dir = self.outdir / load_week / self.name\n if load_week in nlets:\n nlet_file = tests_path / \"data\" / f\"nlets/TEST_NLET_{load_week}.txt\"\n else:\n nlet_file = None\n args = TestArgs(\n out_dir,\n run_start=run_start,\n load_week=load_week,\n interrupt=interrupt,\n nlet_file=nlet_file,\n state_builder=state_builder,\n model_spec=self.test_model_spec,\n )\n self.atc_obj.run(args, override_limits=override_limits)", "def train(self, num_iter=10000, batch_size=256, learning_rate=0.0001, \\\n beta1=0.5, beta2=0.999, val_interval=1000, val_bs=256):\n if not self._is_build:\n raise CustomException('Network has not been build yet.')\n \n logger.info('Training MI Estimator ...')\n \n with self._graph.as_default() as g:\n summary_writer = tf.summary.FileWriter( \\\n self._get_summary_dir('train_summary',\n rm_existing=not self.continue_training),\n g)\n\n self._init_validation(val_bs)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=beta1, beta2=beta2)\n train_step = optimizer.minimize(self._t_loss,\n global_step=self._t_global_step)\n\n init_op = tf.global_variables_initializer()\n\n checkpoint_saver = tf.train.Saver(max_to_keep=5, \\\n keep_checkpoint_every_n_hours=3)\n\n with tf.Session(graph=self._graph, config=self._get_config_proto()) \\\n as sess:\n # Restore training if requested.\n iter_start, iter_end = self._init_training(sess, num_iter, init_op,\n self._t_global_step, checkpoint_saver)\n\n # Initialize training set.\n train_handle = sess.run(self._train_iter.string_handle())\n sess.run(self._train_iter.initializer,\n feed_dict={self._t_train_raw_in:\n shared.data.get_train_inputs(),\n self._t_train_raw_out:\n shared.data.get_train_outputs(),\n self._t_train_batch_size: batch_size})\n\n for i in range(iter_start, iter_end):\n if i % val_interval == 0:\n checkpoint_saver.save(sess, os.path.join( \\\n self._checkpoint_dir, 'model'), global_step=i)\n\n self._validate_training_process(sess, i)\n\n #elif i % 100 == 0 and i > 0:\n # logger.info('Running training epoch: %d.' % i)\n\n _, summary = sess.run( \\\n [train_step, self._t_summaries],\n feed_dict={self._t_handle: train_handle,\n self._t_mode: True,\n self._t_mi_known: True})\n summary_writer.add_summary(summary, i)\n\n checkpoint_saver.save(sess, os.path.join( \\\n self._checkpoint_dir, 'model'), global_step=iter_end)\n logger.info('Training ends after %d iterations.' % iter_end)\n\n summary_writer.close()\n self._val_summary_writer.close()\n\n logger.info('Training MI Estimator ... 
Done')", "def run( self ):\n # ---- Startup/Shutdown ----\n with self:\n\n # ---- Optionally reload from previous run ----\n if self.config.neuron.reload:\n self.reload()\n else:\n self.checkpoint()\n\n # --- Run until n_epochs ----\n while self.epoch < self.config.neuron.n_epochs:\n try:\n # ---- Train state ----\n self.run_epoch()\n\n # ---- Set weights on chain ----\n self.set_mechanism_weights()\n\n # ---- Checkpoint state ----\n self.checkpoint()\n\n except KeyboardInterrupt:\n # --- User ended session ----\n break\n\n except Exception as e:\n # --- Unknown error ----\n logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())\n if self.config.neuron.restart_on_failure == True:\n logger.info('Restarting from last saved state.')\n self.reload()\n else:\n break", "def run(self, u, tol, maxiter):\n raise NotImplementedError (\"This is just an abstract interface\")", "def main(\n network_type: NetworkType = Argument(..., help=\"type of the VAE network\"),\n bottleneck_dim: int = Option(\n 16, \"--bottleneck_dim\", \"-n\", help=\"size of the VAE bottleneck\"\n ),\n lr: float = Option(0.001, \"--lr\", \"-r\", help=\"learning rate for training\"),\n batch_size: int = Option(..., \"--batch_size\", \"-b\", help=\"batch size for training\"),\n epochs: int = Option(..., \"--epochs\", \"-e\", help=\"epochs to train\"),\n device: str = Option(\n \"cpu\", \"--device\", \"-d\", help='device to train on, e.g. \"cuda:0\"'\n ),\n logdir: str = Option(\n \"./results\",\n \"--logdir\",\n \"-l\",\n help=\"directory to log the models and event file to\",\n ),\n):\n\n mnist_data = dataset.MyMNIST()\n\n if network_type == NetworkType.mlp:\n net = model.MLPVAE((1, 32, 32), bottleneck_dim)\n else:\n net = model.CNNVAE((1, 32, 32), bottleneck_dim)\n\n optim = torch.optim.Adam(net.parameters(), lr)\n vae_trainer = trainer.Trainer(net, mnist_data, optim, batch_size, device, logdir)\n vae_trainer.train(epochs)", "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(config)\n hidden_builder = FFHiddenBuilder()\n _ = network_builder.build_network(hidden_builder)\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n trainer = Trainer(network_builder, train_config)\n trainer.train(dataset)", "def train(self, example):\n data = dict()\n data[\"inst\"] = \"train\"\n data[\"examples\"] = example\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def networkTraining(model, train_loader, val_loader, epochs, learning_rate, device, log_path, includeHeading): \r\n \r\n \r\n print('------ STARTING TRAINING ------')\r\n print('Number of epochs: ', epochs)\r\n print('Learning rate: ', learning_rate)\r\n print('Batch Size: ', config['batch_size'])\r\n print('City: ', config['city'])\r\n print('-' * 31)\r\n \r\n # Define optimizer\r\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\r\n \r\n # 
Initialize log file\r\n writer = Visualizer(log_path)\r\n \r\n # dump config file \r\n with open(os.path.join(log_path,'config.json'), 'w') as fp:\r\n json.dump(config, fp)\r\n \r\n startTime = time.time()\r\n iterator = 0\r\n \r\n val_loss_min = np.Inf\r\n counter = 0\r\n \r\n # For each epoch\r\n for epoch in range(epochs):\r\n writer.write_lr(optimizer, iterator)\r\n\r\n # train for one epoch\r\n iterator = training(model, train_loader, optimizer, device, writer, epoch, iterator)\r\n \r\n # Early stopping (training failed)\r\n if iterator == -1:\r\n duration = time.time() - startTime\r\n print(\"Training finished (Error), took {:.2f}s\".format(duration))\r\n break\r\n \r\n # get validation loss and save images\r\n valLoss = validation(model, val_loader, device, writer, iterator, log_path, includeHeading)\r\n \r\n # Early stopping (didn't improve for 2 epochs)\r\n if valLoss < val_loss_min:\r\n torch.save(model.state_dict(), os.path.join(log_path, 'checkpoint.pt'))\r\n val_loss_min = valLoss\r\n counter = 0\r\n elif counter == 1:\r\n duration = time.time() - startTime\r\n print(\"Training finished (early), took {:.2f}s\".format(duration))\r\n break\r\n else:\r\n counter += 1\r\n \r\n # Dump statistics in tensorboard file\r\n duration = time.time() - startTime\r\n print(\"Training finished, took {:.2f}s\".format(duration))\r\n \r\n writer.write_text('{:.2f}'.format(duration), 'Time')\r\n \r\n if device != 'cpu':\r\n mem = torch.cuda.max_memory_allocated(device)\r\n mem = mem // 1048576\r\n writer.write_text('{:d}'.format(mem), 'Memory')\r\n writer.close()", "def train():\n pass", "def test_net(args, dataset_name, proposal_file, output_dir, ind_range=None, gpu_id=0, early_stop=False):\n # print('test_net')\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes = {}\n\n timers = defaultdict(Timer)\n \n \n\n\n if 'train' in dataset_name:\n if ind_range is not None:\n det_name = 'discovery_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'discovery.pkl'\n else:\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n \n det_file = os.path.join(output_dir, det_name)\n if os.path.exists(det_file):\n print('the file', det_file, 'exists. 
I am loading detections from it...')\n return load_object(det_file)['all_boxes']\n\n for i, entry in enumerate(roidb):\n if early_stop and i > 10: break\n\n box_proposals = entry['boxes']\n if len(box_proposals) == 0:\n continue\n \n im = cv2.imread(entry['image'])\n # print(entry['image'])\n cls_boxes_i = im_detect_all(model, im, box_proposals, timers)\n\n all_boxes[entry['image']] = cls_boxes_i\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n \n det_time = (timers['im_detect_bbox'].average_time)\n \n logger.info(('im_detect: range [{:d}, {:d}] of {:d}:{:d}/{:d} {:.3f}s (eta: {})').format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, eta))\n\n cfg_yaml = yaml.dump(cfg)\n\n save_object(\n dict(\n all_boxes=all_boxes,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes", "def train():\n # YOUR TRAINING CODE GOES HERE", "def on_start_(\n self,\n super_callback: \"AllennlpWandbCallback\",\n trainer: \"GradientDescentTrainer\",\n is_primary: bool = True,\n **kwargs: Any,\n ) -> None:\n self.trainer = trainer" ]
[ "0.5481345", "0.53400576", "0.5291693", "0.5231653", "0.52196884", "0.5207074", "0.5196552", "0.5196552", "0.5140777", "0.5139961", "0.5114426", "0.5084966", "0.50706834", "0.5069639", "0.50587445", "0.5039778", "0.5032471", "0.50109255", "0.49990544", "0.49804333", "0.49733925", "0.49683878", "0.49613833", "0.4946323", "0.4938187", "0.49283072", "0.49233842", "0.4914001", "0.4911629", "0.49006906" ]
0.82238597
0
Seek to the specified iteration.
def _seek(self, iteration):
    # Validate it
    if iteration < 1:
        iteration = 1
    # Seek to one iteration before the specified iteration, then run the
    # network for one iteration, so the inspectors will show the right data
    self.iteration = iteration - 1
    self.experiment.position.iter = iteration - 1
    for sensor in self.sensors:
        assert sensor.type == 'VectorFileSensor'
        sensor.setParameter('position', self.iteration)
    self._step()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seek(self, offset, whence=io.SEEK_SET):\n if offset != 0 and whence == io.SEEK_SET:\n # logging.debug('IterStream: trying to seek to offset {0}.'\n # .format(offset))\n if offset > self.curr_pos:\n self.readinto(bytearray(offset - self.curr_pos))\n elif offset == self.curr_pos:\n pass\n else: # need to re-create iterable\n self.reset()\n self.readinto(bytearray(offset))\n if self.curr_pos != offset:\n # logging.debug('IterStream: curr_pos {0} != offset {1}!'\n # .format(self.curr_pos, offset))\n raise RuntimeError('programming error in IterStream.tell!')\n return self.curr_pos\n elif whence == io.SEEK_END: # seek to end\n # logging.debug('IterStream: seek to end')\n if self.size is None:\n # logging.debug('IterStream: trying to seek to end but size '\n # 'unknown --> raise IOError')\n raise IOError('size unknown, cannot seek to end')\n self.at_end = True # fake jumping to the end\n self.iterable = None # cannot safely be used any more\n self.leftover = None\n return self.size\n elif whence == io.SEEK_SET: # seek to start\n # logging.debug('IterStream: seek to start')\n self.reset()\n return 0\n elif whence == io.SEEK_CUR: # e.g. called by tell()\n # logging.debug('IterStream: seek to curr pos')\n if self.at_end:\n return self.size\n return self.curr_pos\n elif whence not in (io.SEEK_SET, io.SEEK_CUR, io.SEEK_END):\n # logging.debug('Illegal 2nd argument to seek(): {0}'\n # .format(whence))\n raise IOError('Illegal 2nd argument to seek(): {0}'.format(whence))\n else:\n # logging.debug('not implemented: {0}, {1}'.format(offset, whence))\n raise NotImplementedError('seek only partially implemented. '\n 'Cannot yet seek to {0} from {1}'\n .format(offset, whence))", "def iteration(self, iteration):\n self._iteration = iteration", "def iteration(self, iteration):\n self._iteration = iteration", "def iteration(self, iteration):\n self._iteration = iteration", "def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)", "def seek(self, cutoff):\n while self.step_num < cutoff and self.op_state == Turing_Machine.RUNNING:\n \"\"\"Perform an atomic transition or chain step.\"\"\"\n if self.op_state != Turing_Machine.RUNNING:\n continue\n if self.end_time and time.time() >= self.end_time:\n self.op_state = Turing_Machine.TIME_OUT\n continue\n\n if self.compute_steps:\n self.old_step_num = self.step_num\n # Note: We increment the number of loops early to take care of all the\n # places step() could early-return.\n self.num_loops += 1\n\n # Get current symbol\n cur_symbol = self.tape.get_top_symbol()\n\n # Lookup TM transition rule\n cond, (symbol2write, next_state, next_dir), num_steps = \\\n self.machine.get_transition(cur_symbol, self.state, self.dir)\n\n # Test condition\n self.op_state = cond[0]\n self.op_details = cond[1:]\n\n # Apply transition\n # Chain move\n self.tape.apply_single_move(symbol2write, next_dir)\n self.state = next_state\n self.dir = next_dir\n self.num_macro_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_macro += num_steps\n if self.op_state == Turing_Machine.INF_REPEAT:\n self.inf_reason = REPEAT_IN_PLACE\n\n if self.op_state != Turing_Machine.UNDEFINED:\n self.verbose_print()", "def _advance(self):\n if not self._is_at_end():\n self._current += 1\n return self._previous()", "def seek_to(self, ms):\n self.proxy.seek_to(ms)", "def _advance(self):\n if self._is_at_end():\n return None\n self.current += 1\n return self.source[self.current - 1]", "def advance(self, *args):\n return _libsbml.SwigPyIterator_advance(self, 
*args)", "def getIteration(self): \n return self.iteration", "def nextIter(self):\n\t\tpass", "def testSeek(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',\n inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestSeek(file_object)", "def testSeek(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',\n inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestSeek(file_object)", "async def on_step(self, iteration: int):\n raise NotImplementedError", "def _goto(self, end):\n self._position = end", "def iteration(self):\n return self._iteration", "def iteration(self):\n return self._iteration", "def iteration(self):\n return self._iteration", "def iteration(self):\n return self._iteration", "def next(self):\n self.jumpahead(1)", "def seek(self, time):\n command = 'seek ' + str(time)\n self.run_command(command)", "def _get_item_by_idx(self, iterator, idx):\r\n size = len(self)\r\n idx = operator.index(idx)\r\n if not -size <= idx < size:\r\n raise IndexError('index {} is out of range'.format(idx))\r\n idx %= size\r\n return next(itertools.islice(iterator, idx, None))", "def advance(self, n):\n return _elas.SwigPyIterator_advance(self, n)", "def seek(self, index: int, /) -> str:\n self.index = index\n return self.current", "def next(self):\n if self.step == 0:\n raise StopIteration\n self.step -= 1\n return self.step", "def __iter__(self):\n self.current = self.start\n self.returned = 0\n return self", "def advance(self, *args):\n return _osgAnimation.SwigPyIterator_advance(self, *args)", "def next(self):\n if self.step ==0:\n raise StopIteration\n self.step -= 1\n return self.step", "def get_iteration(self):\n return self.iteration" ]
[ "0.608975", "0.5976431", "0.5976431", "0.5976431", "0.5770309", "0.574652", "0.5727403", "0.57088655", "0.559905", "0.5595579", "0.5563567", "0.5559499", "0.554357", "0.554357", "0.55330926", "0.54994696", "0.54990864", "0.54990864", "0.54990864", "0.54990864", "0.5492508", "0.5484246", "0.54732305", "0.5472783", "0.5463095", "0.5427775", "0.54186684", "0.5409978", "0.5407933", "0.5403196" ]
0.76999676
0
Called automatically by Traits when the iteration updates. Update the progress bar and check the conditions to see whether to stop or pause.
def _iteration_changed(self):
    if self.showProgressBar:
        try:
            self._setProgress()
        except:
            # may fail when switching from training to inference
            from dbgp.client import brk; brk(port=9011)
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)", "def progress(self):\n if self.running:\n pass\n else:\n self._engine.progress()", "def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)", "def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)", "def updateProgress (self, iteration, total, prefix='Progress', suffix='complete', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n if iteration == 0:\n self.start_time = timer()\n ETC = '' #Estimated Time to Completion\n if (iteration/total)*100 >= self.updates[self.update_counter]:\n elapsed = timer() - self.start_time\n if iteration != 0:\n minutes = int((elapsed * total/iteration - elapsed)//60)\n seconds = int((elapsed * total/iteration - elapsed)%60)\n ETC = \"(~{:d} mins {:d}s left)\".format(minutes, seconds)\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n # Unfortunately \\r doesn't work in the pycharm console, so we have to reprint the whole bar everytime,\n # clogging the console.\n #print(f'\\r{prefix} |{bar}| {percent}% {suffix} {ETC}', end = printEnd)\n print(f'{prefix} |{bar}| {percent}% {suffix} {ETC}')\n # Print New Line on Complete\n if iteration == total:\n print()\n self.update_counter += 1", "def tick(self):\n self.current_count += 1\n self.progress(self.current_count)", "def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)", "def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? 
jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()", "def _update_status(self):\n if any([abs(v) > LIMITS[i] for i, v in enumerate(self.state)]):\n self.terminal = True\n elif abs(self.q[3]) < LIMITS[9]:\n self.terminal = True\n elif self.steps + 1 >= self.max_steps:\n self.terminal = True", "def update_progress(self, done):\r\n if done % 100 == 0:\r\n print >>sys.stderr, \" %d processed, run time %d secs\" % (done, (datetime.now() - self.started_at).seconds)", "def next(self):\n if self.skip:\n return\n\n self.counter += 1\n if self.pbar is None and (time.time() - self.start_time) > self.threshold:\n self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)\n elif self.pbar is not None:\n self.pbar.update(1)", "def reportProgress(self):\n \n pass", "def update_stats(self, step):\n self.dynamic.progressBar.setValue(\n float(step) / float(self.meas_max_volt / self.steps) * 100\n )", "def update(self, iteration):\n pass", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def set_progress(self, progress: float):", "def notify_progress(self, progress_data):\n pass # pragma: no cover", "def notify_progress(self, ratio):\n self._progress_bar += ratio\n while self._progress_bar > self._offset_bar:\n self._offset_bar += 0.01\n self._progress_window.progress(100 * self._progress_bar)\n # print(100 * self._progress_bar)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. 
:\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass", "def process(self):\n self._counter += 1\n if self._counter >= self._iterations:\n self.running = False\n return super().process()", "def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))", "def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step", "def run(self, item=None, progress=None):\n # these lines can be removed\n assert isinstance(progress, dl.Progress)\n progress.update(status='inProgress', progress=0)", "def update(self, steps):\n self.launch_progress += (steps)/self.total", "def _progress(self, walker):\n\n raise NotImplementedError", "def labelUpdate(self, run_dict):\n self.progressBar.reset()\n self.progressBar.setMinimum(1)\n self.progressBar.setMaximum(run_dict[\"Progress\"])\n self.progressLabel.setText(run_dict[\"Text\"])", "def example(self):\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)", "def update_percent(self):", "def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n 
save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)" ]
[ "0.6636578", "0.66308147", "0.6474163", "0.6474163", "0.6268568", "0.62476134", "0.62351364", "0.61889505", "0.6155558", "0.60760725", "0.6037743", "0.6024817", "0.6013525", "0.600907", "0.5963331", "0.5963331", "0.5913053", "0.5901051", "0.5898955", "0.5889771", "0.5860249", "0.5839217", "0.58382523", "0.583135", "0.58205926", "0.5815258", "0.57864386", "0.5779483", "0.5774616", "0.575005" ]
0.71780443
0
Determine which FA environment (Dev/Uat/Prod) the script is running in. Returns the specified path contained in the FExtensionValue which differs for each environment. Also retrieves the list of trade filters separated by ';' in the FExtensionValue config file.
def retrieve_environment(env_config, filter_config):
    global path, filter_list_from_config
    arena_data_server = acm.FDhDatabase["ADM"].ADSNameAndPort().upper()
    configuration = acm.GetDefaultValueFromName(
        acm.GetDefaultContext(), acm.FObject, env_config)
    filter_list_from_config = acm.GetDefaultValueFromName(
        acm.GetDefaultContext(), acm.FList, filter_config).split(';')
    dom_xml = xml.parseString(configuration)
    tags = dom_xml.getElementsByTagName("Host")
    for element in tags:
        if element.getAttribute("Name") == arena_data_server:
            path = element.getElementsByTagName(
                "output_path")[0].childNodes[0].data
            print(" path found: ", path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_environment():\n # Search for the environment variable set by the hutch python setup\n env = os.getenv('CONDA_ENVNAME')\n # Otherwise look for built-in Conda environment variables\n if not env:\n env = os.getenv('CONDA_DEFAULT_ENV')\n # Check the top level PYTHONPATH to see if we have packages installed in\n # development mode\n dev = os.getenv('PYTHONPATH')\n if dev:\n try:\n dev_pkgs = os.listdir(dev)\n except FileNotFoundError:\n logger.debug(\"No dev folder found\")\n dev_pkgs = list()\n else:\n dev_pkgs = list()\n return env, dev_pkgs", "def get_current_environment(self):\n for env in self.indicators:\n if self._is_env_indicator_in_url(self.indicators[env]):\n return env\n\n return Environment.PRODUCTION", "def get_environment(basedir):\n for env in ('devel', 'staging', 'prod'):\n if os.path.exists(os.path.join(basedir, env)):\n return env\n return 'devel'", "def env(self): # type: () -> t.List[str]\n return self.config['Env']", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def current_config():\n if os.environ[\"ENVIRONMENT\"] == \"production\":\n return Production()\n elif os.environ[\"ENVIRONMENT\"] == \"staging\":\n return Staging()\n elif os.environ[\"ENVIRONMENT\"] == \"testing\":\n return Testing()\n elif os.environ[\"ENVIRONMENT\"] == \"development\":\n return Development()\n else:\n raise KeyError(f\"Unknown environment '{os.environ['ENVIRONMENT']}'\")", "def _get_filter_directory(self):\n return os.path.abspath(os.environ.get('PYCOCO_FILTER_DIR', self._default_filter_dir_path))", "def getConfig():\n\n config = rFile(\"/var/www/html/config.txt\").split()\n f = int(config[0])\n mode = config[1]\n\n if (f in DABchannels and (mode == \"explore\" or mode == \"monitor\")):\n return f, mode\n else:\n return 227360000, \"explore\" # Kamzik - Bratislava", "def get_environment():\n # Auto-set settings object based on App Engine dev environ\n if 'SERVER_SOFTWARE' in os.environ:\n if os.environ['SERVER_SOFTWARE'].startswith('Dev'):\n return Config.ENV_LOCAL\n elif os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/'):\n #For considering an environment staging we assume the version id\n # contains -staging and the URL\n current_version_id = str(os.environ['CURRENT_VERSION_ID']) if (\n 'CURRENT_VERSION_ID') in os.environ else ''\n if '-staging' in current_version_id:\n return Config.ENV_STAGING\n #If not local or staging then is production TODO: really?\n return Config.ENV_PRODUCTION\n return Config.ENV_LOCAL", "def get_filters() -> dict:\n if environment is None or not hasattr(environment, 'loader'):\n return {}\n return environment.filters", "def __get_environ_path(environ_key):\n environ_value = os.environ.get(environ_key)\n result = []\n\n if not environ_value:\n return result\n\n environ_path_list = environ_value.split(';')\n for each_path in environ_path_list:\n each_path = path.Path(each_path)\n\n if not each_path.exists():\n continue\n\n # make sure default directory first in the order\n if 'FrMaya' in each_path:\n result.insert(0, each_path)\n else:\n result.append(each_path)\n\n return result", "def list_envs(self):\n if self.hdfs:\n files = self.hdfs.ls(self.hdfs_home + '/.knitDeps/', True)\n return [f for f in files if f['name'].endswith('.zip')]\n else:\n raise ImportError('Set the `hdfs` attribute to be able to list'\n 'environments.')", "def 
get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n return config_file", "def get_sources_filters(provider, application):\n provider = provider or ''\n return [key for key in\n ('common', provider.split(',')[0], application) if key]", "def get_environment_paths(basedir=None):\n basedir = (\n get_default_secrets_basedir() if basedir is None\n else Path(basedir)\n )\n results = list()\n for item in sorted(basedir.iterdir()):\n if is_valid_environment(item):\n results.append(item)\n return results", "def default_configuration_list(platform, ide):\n\n # All platforms support this format.\n results = [\"Debug\", \"Internal\", \"Release\"]\n\n # Xbox and Windows support link time code generation\n # as a platform\n if ide.is_visual_studio() and platform.is_windows(\n ) or platform in (PlatformTypes.xbox360,):\n results.append(\"Release_LTCG\")\n\n # Configurations specific to the Xbox 360\n if platform is PlatformTypes.xbox360:\n results.extend([\"Profile\", \"Profile_FastCap\", \"CodeAnalysis\"])\n return results", "def current_cabal():\n if get_setting_async('use_cabal_dev'):\n return get_setting_async('cabal_dev_sandbox')\n else:\n return 'cabal'", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def get_browser_extensions(self, config_section):\n\n extension_string = None\n extensions = []\n if config_section is not None:\n try:\n extension_string = self.shishito_support.get_opt(config_section, 'browser_extensions') # browser config\n except configparser.NoOptionError:\n extension_string = None\n\n if extension_string is None:\n try:\n extension_string = self.shishito_support.get_opt('browser_extensions') # common config\n except configparser.NoOptionError:\n pass\n\n if extension_string is None:\n return []\n\n for item in re.split('\\s+', extension_string):\n if item != '':\n m = re.match('^\\$([A-Z][A-Z_]+)$', item)\n if m is not None:\n var_name = m.group(1)\n if var_name not in os.environ:\n raise Exception(\"Error getting browser_extensions: env variable '\" + item + \"' not defined\")\n extensions.append(os.environ[var_name]) # take the extension path as configured\n else:\n extensions.append(item) # take the extension path as configured\n\n return extensions", "def env(self):\n return spack.schema.environment.parse(self.conf.get(\"environment\", {}))", "def getSpecificBuild(self,\n release=\"\",\n baseline=\"\",\n project=\"\",\n filters=[\"BUILD\"],\n list_found_items=[]):\n table = []\n result = []\n for keyword in filters:\n self.getItemsInFolder(keyword,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n converted_list=result,\n list_found_items = list_found_items\n )\n if result:\n table = result\n break\n return table\n type_items = \"(cvtype='shsrc' or 
cvtype='executable' or cvtype='ascii' or cvtype='makefile')\"\n stdout = self._runFinduseQuery(release,project,type_items,True)\n #if stdout:\n if stdout not in (\"\",False):\n # Build regular expression to filter only configuration items under BUILD folder\n regexp, list_items_skipped = self._prepareRegexp(filters)\n output = stdout.splitlines()\n for line in output:\n item = self._filterRegexp(regexp[0],line)\n if item != \"\":\n # The item is in the folder\n list_items_skipped[0].append(item)\n # ex: SW_PLAN\\SDP\\IS_SDP_SW_PLAN_SQA.xlsm-1.7.0@SW_PLAN-1.3\n # suppress redundant items\n table = list(set(list_items_skipped[0]))\n for data in table:\n self.ihm.log('Found in BUILD folder: ' + data,False)\n else:\n table = []\n self.ihm.log('No build files found in BUILD folder.')\n return table", "def get_site_env(self):\n return self.config['SITE_ENVIRONMENT'] == 'DEV'", "def get_code_env_settings(self):\n rp = self.get_recipe_params()\n if not \"envSelection\" in rp:\n raise ValueError(\"This recipe kind does not seem to take a code env selection\")\n return rp[\"envSelection\"]", "def get_fr_config_files(self):\n self.get_config_files()\n for file in self.txt_files:\n if \"fr\" in file:\n self.fr_config_files.append(file)\n return self.fr_config_files", "def envs(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverCommonEnvs']]:\n return pulumi.get(self, \"envs\")", "def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname", "def getValue(self, value=None):\n if self.data and self.source & COMMANDLINE:\n return self.data\n\n if self.environ and str(self.environ) in os.environ:\n self.source = ENVIRONMENT\n self.file = None\n return self.cast(os.environ[str(self.environ)])\n\n if self.data:\n return self.data\n\n if self.default:\n self.source = BUILTIN\n self.file = None\n return self.default\n\n self.source = CODE\n self.file = None\n\n if value is None:\n return []\n\n return value", "def get_env(environ_name):\n temp = os.getenv(environ_name)\n if temp is None:\n if ('ProgramFiles' in environ_name) or ('ProgramW6432' in environ_name):\n temp = os.getenv('ProgramFiles')\n return temp", "def _getOpenFileFilter(self):\n if (\n self.activeWindow() is not None and\n self.activeWindow().getFileName()\n ):\n ext = os.path.splitext(self.activeWindow().getFileName())[1]\n rx = QRegExp(r\".*\\*\\.{0}[ )].*\".format(ext[1:]))\n import QScintilla.Lexers\n filters = QScintilla.Lexers.getOpenFileFiltersList()\n index = -1\n for i in range(len(filters)):\n if rx.exactMatch(filters[i]):\n index = i\n break\n if index == -1:\n return Preferences.getEditor(\"DefaultOpenFilter\")\n else:\n return filters[index]\n else:\n return Preferences.getEditor(\"DefaultOpenFilter\")", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n 
#=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]" ]
[ "0.5488771", "0.52747124", "0.52578723", "0.5231717", "0.51516294", "0.50663215", "0.506381", "0.5058923", "0.505804", "0.49744675", "0.49618024", "0.4936943", "0.48768833", "0.48471853", "0.48470667", "0.4840807", "0.48290032", "0.47938055", "0.4790978", "0.47902945", "0.4788346", "0.47753254", "0.47611153", "0.4739906", "0.473429", "0.472436", "0.47228205", "0.47188336", "0.47063053", "0.46944368" ]
0.6358428
0
Add the trades already in the filter to the global trades dictionary. The price is divided by 100, as per how the business expects the output.
def add_trades(trdf):
    trade_list = trdf.Snapshot()
    if len(trade_list) > 0:
        main_dictStruct[str(trdf.Name())] = dict(
            (trade.Oid(), [trade.Oid(), trade.Quantity(), trade.Price() / 100, trade.TradeTime()])
            for trade in trade_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.lst_price,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price2 = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.list_price2,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def add_price(self, price, date, shares):\n\t\tvalue = price * shares\n\t\tself.price_list.append(value)\n\t\tself.date_priced.append(date)", "def update(self, context, data):\n self.context = context\n self.data = data\n\n dt = get_datetime()\n\n for tkt, bo in self._d_orders['trades'].items():\n price = self.data[bo.symbol].price\n bo.update(price, dt)", "def runner(ETFKey, trade):\n try:\n main_dictStruct[ETFKey][trade.trdnbr].append([\n trade.trdnbr, trade.quantity, trade.price / 100, trade.time])\n except Exception as exc:\n log(exc)", "def trades(self, symbol, **kwargs):\n pass", "def accumulate_prices(name,products,sales,types,add):\r\n return reduce(add,[get_prices_by_type(name,products,sales,types)[i] for i in get_prices_by_type(name,products,sales,types)])", "def entrycalc(self, lows, o):\n price = float(self.price)\n \n #print(nextTrade==price,nextTradeSeller==price)\n for i in range(2, self.entries + 1):\n if len(self.entryprices) > 0:\n avgentryprice = sum(self.entryprices) / len(self.entryprices)\n #if previous entry has been placed and current hasn't and other args are met\n if self.dentry[\"placedOrder\" + str(i - 1) + self.chartnumber] and price < avgentryprice and float(price) < lows[-2] and float(price) < float(o) and not self.dentry[\"placedOrder\" + str(i) + self.chartnumber]:\n self.dentry[\"placedOrder\" + str(i) + self.chartnumber] = True\n #add these to dict\n print(\"trade number\",str(i))\n self.dentry[\"tradeEntries\" + str(i) + self.chartnumber] += 1\n #self.totalentries += 1\n \n #I changed these from price to nextTrade\n self.dentry[\"orderPrice\" + str(i) + self.chartnumber] = price\n #self.dentry[\"orderPrice\" + str(i) + chartnumber] = self.nextTrade\n \n #altbuy = int(self.dentry[\"buy\" + str(i) + chartnumber] / price)\n altbuy = int(self.dentry[\"buy\" + str(i) + self.chartnumber] / self.nextTrade)\n \n #self.availablebase -= altbuy * price\n self.availablebase -= altbuy * self.nextTrade\n altbuy -= altbuy * .001\n self.amtofalt += altbuy\n ###HOW LONG TO WE WANT ENTRYPRICES TO BE??\n \n #self.entryprices.append(price)\n self.entryprices.append(self.nextTrade)\n if self.graphics:\n self.graph.buy(self.masterDick[\"currentPrice\" + self.chartnumber], self.masterDick[\"count\" + self.chartnumber], self.chartnumber, i)\n #print(\"Fun:\",self.amtofalt)\n print(\"Buy\" + str(i),self.dentry[\"buy\" + str(i) + self.chartnumber])\n break", "def get_prices(self):\n pass", "async def prepare_trades(self, pair: str):\n\n if pair not in self.trades:\n self.trades[pair] = {\n 'last_open_time': 0.0,\n 'rebuy_count': 0,\n 'open': [],\n 'closed': []\n }", "def _construct_all_prices(self):\n d = dict([(s+'-', 0.0) for s in self.symbol_list] +\n [(s+'+', 0.0) for s in self.symbol_list])\n d['datetime'] = self.backtest_date\n return [d]", "async def 
update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = 
summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = 
self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... \")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel", "def get_wallet_trades(self, walletId, filters={}):\n return", "def _get_trades(self):\n\n trade_url = self.trade_url % (self.date, self.instrument, self.exchange)\n self.trades = pd.read_csv(trade_url, parse_dates=[0],\n date_parser=lambda t: pd.to_datetime(str(t), format='%Y%m%dT%H%M%S'))\n\n self.trades.fillna(np.nan)\n self.trades.index = pd.to_datetime(self.trades.time, unit='s')\n self.trades.time = pd.to_datetime(self.trades.time, unit='s')\n self.trades.columns = ['time', 'price', 'volume', 'source', 'buyer', 'seller', 'initiator']\n # del self.trades['time']\n\n if self.exclude_derivative:\n self.trades = self.trades[(self.trades.source != 'Derivatives trade') & (self.trades.source != 'Official')]", "def user_trades(self, symbol, **kwargs):\n pass", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def _resolve_trades(self, from_symbol, to_symbol, date, price, sell_all=False):\n\n # Calculate 
profits on past trades\n trades = deepcopy(self.trades[from_symbol])\n unresolved_trades = trades['unresolved_trades']\n\n resolving = {\n 'amount_fs':0,\n 'amount_ts':0,\n 'profit':0,\n 'percent_profit':[],\n 'trades_resolved':[]\n }\n\n if sell_all:\n\n for trade in unresolved_trades:\n\n diff = trade['amount_fs']*price - trade['amount_ts']\n if self.trading_fee:\n diff -= self.trading_fee*trade['amount_ts']\n\n percent_diff = 100*diff/trade['amount_ts']\n\n resolving['amount_fs'] += trade['amount_fs']\n resolving['amount_ts'] += trade['amount_fs']*price\n resolving['trades_resolved'].append(trade['id'])\n resolving['profit'] += diff\n resolving['percent_profit'].append(percent_diff)\n self._remove_unresolved(trade, from_symbol)\n\n else:\n\n # Find lowest pending trade, calculate profit based on that\n lowest_price = unresolved_trades[0]['price']\n id = unresolved_trades[0]['id']\n\n for trade in unresolved_trades:\n if trade['price'] < lowest_price:\n id = trade['id']\n\n for trade in unresolved_trades:\n\n if trade['id'] == id:\n\n diff = trade['amount_fs']*price - trade['amount_ts']\n if self.trading_fee:\n diff -= self.trading_fee*trade['amount_ts']\n\n percent_diff = 100*diff/trade['amount_ts']\n\n resolving['amount_fs'] = trade['amount_fs']\n resolving['amount_ts'] = trade['amount_fs']*price\n resolving['trades_resolved'] = trade['id']\n resolving['profit'] = diff\n resolving['percent_profit'] = percent_diff\n self._remove_unresolved(trade, from_symbol)\n\n\n\n resolving['percent_profit'] = np.average(resolving['percent_profit'])\n resolving['trades_resolved'] = ';'.join(resolving['trades_resolved'])\n self.currency_earned[to_symbol] += resolving['profit']\n\n # Update sells\n for key, value in resolving.items():\n self.trades[from_symbol]['sells'][key].append(value)\n\n resolving['id'] = uuid4().hex[:10]\n self._append_all_sells(resolving, from_symbol, date, price)\n\n # Update overall\n trades['total_invested'] -= resolving['amount_fs']\n self.purse[to_symbol] += resolving['amount_ts']\n\n if self.trades[from_symbol]['unresolved_trades']:\n self.unresolved_trade[from_symbol] = True\n else:\n self.unresolved_trade[from_symbol] = False", "def _amount_all(self):\n for order in self:\n order.update({\n 'net_rate': order.basic_rate + order.extra_rate\n })", "def before_trading_start(context, data):\r\n context.output = algo.pipeline_output('pipeline')\r\n\r\n # These are the securities that we are interested in trading each day.\r\n context.security_list = context.output.index\r\n \r\n # Loop through all assets in pipeline.\r\n for asset, row in context.output.iterrows():\r\n context.price[asset] = row.close\r\n \"\"\"\r\n # Skip entries with no flags.\r\n if row.flag_type != 'UP' and row.flag_type != 'DOWN':\r\n continue\r\n \r\n log.info('%s flag for %s. 
Price level = %f' % (row.flag_type, asset, context.price[asset]))\r\n \r\n # Count flags for asset in context.flags\r\n if asset in context.flags:\r\n context.flags[asset][row.flag_type] += 1\r\n else:\r\n if row.flag_type == 'UP':\r\n context.flags[asset] = {'UP': 1, 'DOWN': 0}\r\n \r\n elif row.flag_type == 'DOWN':\r\n context.flags[asset] = {'UP': 0, 'DOWN': 1}\r\n \"\"\" \r\n \r\n context.up_ratios[asset] = row.up_ratio\r\n \r\n if math.isnan(row.up_flags):\r\n continue\r\n \r\n context.flags[asset] = {'UP': row.up_flags, 'DOWN': row.down_flags}\r\n \r\n # In 2020, activate overweighting\r\n if not context.overweighting:\r\n today = get_datetime('US/Eastern')\r\n if today.year == 2020:\r\n context.overweighting = True", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def subscribe_trades(self, symbol, update_handler=None):\n pass", "def add_buy(self, trade):\n trade = self._format_sql(trade, self.buy_table)\n self.buys[trade['id']] = trade", "def _update_trade(self):\n # Populate price ladder dataframe. Assign trade to a side assuming there\n # isn't both a bid and ask at the same price. Aggregate consecutive\n # trades at the same price and populate cumulative volume.\n if self._quotes_row > 0:\n for i in range(self._config['row_count']):\n if math.isclose(self._price_ladder[i],\n self._trades_df.loc[self._trades_row, 'price']):\n volume = self._trades_df.loc[self._trades_row, 'volume']\n if self._price_ladder_df.iloc[i, 1]:\n if self._price_ladder_df.iloc[i, 0]:\n volume += int(self._price_ladder_df.iloc[i, 0])\n\n self._price_ladder_df.iloc[i, 0] = str(volume)\n self._price_ladder_df.iloc[i, 4] = ''\n elif self._price_ladder_df.iloc[i, 3]:\n if self._price_ladder_df.iloc[i, 4]:\n volume += int(self._price_ladder_df.iloc[i, 4])\n\n self._price_ladder_df.iloc[i, 0] = ''\n self._price_ladder_df.iloc[i, 4] = str(volume)\n else:\n self._price_ladder_df.iloc[i, [0, 4]] = ''\n\n # Print this trade row and update counter.\n print(self._trades_df.iloc[self._trades_row, ].values)\n self._trades_row += 1", "def _total_price(self, cr, uid, ids, field_name, arg, context={}):\n res = {}\n for record in self.browse(cr, uid, ids, context=context):\n val = 0.0\n for line in record.item_ids:\n val += line.price\n res[record.id] = val \n return res", "def add_stock(self, symbol, quantity, unit_price):\n # TODO write SQL statement to grab unit_price\n stock_price_total = quantity * unit_price # TODO write SQL statement\n # TODO deduct stock quantity from market ??\n self.portfolios.append((symbol, quantity, unit_price))\n self.value += stock_price_total", "def add_gst (list_of_prices):\n\n add_gst=[]\n for item in list_of_prices:\n list_with_gst = round(item*1.15,2)\n add_gst+=[list_with_gst]\n return add_gst", "def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n self.load_markets()\n market = None\n request = {}\n if limit is not None:\n request['take'] = limit\n request['take'] = limit\n if since is not None:\n request['toTime'] = self.yyyymmdd(self.milliseconds(), '.')\n request['fromTime'] = self.yyyymmdd(since, '.')\n if symbol is not 
None:\n market = self.market(symbol)\n request['pair'] = market['id']\n response = self.privateGetOrderOrderHistory(self.extend(request, params))\n #\n # [\n # {\n # \"ticks\":1574767951,\n # \"created\":\"26/11/19 13:32\",\n # \"action\":1,\n # \"price\":\"1000\",\n # \"pair\":\"EthNis\",\n # \"reference\":\"EthNis|10867390|10867377\",\n # \"fee\":\"0.5\",\n # \"feeAmount\":\"0.08\",\n # \"feeCoin\":\"₪\",\n # \"firstAmount\":\"-0.015\",\n # \"firstAmountBalance\":\"9\",\n # \"secondAmount\":\"14.93\",\n # \"secondAmountBalance\":\"130,233.28\",\n # \"firstCoin\":\"ETH\",\n # \"secondCoin\":\"₪\"\n # },\n # {\n # \"ticks\":1574767951,\n # \"created\":\"26/11/19 13:32\",\n # \"action\":0,\n # \"price\":\"1000\",\n # \"pair\":\"EthNis\",\n # \"reference\":\"EthNis|10867390|10867377\",\n # \"fee\":\"0.5\",\n # \"feeAmount\":\"0.08\",\n # \"feeCoin\":\"₪\",\n # \"firstAmount\":\"0.015\",\n # \"firstAmountBalance\":\"9.015\",\n # \"secondAmount\":\"-15.08\",\n # \"secondAmountBalance\":\"130,218.35\",\n # \"firstCoin\":\"ETH\",\n # \"secondCoin\":\"₪\"\n # }\n # ]\n #\n return self.parse_trades(response, market, since, limit)", "def _get_book_prices(self):\n for k in self.orders.keys():\n if self.orders[k].type == 'ask':\n self.ask_prices.append(self.orders[k].price)\n self.ask_snapshot[k] = self.orders[k]\n elif self.orders[k].type == 'bid':\n self.bid_prices.append(self.orders[k].price)\n self.bid_snapshot[k] = self.orders[k]\n # Sorting and removing dubbing\n self.ask_prices = list(dict.fromkeys(sorted(self.ask_prices)))\n self.bid_prices = list(dict.fromkeys(sorted(self.bid_prices, reverse=True)))" ]
[ "0.58588636", "0.5809941", "0.55692714", "0.5566447", "0.55549794", "0.55451953", "0.55318564", "0.5514227", "0.5454263", "0.5448498", "0.54461616", "0.54408807", "0.54088545", "0.54009604", "0.5368632", "0.5354905", "0.53320706", "0.53319985", "0.5319314", "0.52945083", "0.5284764", "0.5281337", "0.52675176", "0.52538294", "0.5233779", "0.5207737", "0.5186291", "0.5176367", "0.51577544", "0.5138514" ]
0.61422753
0
Differential voltage: v1 = voltage between inverting terminal and ground; v2 = voltage between noninverting terminal and ground.
def vd(v2,v1): return v2-v1
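A tiny usage sketch of the helper above; the terminal voltages are illustrative values, not from the source:

# vd(v2, v1) returns the differential input voltage v2 - v1.
v_noninverting = 2.0   # illustrative: voltage at the noninverting terminal (V)
v_inverting = 1.5      # illustrative: voltage at the inverting terminal (V)
print(vd(v_noninverting, v_inverting))  # prints 0.5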
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def voltage_conversion(self):\r\n\t\tvoltage = ((self.data[0] * 256 + self.data[1]) / 65536.0) * 5.0\r\n\t\t\r\n\t\treturn {'v' : voltage}", "def velocity(df0, df1):\n velocity = df1 - df0\n return velocity", "def diff_v_x1(x1, x2, t=0.):\n return (omega) ** 2 * x1", "def impulse(self,v1,v2):\n dv_peri = self.v_peri - v1\n \n dv_aphe = self.v_peri - v2\n \n return dv_peri, dv_aphe", "def diff_v_x2(x1, x2, t=0.):\n return (omega) ** 2 * x2", "def get_voltage(self):\n print(\"voici le voltage de la batterie\")", "def voltage_divider(Vin, R1, R2):\n R1, R2 = _normalizevalue(R1), _normalizevalue(R2)\n Vout = R2 / (R1+R2) * Vin\n return _Volt(Vout)", "def voltage(self):\n return self.outputValue()", "def get_voltage_and_current(self):\n return self.voltage_and_current", "def get_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. (.*?) .*? .*? .*? .*? . .*? .*? . . . .*?'\n voltage = float(re.findall(pattern,summary).pop())\n return voltage", "def Leapfrog2D(p1, v1, p2, v2, dt, energy, sigma):\n a = compute_acceleration(energy, sigma, p2-p1)\n pn1 = p1 + v1*dt + 0.5 * a * dt**2\n pn2 = p2 + v2*dt - 0.5 * a * dt**2\n a2 = compute_acceleration(energy, sigma, pn2-pn1)\n vn1 = v1 + 0.5 * (a+a2) * dt\n vn2 = v2 - 0.5 * (a+a2) * dt\n return pn1, vn1, pn2, vn2", "def calc_out_voltage(self, input_photocurrent_file):\n pass", "def differential_amplifier_Vout(Vin, Vref, R1, R2, R3, R4):\n R1, R2, R3, R4 = map(_normalizevalue, (R1, R2, R3, R4))\n Vout = -Vin*(R3/R1) + Vref*(R4/(R2+R4)*((R1+R3)/R1))\n return _Volt(Vout)", "def inductive_voltdiv(Vin=None, Vout=None, L1=None, L2=None, find=''):\n if Vin is not None and L1 is not None and L2 is not None:\n Vout = (Vin * L1) / (L1 + L2)\n elif Vout is not None and L1 is not None and L2 is not None:\n Vin = (Vout) * (L1 + L2) / (L1)\n elif Vin is not None and Vout is not None and L2 is not None:\n L1 = L2 * (Vin - Vout) / (Vout)\n elif Vin is not None and Vout is not None and L1 is not None:\n L2 = L1 * Vout / (Vin - Vout)\n else:\n raise ValueError(\"ERROR: Invalid Parameters or too few\" +\n \" parameters given to calculate.\")\n\n find = find.lower()\n\n if find == 'vin':\n return Vin\n elif find == 'vout':\n return Vout\n elif find == 'l1':\n return L1\n elif find == 'l2':\n return L2\n else:\n return Vin, Vout, L1, L2", "def det(v1, v2):\n\treturn v1[0] * v2[1] - v1[1] * v2[0]", "def TVD(p1, p2):\n assert p1.shape == p2.shape\n return 0.5 * np.sum(np.absolute(np.subtract(p1, p2)))", "def voltage(analog_pin):\n return \"%0.2f\" % inVolts(analogRead(analog_pin))", "def get_voltage(self):\n status = self.get_status_response()\n volts = status[20] + (status[21] * 0x100) + (status[22] * 0x10000) + (status[23] * 0x1000000)\n volts = float(volts)\n volts /= (1000.0 * 1000.0)\n return volts\n #end get_voltage", "def calibV(self):\n # clear buffer in case of errors\n self.flushInput()\n \n if (self.model == 'GDS'):\n self.write(':CHAN'+str(ch)+':SCAL?\\n')\n # returns V/div, turn it into multiplicative factor\n # between digitizer and actual volts\n vmult = float(self.readline()) * 10./255.\n # GDS includes vertical offset in the data returned.\n voff = 0.\n elif (self.model == 'TDS'):\n self.write('WFMPre:YMUlt?\\n')\n # formula I am using later is from TDS manual, so this\n # is straightforward.\n vmult = float(self.readline())\n self.write('WFMPre:YOFf?\\n')\n voff = float(self.readline())\n \n # clear buffer in case of errors\n self.flushInput()\n\n return (vmult, voff)", "def VerletHope2(r, v, beta,dt,R_dust,M_dust):\n 
# Deceptively simple (read about Velocity Verlet on wikipedia)\n r_new = r + v*dt + calculate_acceleration2(r,v,beta,omega,R_dust,M_dust)*dt**2/2\n v_new = v + (calculate_acceleration2(r,v,beta,omega,R_dust,M_dust) + calculate_acceleration2(r_new,v,beta,omega,R_dust,M_dust))/2 * dt\n \n return (r_new, v_new)", "def voltage(self):\n\t\treturn self._voltage", "def CalculateFeedForwardVoltage(leftSide, velocity, acceleration):\r\n if acceleration >= DRIVETRAIN_MAX_ACCELERATION:\r\n print(\"WARNING: The acceration is larger than the max!!\")\r\n\r\n if velocity >= DRIVETRAIN_MAX_VELOCITY:\r\n print(\"WARNING: The velocity is larger than the max!!\")\r\n\r\n if leftSide:\r\n kV = DRIVETRAIN_LEFT_KV\r\n kA = DRIVETRAIN_LEFT_KA\r\n VIntercept = DRIVETRAIN_LEFT_V_INTERCEPT\r\n else:\r\n kV = DRIVETRAIN_RIGHT_KV\r\n kA = DRIVETRAIN_RIGHT_KA\r\n VIntercept = DRIVETRAIN_RIGHT_V_INTERCEPT\r\n\r\n return kV * velocity + kA * acceleration + VIntercept", "def d(u, v):\r\n\tdiff = u-v\r\n\treturn diff.dot(diff)", "def reference_voltage(self) -> float:\n return self._ref_voltage", "def __init__(self,v0,v1):\n self.vinputs = v0,v1\n self.xhi = max([v0[0],v1[0]])\n self.yhi,self.ylo = v0[1]>v1[1] and (v0[1],v1[1],) or (v1[1],v0[1])\n\n self.m = (v1[0]-v0[0]) / (v1[1]-v0[1]) ### (x1-x0)/(y1-y0)\n self.b = v0[0] - (v0[1] * self.m) ### x0 - y0*(x1-x0)/(y1-y0)", "def delta_v_calc(mass_initial,\n mass_final,\n v_exhaust,\n ):\n\n return v_exhaust * math.log(mass_initial / mass_final)", "def voltage_drop(V):\n vmin = Vmin(V)\n resting = Vrest(V)\n return vmin - resting", "def voltage_divider_R2(Vin, Vout, R1=\"1K\"):\n R1 = _normalizevalue(R1)\n R2 = R1*(1/(Vin/Vout - 1))\n return _Res(R2)", "def dvdt(self, args: Dict) -> float:\n if self.channel_bool['leak']:\n i_leak: float = self.leak.i(args['v'])\n else:\n i_leak: float = 0.\n \n if self.channel_bool['nav']:\n i_nav: float = self.nav.i(args['v'], h=args['h_nav'])\n else:\n i_nav: float = 0.\n\n if self.channel_bool['kvhh']:\n i_kvhh: float = self.kvhh.i(args['v'], n=args['n_kvhh'])\n else:\n i_kvhh: float = 0.\n\n if self.channel_bool['kva']:\n i_kva: float = self.kva.i(args['v'], h=args['h_kva'])\n else:\n i_kva: float = 0.\n\n if self.channel_bool['kvsi']:\n i_kvsi: float = self.kvsi.i(args['v'], m=args['m_kvsi'])\n else:\n i_kvsi: float = 0.\n\n if self.channel_bool['cav']:\n i_cav: float = self.cav.i(args['v'])\n else:\n i_cav: float = 0.\n\n if self.channel_bool['kca']:\n i_kca: float = self.kca.i(args['v'], ca=args['ca'])\n else:\n i_kca: float = 0.\n \n if self.channel_bool['nap']:\n i_nap: float = self.nap.i(args['v'])\n else:\n i_nap: float = 0.\n\n if self.channel_bool['kir']:\n i_kir: float = self.kir.i(args['v'])\n else:\n i_kir: float = 0.\n\n if self.channel_bool['ampar']:\n i_ampar: float = self.ampar.i(args['v'], s=args['s_ampar'])\n else:\n i_ampar: float = 0.\n\n if self.channel_bool['nmdar']:\n i_nmdar: float = self.nmdar.i(args['v'], s=args['s_nmdar'])\n else:\n i_nmdar: float = 0.\n\n if self.channel_bool['gabar']:\n i_gabar: float = self.gabar.i(args['v'], s=args['s_gabar'])\n else:\n i_gabar: float = 0.\n\n return ((-10.0*self.params.area \n * (i_leak\n + i_nav \n + i_kvhh \n + i_kva \n + i_kvsi \n + i_cav \n + i_kca \n + i_nap \n + i_kir) \n - (i_ampar \n + i_nmdar \n + i_gabar))\n / (10.0*self.params.cm*self.params.area))", "def _uni_to_diff(self, v, omega):\n\n# print(\"--MuleBot._uni_to_diff({:.3f}, {:.3f})\".format(v, omega))\n loggerMB.debug(\"--MuleBot._uni_to_diff({:.3f}, {:.3f})\".format(v, omega))\n\n # v = translation velocity 
(m/s)\n # omega = angular velocity (rad/s)\n\n # For some reason, it is necessary to multiply the angle by -1.\n # TODO: Probably have to put this back in.\n omega *= -1.0\n\n inches_per_meter = 39.3701\n circumference_in = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n circumference_m = circumference_in / inches_per_meter\n radians_per_circumference = 2.0\n # R = roll?(meters/radian)\n R = circumference_m / radians_per_circumference\n\n # Get info in inches\n Lin = MuleBot.WHEEL_BASE_LENGTH\n # Convert inches to meters\n Lm = Lin / inches_per_meter\n\n # All measurements are now metric.\n v_l = ( (2.0 * v) - (omega * Lm) ) / (2.0 * R)\n v_r = ( (2.0 * v) + (omega * Lm) ) / (2.0 * R)\n loggerMB.debug(\"--MuleBot._uni_to_diff v_l, v_r: {:.3f}, {:.3f}\".format(v_l, v_r))\n\n rpm_l = self.rps_to_rpm(v_l)\n rpm_r = self.rps_to_rpm(v_r)\n# print(\"--MuleBot._uni_to_diff rpm_l, rpm_r: {:.3f}, {:.3f}\".format(rpm_l, rpm_r))\n loggerMB.debug(\"--MuleBot._uni_to_diff rpm_l, rpm_r: {:.3f}, {:.3f}\".format(rpm_l, rpm_r))\n\n return v_l, v_r" ]
[ "0.62008476", "0.613027", "0.6099671", "0.6095188", "0.60554194", "0.6046503", "0.60115767", "0.59870183", "0.59005463", "0.5894088", "0.5804491", "0.5798433", "0.5769953", "0.5750147", "0.57478386", "0.57300663", "0.57192975", "0.5658065", "0.5647973", "0.56422937", "0.56061894", "0.55814314", "0.5544331", "0.55328596", "0.55279124", "0.55081", "0.5500434", "0.5449531", "0.5430496", "0.5425202" ]
0.692218
0
An amplifier with infinite open-loop gain (v_o), infinite input resistance, and zero output resistance. The ideal op amp is an approximate analysis, but most modern amplifiers have such large gain and input impedances that the approximation is a good one.
def idealOpAmp():
    # Ideal op amp: infinite open-loop gain, infinite input resistance,
    # zero output resistance (approximate analysis).
    pass
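For context, a minimal worked example of how the ideal-op-amp approximation is typically applied, using an assumed inverting-amplifier configuration; the helper name and resistor values below are illustrative, not from the source:

# Under the ideal-op-amp assumptions (no input current, v+ == v-),
# the closed-loop gain of an inverting amplifier is -Rf / Rin.
def inverting_gain(r_f, r_in):
    return -r_f / r_in

print(inverting_gain(10e3, 1e3))  # prints -10.0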
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _amp_ ( self , x ) :\n v = self.amplitude ( x )\n #\n return complex( v.real () , v.imag () )", "def app(data_pupil,data_phase,oversize=4):\n complexr=app_complex(data_pupil,data_phase,oversize)\n amp=(abs(complexr)**2)\n return amp", "def amps(self, amp: NumType):\n self._amp = amp", "def amplifier(gain, iterable):\n return (gain * sample for sample in iterable)", "def set_amplitude_exact(self, ampl):\n if (type(ampl) not in [int, float]):\n raise Exception(\n \"ERROR: Amplitude can only be a float ( or int ) value.\")\n\n if ampl <= self.AMPL_MIN:\n self._amplitude = 0\n self._attenuation = 0\n\n if ampl >= self.AMPL_MAX:\n self._amplitude = 10 # 10v is the maximum amplitude\n self._attenuation = 0\n\n self._attenuation = 20 * (math.log10(float(ampl) / 10))\n self._amplitude = float(ampl)", "def instrumentationAmplifier():\n eq = list()\n eq.append(\"Eq(vo,A_v*(v2-v1)\")\n eq.append(\"Eq(Av, 1 + 2*R / R_G\")", "def kA_func(self):\n\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_bp_p(i1)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2 and not self.inl[0].T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not self.outl[1].T.val_set:\n T_o2 = T_i1 - 0.5\n\n if T_o1 <= T_i2 and not self.outl[0].T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not self.inl[1].T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log", "def alorentz(self, X, xm, amp, w, a):\n # w(x) = 2 * w / (1 + np.exp(a * (X - xm)))\n return amp / (1 + ((X - xm) / ((2 * w / (1 + np.exp(a * (X - xm)))) / 2)) ** 2)", "def amps(self) -> NumType:\n return self._amp", "def test_intra_power_law_fit_no_model(self):\n\n\t\tdetails= self.watcher.analyze(model=self.model, layers=self.fc_layers, intra=True, randomize=False, vectors=False)\n\t\tactual_alpha = details.alpha[0]\n\n\t\texpected_alpha = 2.654 # not very accurate because of the sparisify transform\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=1)", "def test_analog_in_out_loop(self):\n for v in range(0, 6, 1):\n self.l.output(ao0=v, ao1=v)\n r = self.l.input(channels=(8,8,9,9), gains=(1,4,1,4))[0]\n for i in r:\n self.assertTrue(abs(v-i) < .1,\n \"measured %g for %g\" % (i, v))", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def decay_noise_ampl(self):\n \n self.noise_ampl *= self.noise_ampl_decay", "def kA_func(self):\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_mix_ph(i1, T0=self.inl[0].T.val_SI)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2:\n T_i1 = T_o2 + 0.01\n if T_i1 <= T_o2:\n T_o2 = T_i1 - 0.01\n if T_i1 <= T_o2:\n T_o1 = T_i2 + 0.02\n if T_o1 <= T_i2:\n T_i2 = T_o1 - 0.02\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log", "def 
treat(self):\r\n if self.noiseS > 0:\r\n self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))\r\n return self.alphasigma\r\n else:\r\n self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))\r\n return 1.0", "def set_sg_amp():\n amp = request.params.get(\"amp\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenAmp(output, ctypes.c_float(amp))\n if retval != 0:\n LOG.error(\"Failed to set signal generator amplitude. Error code: %s\", ERROR_CODES[retval])", "def f_amp(self):\n return self._f_amp", "def equalize_amps(f, multiply_flat=True):\n \n outfile = f.replace('.fits', '_eq.fits')\n if not os.path.isfile(outfile):\n h = fits.open(f)\n if multiply_flat:\n flat_file = os.path.join(os.environ['iref'], \n h[0].header['PFLTFILE'].replace('iref$', ''))\n \n # Get the average amp level\n amp_levels = []\n for i in [1,4]:\n data = np.copy(h[i].data)\n segmap = fits.getdata(f.replace('.fits', '_seg_ext_{}.fits'.format(i)))\n data[segmap > 0] = np.nan # flag sources\n data1, data2 = np.split(data, 2, axis=1) # split amps\n amp_levels.append(np.nanmedian(data1))\n amp_levels.append(np.nanmedian(data2))\n amp_average = np.nanmean(amp_levels)\n\n # Subtract the offset from the average amp level from each amp\n for i in [1,4]:\n data_orig = np.copy(h[i].data)\n data1_orig, data2_orig = np.split(data_orig, 2, axis=1) # split amps\n \n # Make copies of the original data with sources flagged\n data = np.copy(data_orig)\n segmap = fits.getdata(f.replace('.fits', '_seg_ext_{}.fits'.format(i)))\n data[segmap > 0] = np.nan # flag sources\n data1, data2 = np.split(data, 2, axis=1) # split amps\n \n # Subtract the offset of each amp from the average from the original data\n if multiply_flat:\n flat = fits.getdata(flat_file, i)\n flat1, flat2 = np.split(flat, 2, axis=1) # split amps\n data1_new = data1_orig - ((np.nanmedian(data1) - amp_average) * flat1)\n data2_new = data2_orig - ((np.nanmedian(data2) - amp_average) * flat2)\n else:\n data1_new = data1_orig - (np.nanmedian(data1) - amp_average)\n data2_new = data2_orig - (np.nanmedian(data2) - amp_average)\n\n data_new = np.concatenate([data1_new, data2_new], axis=1) # recombine amps\n h[i].data = data_new\n \n h.writeto(outfile, overwrite=True)\n h.close()\n\n else:\n print('{} already exists.'.format(outfile))", "def amplitude(self):\r\n return np.sqrt(self.maxint) * self.weights", "def solve_amps(self, h, a, g):\n\n # Symmetrize T3 RHS\n g3 = ((+ g.t3\n + g.t3.transpose([1, 2, 0, 4, 5, 3])\n + g.t3.transpose([2, 0, 1, 5, 3, 4])\n + g.t3.transpose([0, 2, 1, 3, 5, 4])\n + g.t3.transpose([2, 1, 0, 5, 4, 3])\n + g.t3.transpose([1, 0, 2, 4, 3, 5])\n ) / 12)\n\n # Symmetrize T2 RHS\n g2 = 1 / 2 * (g.t2 + g.t2.transpose([1, 0, 3, 2]))\n\n # Solve\n t2 = g2 * (- cc_denom(h.f, g.t2.ndim, 'dir', 'full'))\n t3 = g3 * (- cc_denom(h.f, g.t3.ndim, 'dir', 'full'))\n\n # Symmetrize amplitudes\n t2 = 1 / 2 * (t2 + t2.transpose([1, 0, 3, 2]))\n t3 = ((+ t3\n + t3.transpose([1, 2, 0, 4, 5, 3])\n + t3.transpose([2, 0, 1, 5, 3, 4])\n + t3.transpose([0, 2, 1, 3, 5, 4])\n + t3.transpose([2, 1, 0, 5, 4, 3])\n + t3.transpose([1, 0, 2, 4, 3, 5])) / 6)\n\n return Tensors(\n t1=g.t1 * (- cc_denom(h.f, g.t1.ndim, 'dir', 'full')),\n t2=t2,\n t3=t3)", "def opamp_amplifier_Rf(gain, Rgnd):\n Rgnd = _normalizevalue(Rgnd)\n Rf = Rgnd*(gain-1)\n return _Res(Rf)", "def test_am_complex(Simulator, plt, seed, rng):\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D+E+F')\n\n vocab2 = 
vocab.create_subset([\"A\", \"B\", \"C\", \"D\"])\n\n def input_func(t):\n if t < 0.25:\n return vocab.parse('A+0.8*B').v\n elif t < 0.5:\n return vocab.parse('0.8*A+B').v\n else:\n return vocab.parse('E').v\n\n def inhib_func(t):\n return int(t > 0.75)\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab2, vocab,\n default_output_key=\"F\",\n inhibitable=True,\n threshold_output=True)\n in_node = nengo.Node(output=input_func, label='input')\n inhib_node = nengo.Node(output=inhib_func, label='inhib')\n nengo.Connection(in_node, am.input)\n nengo.Connection(inhib_node, am.inhibit)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n utils_p = nengo.Probe(am.utilities, synapse=0.05)\n utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)\n\n sim = Simulator(m)\n sim.run(1.0)\n t = sim.trange()\n # Input: A+0.8B\n more_a = (t >= 0.2) & (t < 0.25)\n # Input: 0.8B+A\n more_b = (t >= 0.45) & (t < 0.5)\n # Input: E (but E isn't in the memory vocabulary, so should output F)\n all_e = (t >= 0.7) & (t < 0.75)\n # Input: E (but inhibited, so should output nothing)\n inhib = (t >= 0.95)\n\n def plot(i, y, ylabel):\n plt.subplot(4, 1, i)\n plt.plot(t, y)\n plt.axvline(0.25, c='k')\n plt.axvline(0.5, c='k')\n plt.axvline(0.75, c='k')\n plt.ylabel(ylabel)\n plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')\n plot(1, nengo.spa.similarity(sim.data[in_p], vocab), \"Input\")\n plot(2, sim.data[utils_p], \"Utilities\")\n plot(3, sim.data[utils_th_p], \"Thresholded utilities\")\n plot(4, nengo.spa.similarity(sim.data[out_p], vocab), \"Output\")\n\n assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])\n assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])\n assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])\n assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])\n assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05\n assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05\n assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])\n assert all(\n np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])\n assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])\n assert all(\n np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])\n assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05\n assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05\n assert similarity(sim.data[out_p][more_a], vocab.parse(\"A\").v) > 0.8\n assert similarity(sim.data[out_p][more_a], vocab.parse(\"B\").v) > 0.8\n assert similarity(sim.data[out_p][more_b], vocab.parse(\"A\").v) > 0.8\n assert similarity(sim.data[out_p][more_b], vocab.parse(\"B\").v) > 0.8\n assert similarity(sim.data[out_p][all_e], vocab.parse(\"F\").v) > 0.8\n assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05", "def agauss(self, X, xm, amp, w, a):\n # w(x) = 2 * w / (1 + np.exp(a * (X - xm)))\n return amp * np.exp(-((X - xm) / (2 * w / (1 + np.exp(a * (X - xm))))) ** 2)", "def kA_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n return i[0] * (o[2] - i[2]) + 
self.kA.val * td_log", "def anisotropy_solution(r, **kwargs):\n return 1", "def opamp_gain(R1, Rf):\n R1, Rf = map(_normalizevalue, (R1, Rf))\n gain = 1 + (Rf/R1)\n return gain", "def test_amplitude_damping_error(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr) # prepare + state\n for _ in range(30):\n # Add noisy identities\n circuit.barrier(qr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n shots = 2000\n backend = QasmSimulator()\n # test noise model\n error = amplitude_damping_error(0.75, 0.25)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)", "def gain_opt(machine, T):\n res = (np.arange(T)+1)\n return res * np.amax(machine)", "def mamplitude(options):\n signal = audio.read(options.ipath)\n result = op.mamplitude(signal.data, float(options.factor))\n audio.write(options.opath, result, signal.rate, sampwidth=1)\n if options.plot:\n plotter.plot(**{'Input: '+options.ipath: signal.data,\n 'Output: '+options.opath: result})", "def evaluate(x, amplitude, x_0, alpha, x_cutoff):\n\n xx = x / x_0\n return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)" ]
[ "0.6163449", "0.60998505", "0.59039474", "0.5810233", "0.57597375", "0.575317", "0.5678718", "0.56314796", "0.5603917", "0.5500167", "0.5498625", "0.5482416", "0.5466694", "0.54564613", "0.5434886", "0.54241836", "0.5395722", "0.53837705", "0.5347545", "0.53447354", "0.53339744", "0.5330714", "0.53195167", "0.5304796", "0.5297552", "0.5279269", "0.52569205", "0.5238143", "0.5229706", "0.52161425" ]
0.74028385
0
Updates entity's destination and returns lead time to arrive there in hours.
def set_destination(self):
    # TODO: consider new implementation with multiple paths possible.
    self.destination = self.network[self.current_node]['next']
    lead_time = self.network[self.current_node]['path'].lead_time
    return datetime.timedelta(hours=lead_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def travel_time(self, origin, destination):\n assert 2 <= len(origin) <= 3, \"Origin should by (x, y) or (x, y, z)\"\n assert 2 <= len(destination) <= 3, \"Origin should by (x, y) or (x, y, z)\"\n assert len(origin) == len(destination), \"Elevation should be present in origin and destination or absent in both\"\n if len(origin) == 2:\n xo, yo = origin\n xd, yd = destination\n zo = zd = 0\n else:\n assert len(origin) == 3\n xo, yo, zo = origin\n xd, yd, zd = destination\n\n ground_distance = np.sqrt((xd-xo)**2 + (yd-yo)**2)\n elevation_diff = zd - zo\n if elevation_diff >= 0:\n return max(ground_distance / self.max_airspeed, elevation_diff / self.max_rate_of_climb)\n else:\n return max(ground_distance / self.max_airspeed, -elevation_diff / self.max_rate_of_descent)", "def update_by_delta(self):\n if (not self.smart_scheduled_for) or (not self.smart_schedule_info):\n # Doesn't depend on any other event.\n return\n\n delta_s = self.smart_schedule_info.get('delta_s')\n if delta_s is None: # Doesn't have a time delta.\n return\n\n delta_to_assoc_event = timedelta(seconds=delta_s)\n new_start_time = self.smart_scheduled_for.end_time + delta_to_assoc_event\n new_end_time = new_start_time + (self.end_time - self.start_time)\n\n self.start_time = new_start_time\n self.end_time = new_end_time\n self.save()", "def alter_destination(self, destination):\n series = self.series\n if not series:\n logging.warning('Cannot alter destination to %s for orphan mission %s.' % (destination, self.id))\n return\n\n destination_point = series.point_for_station(destination)\n if not destination_point:\n logging.warning('Cannot alter destination to %s for mission %s. (no id found)' % (destination, self.id))\n return\n\n destination_id = destination_point.station_id\n passed = False\n for stop in self.stops:\n if passed:\n stop.status = StopStatuses.canceled\n else:\n if stop.station_id == destination_id:\n passed = True\n stop.status = StopStatuses.altDestination\n else:\n stop.alteredDestination = destination\n\n if passed:\n logging.info('Mission %s altered destination to %s.' % (self.id, destination))\n else:\n logging.warning('Mission %s could not find altered destination %s.' 
% (self.id, destination))\n url = '/agent/station/%s' % destination_id\n self.issue_time += timedelta(seconds=config.INTERVAL_BETWEEN_UPDATE_MSG)\n self.tasks.append(self.instruction_task(url, 'prio', self.issue_time))", "def route_info_helper(g, origin, destination, distance):\n time = 0 \n acceleration = 1406.25\n \n if(distance > 400):\n time = time + 0.53 + 0.53\n distance = distance - 400\n time = time + distance / 750\n else:\n half = distance / 2.0\n time = time + 2 (math.sqrt((2 * half) / acceleration))\n \n \n flights_out = g.city_dict[destination].get_flights_out()\n number = len(flights_out)\n time = time + (2.1 - (0.1 * number))\n \n return time", "def getNextDest(self):\n\n if self.direction_forward:\n if len(self.destinations)-1 == self.current_loc: #if Autobuz reaches rightmost destination, it also takes a break and changes directions\n self.direction_forward = False #Autobuz changes direction\n self.updateOmLocation()\n return self.destinations[self.current_loc], (self.break_duration + self.trip_duration) #return destination reached and elapsed time\n \n else:\n self.current_loc += 1\n self.updateOmLocation()\n return self.destinations[self.current_loc], self.trip_duration\n \n else:\n if 0 == self.current_loc: #if Autobuz reaches leftmost destination, it also takes a break and changes directions\n self.direction_forward = True #Autobuz changes direction\n self.updateOmLocation()\n return self.destinations[self.current_loc], (self.break_duration + self.trip_duration)\n \n else:\n self.current_loc -= 1\n self.updateOmLocation()\n return self.destinations[self.current_loc], self.trip_duration", "def _update_transition(self, dt, time, direction): #pylint:disable-msg=C0103,C0301\r\n pass", "def update(self, dt=None): #pylint: disable=invalid-name\n if dt is None:\n dt = datetime.utcnow()\n\n self.update_location(self.old_location, dt - timedelta(seconds=1))\n self.update_location(self.current_location, dt)\n self.update_location(self.future_location, dt + timedelta(seconds=1))", "def dst(self, dt):", "def compute_travel_time(start_id, dest_id, csv, G):\n \n # route is not computed yet\n if csv[start_id][dest_id] is None:\n return -1\n\n travel_time = 0\n cur_node_id = start_id\n\n while cur_node_id != dest_id:\n \n # get the next node on this route\n next_node_id = csv[cur_node_id][dest_id]\n\n # update the travel time\n edge = G.get_edge_data(cur_node_id, next_node_id)\n travel_time += edge[0]['travel_time']\n\n cur_node_id = next_node_id\n\n return travel_time", "def determine_arrival_time(middle_destination):\r\n\r\n start_link = 'https://maps.googleapis.com/maps/api/directions''/json?'\r\n end_link = '&mode=transit&transit_mode=subway'\r\n final_link = start_link + 'origin=%s&destination=%s&key=%s&arrival_time=%s' % (\r\n origin, middle_destination, api_key, str(arrival_time),) + end_link\r\n # change to directions matrix\r\n json_total_routes = requests.get(final_link).json()\r\n # determines start time to get to destination and then adds 5 minute (300 sec) buffer, this is in unix form\r\n buffer_time = 300\r\n arrival_time_transit = json_total_routes['routes'][0]['legs'][0]['departure_time']['value'] - buffer_time\r\n return arrival_time_transit", "def update_stay_time(self):\n # It would not be better to simply self.stay_time = self.get_length() ??\n self.stay_time = self.get_length()", "def getDeliveryTime(ori, dest):\n\n start_time = time.time()\n\n routingApi = herepy.RoutingApi(os.getenv(\"HERE_KEY\"))\n gm = GoogleMaps(os.getenv(\"GOOGLE_KEY\"))\n\n try:\n response 
= routingApi.truck_route(ori.coords[::-1], dest.coords[::-1], [herepy.RouteMode.truck, herepy.RouteMode.fastest]).as_dict()\n distance = response.get('response').get('route')[0].get('summary').get('distance') / 1000\n except herepy.error.HEREError:\n try:\n response = gm.distance_matrix(ori.coords[::-1], dest.coords[::-1], mode=\"driving\", departure_time=dt.datetime.now(), traffic_model=\"pessimistic\")\n distance = response.get('rows')[0].get('elements')[0].get('distance').get('value') / 1000\n except Exception as e:\n capture_exception(e)\n raise e\n\n if distance < 51:\n deltime = 6\n elif distance > 50 and distance < 701:\n deltime = 24\n elif distance > 700 and distance < 1400:\n deltime = 48\n else:\n deltime = 72\n\n print('--- Tiempo de ejecucion calcDeliveryTime: {} segundos ---'.format((time.time() - start_time)))\n\n return deltime, distance", "def updateDest(self):\n\n\t\t# if end is reached stop calling\n\t\tif self.i == self.numSteps:\n\t\t\treturn False\n\n\t\t# controller\n\t\tpoint = self.control.nextUpPD(self.i)\n\t\tcommand_string = 'id1 mav.waypoint_actuator setdest [%s, %s, %s, %s, 0.2] \\n' % (\n\t\t\tpoint[0], point[1], point[2], point[3])\n\t\tcomm.write(bytes(command_string, 'utf8'))\n\n\t\tself.i = self.i + 1\n\t\treturn GLib.SOURCE_CONTINUE", "def dest_time(self) -> float:\n return ntp_to_system_time(self.dest_timestamp)", "def save(self, *args, **kwargs):\n self.distance_from_hq = self.get_distance_from_hq(self.lat, self.lon)\n super().save(*args, **kwargs)", "def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> Optional[int]:\n return pulumi.get(self, \"traffic_restoration_time_to_healed_or_new_endpoints_in_minutes\")", "def update(entity_list):\n for entity in entity_list:\n # Only moveable entities should have the dest field\n if \"dest\" in entity:\n # If no destination, pick a new random one\n if entity[\"dest\"] == None:\n entity[\"dest\"] = random_pos(100, 100)\n \n # Move one step towards destination\n cpos = entity[\"position\"]\n dest = entity[\"dest\"]\n entity[\"position\"] = move(cpos, dest) \n\n # Clear destination if it has been reached\n if entity[\"dest\"] == entity[\"position\"]:\n entity[\"dest\"] = None", "def restore_traffic_time_to_healed_or_new_endpoint_in_minutes(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"restore_traffic_time_to_healed_or_new_endpoint_in_minutes\")", "def update_statistics_after_move(self, node_to_move_on):\n\n distance_exit = self.exit_point.distance_from_start_point\n distance_node_to_move_on = node_to_move_on.distance_from_start_point\n self.labyrinth_statistics[\"distance_between_agent_and_end_point\"] = distance_exit - distance_node_to_move_on\n self.labyrinth_statistics[\"number_of_moves_done_by_agent\"] += 1", "def update(self):\n if not self.exists:\n return\n if AT.TIME_TO_EXPIRE in self.attributes:\n if not self.calculate_time_left():\n self.fire_trigger(TR.TIME_EXPIRED)", "def finish_hour(self):\n\t\tassert len(self.values) >= 4, 'A fully formed update date is needed.'\n\t\tself.values = self.values[:4]", "def update_goal(self):\n pass", "def calculateNewPath(self):\r\n\r\n\t\tnodeDict = self.simulationHandle.getMap().getNodeDict()\r\n\t\tdistDict = self.simulationHandle.getMap().getDistDict()\r\n\r\n\t\tself.pathToGoal = pathfinder.findPath(self.currentNode, self.goalNode, nodeDict, distDict)", "def travel(self, dest: Location, time: int) -> None:\n self.refreshDroneStatus(time)\n if self.task.status == DroneStatus.Idle:\n self.task.setTravel(time, 
distance(self.__location, dest), dest)\n elif self.task.status != DroneStatus.Traveling:\n raise RuntimeError(\"Cannot order a drone to travel if it is doing something else\")\n elif self.task.dest != dest:\n raise RuntimeError(\"Cannot change drone destination during flight\")\n self.refreshDroneStatus(time)", "def visit_move(self, move):\n dest_id = self.event_json['destination']['id']\n destination = self.world.entities[dest_id]\n move.destination = destination", "def updateTimeStep(self, newDt):\n self.timeStep = newDt", "def update(self) -> None:\n destination = self._get_local_dest(self.path)\n\n try:\n path_type = self._get_path_type(self.absolute)\n handler = self.factory.get_handler(path_type)\n handler.update(self.absolute, destination)\n except Exception as e:\n print(f\"[!] Skipping {self.path}: {e}\")", "def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def restore_traffic_time_to_healed_or_new_endpoint_in_minutes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"restore_traffic_time_to_healed_or_new_endpoint_in_minutes\")" ]
[ "0.5782541", "0.56913507", "0.547817", "0.5343863", "0.5279372", "0.5238057", "0.5227395", "0.52051044", "0.520272", "0.5185315", "0.51523775", "0.5151906", "0.51071995", "0.5073053", "0.5004269", "0.5004062", "0.49946293", "0.49642354", "0.49372905", "0.4920497", "0.49176788", "0.49161103", "0.49070266", "0.48982227", "0.4892921", "0.48683104", "0.48642737", "0.48639575", "0.48617077", "0.48615405" ]
0.70615387
0
Change password form for teachers
def change_password(request):
    emp = models.Teacher.objects.get(user=request.user)
    context_dict = {}
    if request.method == 'POST':
        form = AdminPasswordChangeForm(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            update_session_auth_hash(request, form.user)
            context_dict["message"] = "Password changed successfully"
            history = models.History(
                user=emp,
                activity="",
                activity_type="Changed password"
            )
            history.save()
        else:
            context_dict["message"] = "Password not changed"
    return render(request, "changePassword.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_pwd(self):\r\n if self.field_pwd.text() == \"\":\r\n self.label_chg_pwd.setText(\"Password cannot be empty\")\r\n return None\r\n self.encryptor.set_key_from_password(self.field_pwd.text())\r\n self.label_chg_pwd.setText(\"Password typed\")\r\n self.label_chg_pwd.setStyleSheet(\"color:#01ac2d\")\r\n self.label_chg_key.clear()\r\n self.field_key.clear()\r\n QtWidgets.QMessageBox.information(self, \"Password Change\", \r\n (\"Your password has been successfully changed.\\n\\n\"\r\n \"You can now encrypt / decrypt files.\"))", "def ChangePassword():\n if self.ChangePassword():\n # Update successful, return to main screen\n self.confirm_pass.set('')\n self.password.set('')\n Return()\n else:\n return", "def edit_password():\n form = EditPasswordForm()\n\n if request.method == 'POST' and form.validate():\n\n user = Users.query.filter_by(id=current_user.id).first()\n\n if not user.check_password(form.old_password.data):\n flash('Incorrect old password', 'warning')\n return redirect(url_for('auth.edit_password'))\n\n user.set_password(form.new_password.data)\n\n try:\n db.session.commit()\n flash('Your password has been changed.', 'success')\n except IntegrityError:\n db.session.rollback()\n flash('ERROR! Unable to change your password, please check your details are correct and try again.',\n 'warning')\n\n return redirect(url_for('auth.account'))\n\n return render_template('auth/edit_account/edit_password.html', form=form)", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Updated password!')\n return redirect('profile')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'accounts/forms.html', {\n 'form': form\n })", "def enter_password(self):", "def change_my_password():\n form = ChangePassword()\n if request.method == 'GET':\n return render_template('changemypassword.html', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n old_password = form.password.data\n new_password_hash = generate_password_hash(form.password1.data)\n account = db.check_item(\"username\", username)\n if account is not None:\n if check_password_hash(str(account['password_hash']), old_password):\n db.update_password_username(username, new_password_hash)\n flash('Your password has been changed')\n return redirect(url_for('login'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n return render_template('changemypassword.html', form=form)", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('view-profile', args=[request.user.id]))\n else:\n print \"form not valid\"\n else:\n form = PasswordChangeForm(user=request.user)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 'message' : None,\n 'form_action' : reverse('change-password'),\n 'title' : \"Change Password\"\n })", "def password_change(request):\n status = 200\n pform = ChangePasswordForm(request.user, request.POST)\n\n if pform.is_valid():\n status = pform.save(request)\n if status == 200:\n messages.success(request, _('Your password was successfully changed'))\n return 
redirect('profile')\n\n return render(request, 'gui/profile/profile_password_form.html', {\n 'user': request.user,\n 'pform': pform,\n }, status=status)", "def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password", "def change_password():\n form = PasswordResetForm()\n\n if form.validate_on_submit():\n # Update user\n current_user.password = crypto_manager.hash(form.password.data)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Password updated correctly'), 'success')\n\n return redirect(url_for('admin.profile_edit'))\n\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n\n flash(_('Error updating password, contact an administrator'), 'error')\n\n return render_template('admin/profile/change_password.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/change_password.html', form=form)", "def passwordform(name = None):\n if request.method == 'POST':\n if name == None:\n name = session['name']\n\n password = request.form['pass1']\n confirmPassword = request.form['passconfirm']\n if password == confirmPassword:\n hl.changePassword(name,confirmPassword)", "def pass_change(request):\n if request.method == \"POST\":\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n form.save()\n return home(request, \"Password Changed Successfully\")\n \n else:\n form = PasswordChangeForm(instance=request.user)\n \n ctx = _make_context(request, \"pass_form\", form)\n \n return TemplateResponse(request, \"users/index.html\", ctx)", "def password():\n\n if request.method == 'POST':\n print 'Changing password'\n # query for user's hash of password\n pw_hash = datastore.get_user_by_user_id(engine, session['user_id'])['hash']\n\n # check all boxes filled, old password is correct, new and confirmation match\n if not request.form.get('old') or not check_password_hash(pw_hash, request.form.get('old')):\n flash('Incorrect old password!', 'danger')\n return render_template('password.html')\n elif not request.form.get('new') or not request.form.get('confirmation'):\n flash('Must confirm new password!', 'danger')\n return render_template('password.html')\n elif not request.form.get('new') == request.form.get('confirmation'):\n flash('New passwords don\\'t match!', 'danger')\n return render_template('password.html')\n\n # update hash in database\n datastore.update_password_hash(engine, session['user_id'], generate_password_hash(request.form.get('new')))\n\n # redirect to portfolio\n flash('Password changed!', 'info')\n print 'Password changed!'\n return redirect(url_for('index'))\n\n else:\n print 'Loading change password page'\n return render_template('password.html')", "def password_change_view(request):\n extra_context = {'title': _('Current user password change')}\n\n if request.user.user_options.block_password_change:\n messages.error(\n request, _(\n 'Changing the password is not allowed for this account.'\n )\n )\n return HttpResponseRedirect(reverse(settings.HOME_VIEW))\n\n return password_change(\n request, extra_context=extra_context,\n template_name='appearance/generic_form.html',\n post_change_redirect=reverse('authentication:password_change_done'),\n )", "def change_password():\n\n from .forms import ChangeCredentialsForm\n\n username = current_user.get_id()\n form = ChangeCredentialsForm(request.form)\n\n if form.validate_on_submit():\n logger.info(username + \" wants to change 
something.\")\n if request.form['username'] != username:\n logger.info(\"User \" + username + \" wants to change the username.\")\n app.rename_user(username, request.form['username'],\n request.form['newPassword1'])\n else:\n logger.info(\"Changing password of user \" + username + \".\")\n app.add_user_and_password(request.form['username'],\n request.form['newPassword1'])\n\n logger.info(\"Successfully changed credentials of \"\n + username + '.')\n return redirect(url_for('home'))\n\n else:\n return render_template('change-credentials.html',\n form=form,\n username=username)", "def update_password(): \n \n form = PasswordForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n \n hashed_pw = bcrypt.hashpw(form.new_password.data.encode('utf-8'), bcrypt.gensalt())\n user = mongo.db.user.find_one({'username': session['username']})\n \n if bcrypt.checkpw(request.form['password'].encode('utf-8'), user['hashed_password']):\n mongo.db.user.find_one_and_update({'username': session['username']}, {'$set':{'hashed_password':hashed_pw}})\n \n flash(f'Password reset was successful, please login again.','success')\n return redirect(url_for('login'))\n \n return render_template('pages/settings.html', \n title='Password', \n form=form\n )", "def changepassword():\n if request.method == \"POST\":\n\n # Ensure password was submitted\n if not request.form.get(\"newpassword\"):\n return apology(\"must provide password\", 400)\n # Ensure passwords match\n elif request.form.get(\"newpassword\") != request.form.get(\"confirmation\"):\n return apology(\"passwords do not match\", 400)\n elif request.form.get(\"newpassword\").isalpha() == True:\n return apology(\"password must contain at least one numeric symbol\")\n\n # encrypt new password\n hash = generate_password_hash(request.form.get(\"newpassword\"))\n print(hash)\n # update user's password in database\n result = db.execute(\"UPDATE users SET hash = :hash WHERE id = :id\", hash=hash, id = session[\"user_id\"])\n\n if not result:\n return apology(\"password not available\", 400)\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"changepass.html\")", "def change_password(request):\n\n form = ChangePasswordForm(user=request.user)\n context = {\n 'form': form,\n 'submit_button_text': _('Update password'),\n 'back_button_text': _('Cancel'),\n 'show_back_button': True,\n }\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request (binding):\n form = ChangePasswordForm(request.POST, user=request.user)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n user = request.user\n if not user.check_password(form.cleaned_data['old_password']):\n messages.error(request, _('Password was not changed! 
You typed your old password in incorrectly, please try again.'), extra_tags='alert alert-warning')\n else:\n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n update_session_auth_hash(request, request.user)\n # redirect to a new URL:\n messages.success(request, _('Your password was changed.'), extra_tags='alert alert-success')\n form = ChangePasswordForm(user=request.user)\n context.update({'form': form})\n return render(request, 'change_password_form.html', context)\n\n\n return render(request, 'change_password_form.html', context)", "def setpassword(self, pwd):\n pass", "def change_password(self, new_pass):\n self.manager.change_user_password(self, new_pass)", "def change_user_password():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n user = get_user_by_id(user_id)\n if request.method == 'POST':\n old_password = request.form['old-password']\n new_password = request.form['new-password']\n confirm_password = request.form['confirm-password']\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(user_id)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if check_authentication(session_id, user_id):\n is_password_updated = update_user_password(user_id, old_password, new_password, confirm_password)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)\n if is_password_updated == \"OK\":\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=\"Password successfully updated!\", today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=is_password_updated, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)", "def changepassword():\n try:\n if request.method == 'POST':\n # Makes sure the passwords match and that it meets complexity\n validate = check_pass(\n request.form['newpass'], request.form['connewpass'])\n if validate == \"Passed\":\n data = [request.form['newpass'], session[\n 'username'], request.form['oldpass']]\n with Database() as database:\n database.updateUserPassword(data)\n return redirect(url_for('profile', username=session['username']))\n else:\n flash(validate)\n return render_template('changepass.html')\n\n else:\n return render_template('changepass.html')\n\n except Exception as e:\n flash(\"Oops, something went wrong... 
Try again.\")\n return render_template('changepass.html')", "def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")", "def set_admin_password(self, instance, new_pass):\n pass", "def change_password_user():\n\n form = ChangePasswordForm(request.form)\n\n if form.validate_on_submit():\n\n if not request.form['old_password'] or request.form['old_password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n\n hashed_password = user_manager.hash_password(request.form['password'])\n\n # Modificamos el password del usuario\n current_user.password = hashed_password\n\n try:\n correct = True\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error modifying password of user, make sure username and email are unique','error')\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n else:\n flash('Congratulations, update your password!','success')\n return redirect(url_for('user_ksat.show_user'))\n\n\n return render_template('user/change_password_user.html', title='Change Password', form=form)", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def change_password():\n\n if request.method == \"POST\":\n\n # Ensure current password is not empty\n if not request.form.get(\"current_password\"):\n return apology(\"must provide current password\", 400)\n\n # Query database for user_id\n rows = db.execute(\"SELECT hash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])\n\n # Ensure current password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"current_password\")):\n return apology(\"invalid password\", 400)\n\n # Ensure new password is not empty\n if not request.form.get(\"new_password\"):\n return apology(\"must provide new password\", 400)\n\n # Ensure new password confirmation is not empty\n elif not request.form.get(\"new_password_confirmation\"):\n return apology(\"must provide new password confirmation\", 400)\n\n # Ensure new password and confirmation match\n elif request.form.get(\"new_password\") != request.form.get(\"new_password_confirmation\"):\n return apology(\"new password and confirmation must match\", 400)\n\n # Update database\n hash = generate_password_hash(request.form.get(\"new_password\"))\n rows = db.execute(\"UPDATE users SET hash = :hash WHERE id = :user_id\", user_id=session[\"user_id\"], hash=hash)\n\n # Show flash\n flash(\"Password Changed!\")\n return redirect(\"/\")\n\n return render_template(\"change_password.html\")", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n 
hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", "def change_password():\n\n if request.method == 'POST':\n current_password = request.form['current_password']\n new_password = request.form['new_password']\n\n # If current password is correct, update and store the new hash\n if current_user.check_password_hash(current_password):\n current_user.generate_password_hash(new_password)\n else:\n return 'Current password you entered is wrong! Please try again!'\n\n # Commit the changes we made in the object to the database\n success, reason = commit_transaction()\n if not success:\n return f'Error occurred while changing your password - {reason}!'\n\n log(f'<code>{current_user.name}</code> has updated their password!</code>')\n\n # Log the user out, and redirect to login page\n logout_user()\n return redirect(url_for('login'))\n return render_template('change_password.html')", "async def password(self, ctx):\n pass" ]
[ "0.7280924", "0.72753847", "0.72258353", "0.71798277", "0.7163031", "0.715748", "0.71463037", "0.7115237", "0.7095983", "0.70515865", "0.70478415", "0.70305073", "0.7016382", "0.69865227", "0.692954", "0.69270027", "0.69050103", "0.6901926", "0.68824595", "0.6861174", "0.68590075", "0.68273854", "0.68030554", "0.67990935", "0.67889845", "0.67785287", "0.6742517", "0.66950953", "0.66877705", "0.66694427" ]
0.803726
0
View to take email and mail the link to reset password.
def password_reset(request):
    context_dict = {}
    if request.method == 'POST':
        email = request.POST.get('email')
        if email:
            user = models.Teacher.objects.get(
                soft_delete=False,
                user__email=email
            )
            if not user:
                context_dict["message"] = "Email ID does'nt exist, Enter Correct details"
            mail = {
                'email': email,
                'domain': request.META['HTTP_HOST'],
                'site_name': 'Placement Portal',
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'user': user,
                'token': ''.join([random.choice(ascii_letters + digits) for i in range(128)]),
                'protocol': 'http',
            }
            try:
                reset_token = models.PasswordReset(
                    user=user,
                    token=mail['token'],
                    token_consumed=False,
                )
                reset_token.save()
            except Exception as e:
                print(e)
            subject_template_name = 'password_reset_email_subject.txt'
            email_template_name = 'password_reset_email.html'
            subject = loader.render_to_string(subject_template_name, mail)
            subject = ''.join(subject.splitlines())
            email_data = loader.render_to_string(email_template_name, mail)
            send_mail(subject, email_data, DEFAULT_FROM_EMAIL, [email], fail_silently=False)
            context_dict["message"] = "Email has been sent to your registered Email ID with instructions."
    return render(request, "password_reset_form.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_password():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n\n form = RequestResetForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n send_reset_email(user) # located in utils.py\n flash('An email has been sent with instruction to reset your password', 'info')\n return redirect(url_for('users.login'))\n\n return render_template('reset_password_request.html', form=form)", "def send_password_reset_email():\n aaa.send_password_reset_email(\n username=post_get('username'),\n email_addr=post_get('email_address')\n )\n return 'Please check your mailbox.'", "def send_password_reset_mail(email, token):\n print(\"reset password\")\n url = f\"{settings.SITE_URL}/reset-password?email={email}&token={token}\"\n SUBJECT = \"Reset Password Request\"\n # The HTML body of the email.\n body = \"\"\"\n <html>\n <head></head>\n <body>\n <p>Here is your password reset link:</p>\n <p><a href='{0}'>{1}</a></p>\n </body>\n </html>\n \"\"\".format(url, url)\n send_mail(SUBJECT, body, email)", "def forgot_password():\r\n form = ForgotPasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user = model.user.User.query\\\r\n .filter_by(email_addr=form.email_addr.data)\\\r\n .first()\r\n if user and user.email_addr:\r\n msg = Message(subject='Account Recovery',\r\n recipients=[user.email_addr])\r\n if user.twitter_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Twitter')\r\n elif user.facebook_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Facebook')\r\n elif user.google_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Google')\r\n else:\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n recovery_url = url_for('.reset_password',\r\n key=key, _external=True)\r\n msg.body = render_template(\r\n '/account/email/forgot_password.md',\r\n user=user, recovery_url=recovery_url)\r\n msg.html = markdown(msg.body)\r\n mail.send(msg)\r\n flash(gettext(\"We've send you email with account \"\r\n \"recovery instructions!\"),\r\n 'success')\r\n else:\r\n flash(gettext(\"We don't have this email in our records. \"\r\n \"You may have signed up with a different \"\r\n \"email or used Twitter, Facebook, or \"\r\n \"Google to sign-in\"), 'error')\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Something went wrong, please correct the errors on the '\r\n 'form'), 'error')\r\n return render_template('/account/password_forgot.html', form=form)", "def forgotPassword():\n if request.method == 'POST':\n if emailform():\n email = request.form['email1']\n\n #Confirm the user exist\n if hl.confirmUser(email):\n user = hl.getUser(\"Email\",email)\n refLink = \"http://\"+request.headers['Host']+hl.genUrl(user[\"Name\"],\"Password\")\n #Send email\n msg = \"\"\"\n Dear {},\n\n You are receiving this email because you have requested your password be reset. 
\n Use the following link to reset your password:\n\n {}\n\n If you did not request that your password be changed, please reply to this email immediately.\n\n Regards,\n Onegroup Admin Team\n \"\"\".format(user[\"Name\"],refLink)\n\n emailMessage(\"Password Reset\", [user[\"Email\"]], msg)\n return redirect(url_for('confirm', confirmed = 'Password reset email has been sent.'))\n else:\n flash(\"User doesn't exists\")\n else:\n flash(\"Emails don't match\")\n \n return render_template('emailsend.html')", "def post(self):\n try:\n url = request.host_url + 'reset/password/'\n body = request.get_json()\n base_url = request.url_root\n email = body.get('email')\n\n if not email:\n raise SchemaValidationError\n\n user = User.objects.get(email=email)\n if not user:\n raise EmailDoesNotExistsError\n\n expires = datetime.timedelta(minutes=60)\n payload = {\"user_id\": str(user.id)}\n reset_token = create_access_token(payload, expires_delta=expires)\n\n return send_email('[Unboxit] Reset Your Password',\n sender='[email protected]',\n recipients=[user.email],\n text_body=render_template(\n 'components/reset_password.txt',\n url=url + reset_token),\n html_body=render_template(\n 'components/reset_password.html',\n url=url + reset_token,\n first_name=user.first_name,\n base_url=base_url))\n except SchemaValidationError:\n raise SchemaValidationError\n except DoesNotExist:\n raise EmailDoesNotExistsError\n except Exception as e:\n raise InternalServerError", "def reset_password(email):\n user = AuthUser.query.filter_by(email=email).first()\n if user is None:\n return False\n # Generate email with unique link\n msg = Message(\n \"Password Reset Link\",\n recipients=[user.email] \n )\n msg.body = \"Click on this link and following the instructions to reset your \"\n \"password\\n\\n%s%s?uid=%s-%s\" % (\n app.config['SITE_URI'],\n \"/reset/password/\",\n user.id,\n user.get_uid()\n )\n mail.send(msg)\n return True", "def send_recovery_password_email(token: str, email: str) -> None:\n\n # TODO ...\n # Load html templates and get the content from it.\n # html_content = ...\n\n # You must have to send this as a anchor\n # to my-domain.com/reset-password?token=ad5a....\n link = f\"{SERVER_HOST}/reset-password?token={token}\"\n content = f\"\"\"\n <h1>Reset your password</h1>\n <p></p>\n <a href=\"{link}\" target=\"_blank\" rel=\"noopener noreferrer\">Press here</a>\n \"\"\"\n email = sender.create_email(\n to_list=[email],\n subject=f\"Recovery Password\",\n html_content=content,\n )\n sender.send_email(email_to_send=email)", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>To reset your password </p>'\n subject = 'Request for changing password, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/reset_password/', html, False)\n pass", "def send_pw_reset_email(user):\n token = user.get_token()\n message = Message(\n 'Reset Your Password',\n sender='[email protected]',\n recipients=[user.email])\n message.body = f\"To verify reset your password, click the link \" \\\n f\"below:\\n\\n\" \\\n f\"{url_for('users.reset_password', token=token, _external=True)}\"\n mail.send(message)", "def forgot_password():\n if request.method == 'POST':\n if 'username' in request.form:\n username = request.form['username']\n user = Users.query.get(username)\n if user:\n reset_slug = utils.encrypt(username)\n reset_url = request.host_url + 'reset_password' + '/' + reset_slug\n from_email = ('[email protected]', 'TSG 
Bot')\n to_email = [(user.email, user.name)]\n subject = 'Password reset for Hades account'\n content = f\"Hello {user.name}, please click <a href=\\\"{reset_url}\\\">here</a> to reset your password!\"\n utils.send_mail(from_email, to_email, subject, content)\n return redirect(url_for('login'))\n return render_template('forgot_password.html')", "def password_reset(request):\n\tif not request.user.is_authenticated():\n\t\treturn django.contrib.auth.views.password_reset(request,\n template_name='usermgr/password_reset_form.html',\n email_template_name= 'usermgr/password_reset_email.html',\n post_reset_redirect='/usermgr/password_reset/done/')\n\telse:\n\t\treturn HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)", "def send_password_reset_email(user):\n\n token = user.get_password_token()\n reset_time=datetime.now()\n send_email('[SiteSurveyApp] Account password reset',\n recipients=[user.email],\n sender=app.config['MAIL_DEFAULT_SENDER'],\n text_body=render_template('auth/emails/reset_password.txt',\n user=user, token=token, reset_time=reset_time),\n html_body=render_template('auth/emails/reset_password.html',\n user=user, token=token, reset_time=reset_time))", "def reset_password_request():\n form = ResetPasswordRequestForm()\n if form.validate_on_submit():\n try:\n user = User.query.filter_by(email=form.email.data).first_or_404()\n except Exception:\n flash('This Email ID is Not Registered', 'error')\n return render_template('password_reset_request.html',\n form=form), 400\n\n if user:\n send_password_reset_email(user)\n flash('Please check your email for a password reset link.',\n 'success')\n return render_template('post_pass_reset_request.html',\n title=\"Reset Password\")\n else:\n flash(\n 'Your email address must be confirmed \\\n before attempting a password reset.',\n 'error')\n return redirect(url_for('auth.login'))\n\n return render_template('password_reset_request.html', form=form), 400", "def forgot_password():\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ForgotPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user:\n token = user.make_reset_token()\n send_reset_token(user, token=token)\n\n flash((\"E-Mail sent! 
Please check your inbox.\"), \"info\")\n return redirect(url_for(\"auth.forgot_password\"))\n else:\n flash((\"You have entered an username or email that is not linked \\\n with your account\"), \"danger\")\n return render_template(\"auth/forgot_password.html\", form=form)", "def forgot_password():\n url = 'http://localhost:8080/' + 'user/reset/'\n body = request.get_json()\n email = body.get('email')\n if not email:\n return jsonify(msg.MISSING_PARAMETER), 400\n user_email = views.UserManagement().exists(email=email)\n\n if not user_email:\n return jsonify(msg.NO_DATA), 404\n expires = datetime.timedelta(hours=24)\n reset_token = create_access_token(identity=email, expires_delta=expires)\n\n send_email('[Shodita] Reset Your Password', sender='[email protected]', recipients=[email],\n text_body=render_template('email/reset_password.txt', url=url + reset_token),\n html_body=render_template('email/reset_password.html', url=url + reset_token))\n\n return jsonify(msg.SUCCESS), 200", "def forgot_passwd(request):\n dc_settings = request.dc.settings\n\n return password_reset(\n request,\n template_name='gui/accounts/forgot.html',\n email_template_name='gui/accounts/forgot_email.txt',\n subject_template_name='gui/accounts/forgot_subject.txt',\n password_reset_form=partial(ForgotForm, request),\n post_reset_redirect=reverse('forgot_done'),\n from_email=dc_settings.DEFAULT_FROM_EMAIL,\n current_app='gui',\n extra_context={\n 'e_site_name': dc_settings.SITE_NAME,\n 'e_site_link': dc_settings.SITE_LINK,\n })", "def reset_password():\n form = ResetPassword()\n if form.validate_on_submit():\n user_email = form.email.data\n mail_exist = db.check_email(user_email)\n if mail_exist is not None:\n new_password = generate_password()\n new_password_hash = generate_password_hash(new_password)\n username = mail_exist['username']\n db.update_password_username(username, new_password_hash)\n flash('Your new password has been sent to your mailbox')\n redirect('login')\n # send_password_reset_email(user_email, new_password)\n return redirect(url_for('login'))\n else:\n flash('This email address is not registered')\n return redirect('reset_password')\n return render_template('resetpassword.html', form=form)", "def send_password_reset(user):\n _log('++ sending password reset email for: {} {}'.format(user.first_name, user.last_name))\n secret_string = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(20))\n\n # if local set the domain to localhost\n if ENV_DICT['ENVIRON'] == 'LOCAL':\n secret_link = 'http://localhost:8080/reset/{}/'.format(secret_string)\n # otherwise use the subdomain of the tenancy\n else:\n secret_link = 'http://{}.cpisearch.io/reset/{}/'.format(user.tenancy, secret_string)\n\n reset_link_object = PasswordResetLink(\n user_id=user.user_id,\n secret_link=secret_string,\n tenancy=user.tenancy,\n )\n db.session.add(reset_link_object)\n db.session.commit()\n send_email(\n to_email=user.email,\n subject='SuccessKit Password Reset',\n template_path='emails/password_reset_email.html',\n template_vars={\n 'user': user,\n 'secret_link': secret_link\n }\n )", "def password_reset(request):\n try:\n with transaction.atomic():\n try:\n data = request.data\n data = validations_utils.email_validation(data) # Validates email id, it returns lower-cased email in data.\n user = validations_utils.user_validation_with_email(data['email'])\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n current_site = get_current_site(request)\n domain = 
current_site.domain\n key = utils.create_reset_password_key(user.email)\n utils.send_reset_password_mail(user, key, domain) # Sends an email for resetting the password.\n return Response(messages.PASSWORD_RESET_LINK_SENT, status=status.HTTP_200_OK)\n except IntegrityError:\n return Response(messages.CAN_NOT_RESET_PASSWORD, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def forgotpassword(request):\n if request.method == 'GET':\n return render(request, 'app/other/forgot_password.html', {'title':'Forgot Password?',})\n elif request.method == 'POST':\n username = request.POST['username']\n\n if User.objects.filter(username = username).exists():\n user = User.objects.get(username = username)\n if Referee.objects.filter(user = user).exists():\n referee = Referee.objects.get(user = user)\n # generate token\n passwordResetTokenGenerator = PasswordResetTokenGenerator()\n token = PasswordResetTokenGenerator.generate_token(passwordResetTokenGenerator, str(user.id))\n token = str(token.decode('utf-8'))\n # email to referee\n subject = \"[Password Reset Link]\"\n message = 'http:////localhost:8000//reset//token=//' + token\n content = \"<br>Dear sir,</br><br></br><br></br>Link is: \"+message+'. Please click on the link to change the credentials.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n email = []\n receiver = referee.user\n email.append(receiver.email)\n send_email_task.delay(email, subject, content)\n # redirect to same page with status to check your mail and click on activation link\n \n dict = {'status' : 'Done', 'message' : 'An Activation link has been sent to your mail-id'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else: # given username is not valid to use this feature\n dict = {'status': 'Error', 'message' : 'You are not Authorized to change password'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else: # given username is not valid to use this feature\n dict = {'status': 'Error', 'message' : 'Invalid Username, Try Again!'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def send_reset_email(staff):\n token = staff.get_reset_token()\n msg = Message('Password Reset Request', \n sender='[email protected]', \n recipients=[staff.email])\n msg.body = f\"\"\"To reset your password, visit the following link:\n{url_for('reset_token', token=token, _external=True)}\nIf you did not make this request, then simply record this email and no changes will be made.\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)", "def link(self):\n return f\"https://{DOMAIN}/password-reset/{self.code}\"", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. 
{}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def forgot():\n form = ForgotForm()\n\n if form.validate_on_submit():\n db.session.add(form.pw_reset)\n db.session.commit()\n\n form.pw_reset.send()\n flash('A password reset link has been sent to your email', 'alert-success')\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n return render_template('forgot.html', form=form)", "def test_password_reset_email(self, send_mail_mock):\n pw_reset_name = 'auth_password_reset'\n # ensure view exists\n pw_reset_get_response = self.client.get(reverse(pw_reset_name))\n self.assertEqual(pw_reset_get_response.status_code, 200)\n # post data to password reset; make Django send email\n data = {'email': self.email}\n self.client.post(reverse(pw_reset_name), data=data, follow=True)\n # verify that email sent with right template\n send_mail_mock.assert_called_with(\n ANY,\n 'registration/password_reset_email.txt',\n ANY, ANY, ANY,\n html_email_template_name=ANY)", "def reset_request():\n if current_user.is_authenticated:\n return redirect('/home')\n form = RequestResetForm()\n if form.validate_on_submit():\n staff = Staff.query.filter_by(email=form.email.data).first()\n send_reset_email(staff)\n flash('An email has been sent with instructions to reset your password.', 'info')\n return redirect(url_for('login'))\n return render_template('reset_request.html', title='Reset Password',\n form=form)", "def reset_password_email(request):\n if request.method == 'POST' :\n try:\n print(request.POST)\n user = models.UserProfile.objects.get(email=request.POST.get('email',''))\n current_site=get_current_site(request)\n email_subject='Password Reset'\n message=render_to_string('reset_password.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(user.id)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The Reset password email has been sent.\"\n }\n )\n except(TypeError, ValueError, OverflowError, models.UserProfile.DoesNotExist):\n user = None\n return JsonResponse(\n {\n \"status\":\"No matching account found\"\n }\n )\n else :\n return JsonResponse(\n {\n \"status\":\"only post method is available\"\n }\n )", "def action_reset_password(self):\n # prepare reset password signup\n create_mode = bool(self.env.context.get('create_user'))\n\n # no time limit for initial invitation, only for reset password\n expiration = False if create_mode else now(days=+1)\n\n self.mapped('partner_id').signup_prepare(signup_type=\"reset\", expiration=expiration)\n\n # send email to users with their signup url\n template = False\n if create_mode:\n try:\n template = self.env.ref('loyalty.set_password_email', raise_if_not_found=False)\n except ValueError:\n pass\n if not template:\n template = self.env.ref('loyalty.reset_password_email')\n assert template._name == 'mail.template'\n\n template_values = {\n 'email_to': '${object.email|safe}',\n 'email_cc': False,\n 'auto_delete': True,\n 'partner_to': False,\n 'scheduled_date': False,\n }\n template.write(template_values)\n\n for user in self:\n if not user.email:\n raise UserError(_(\"Cannot send email: user %s has no email address.\") % user.name)\n with self.env.cr.savepoint():\n template.with_context(lang=user.lang).send_mail(user.id, force_send=True, raise_exception=True)\n 
_logger.info(\"Password reset email sent for user <%s> to <%s>\", user.login, user.email)", "def request_password_reset():" ]
[ "0.77369666", "0.7654678", "0.76541126", "0.76050055", "0.74960506", "0.74847054", "0.7466968", "0.7450301", "0.7397794", "0.739677", "0.73547953", "0.7352869", "0.73409104", "0.7325412", "0.7316366", "0.7305401", "0.72502244", "0.72438157", "0.72168416", "0.7154242", "0.7148575", "0.71477026", "0.71021914", "0.708804", "0.7068072", "0.7036203", "0.6951567", "0.6948683", "0.6905124", "0.68978375" ]
0.8057735
0
For adding an exam name such as semester 1 or trimester 2.
def addExamName(request): emp = models.Teacher.objects.get(user=request.user) if not emp.exam_permit: raise Http404 context_dict = {} if request.method == "POST": name = request.POST.get('ename') duplicate_check = models.ExamName.objects.filter( name=name, ).first() if duplicate_check: context_dict["message"] = "Exam Name already exists." duplicate_check.soft_delete=False duplicate_check.save() return render(request, "addExamName.html", context_dict) try: examName = models.ExamName( name=name ) examName.save() history = models.History( user=emp, activity='Added Exam' + str(name) +'.\n', activity_type="add exam." ) history.save() context_dict["message"] = 'Successfully added new Exam Type.' context_dict["success"] = True except Exception as e: context_dict["message"] = str(e) context_dict["success"] = False print(e) return render(request, "addExamName.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return f\"{self.semester} | {self.school} | {self.position} | {self.class_name}\"", "def thesis(self, new_thesis):\r\n self.__thesis = 'Thesis: ' + new_thesis", "def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def __str__(self):\n return \"%s, %02d, %s\" % (self.code, self.section, self.semester)", "def __str__(self):\n # Use 'Unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.verbose_name()\n else:\n term = 'Unknown'\n\n exam_unicode = '{term} {number} {type} for {course}'.format(\n term=term,\n number=self.get_exam_number_display(),\n type=self.get_exam_type_display(),\n course=self.course_instance.course)\n if self.instructors:\n instructors = ', '.join([i.last_name for i in self.instructors])\n return '{}, taught by {}'.format(exam_unicode, instructors)\n else:\n return '{} (Instructors Unknown)'.format(exam_unicode)", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())", "def __str__(self):\r\n stu_info = super().__str__()\r\n return f'{stu_info} {self.thesis}'", "def marketing_name(self):\n return \"Custom solution - 2\"", "def example(exam_name, question_set, student):\n\n exam = Exam(exam_name)\n for question in question_set:\n exam.add_question(question, question_set[question])\n student = Student(student['f_name'], student['l_name'], student['address'])\n take_test(exam, student)\n return student, exam", "def get_describe_name(self):\n long_name = str(self.year)+ ' ' + self.make.title()+ ' ' +self.model.title()\n return long_name", "def get_name():\n return \"SVM Idea\"", "def toStudentString(self):\r\n return \"{0}th year, section {1}, {2} {3}\".format(self.batch, self.batch_id, self.batch, self.batch_id)", "def question_8():\n subject = input(\"Enter subject code: \")\n while subject != \"\":\n if len(subject) == 6:\n it_string = \"\"\n if subject.startswith(\"CP\"):\n it_string = \" IT\"\n if subject[2] == '1':\n year_string = \"first-year\"\n elif subject[2] == '2':\n year_string = \"second-year\"\n elif subject[2] == '3':\n year_string = \"third-year\"\n else:\n year_string = \"Masters or other\"\n print(f\"That is a {year_string}{it_string} subject.\")\n else:\n print(\"Invalid subject code\")\n subject = input(\"Enter subject code: \")", "def numbered_title(self):\n return f\"Appendix {self.chapter}. 
{self.title}\"", "def __str__(self):\n return \"student:\"+str(self.name)+\":\"+str(self.age)+\":\"+str(self.major)", "def addSemester(inputFile, outputFile):\n\n \n fall_classes = ['cs 121', 'cs 223', 'cs 260', 'cs 215']\n spring_classes = ['cs 122', 'cs 166', 'cs 224', 'cs 251', 'cs 261']\n \n\n with open(inputFile, 'r') as f:\n newlist = f.readlines() \n for i in range(len(newlist)):\n newlist[i] = newlist[i].strip()\n print(newlist)\n for i in newlist:\n print(i) \n print(\"-----------------------\") # this is all taking each line in the text and converting it into a list while also getting rid of any new lines to make it easier to concatanate fall or spring\n\n with open(outputFile, 'w') as f:\n index = 0 # this is a counter that is incrementing once every loop and is being used to index into the list \n for i in newlist: \n if i in fall_classes: \n newlist[index] += \" fall\\n\"\n index += 1\n elif i in spring_classes:\n newlist[index] += \" spring\\n\"\n index += 1\n\n \n original = ''.join(newlist) # this brings together the finalized list into the original format just with the corresponding fall or spring\n print(original)\n f.write(original)", "def get_descriptive_name(self):\r\n long_name=str(self.year)+' '+self.make+' '+self.model\r\n return long_name.title()", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. \n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def name(self):\n return 'SE Rimozione Inquinanti'", "def get_descriptive_name(self):\r\n long_name = str(self.year)+' '+self.make + ' 
'+self.model\r\n return long_name.title()", "def get_full_name_with_academic_title(self) -> str:\n base_name = super().get_full_name()\n return f'{self.title} {base_name}' if self.title else base_name", "def addStud(self,ID,name,attNr,grade):\n if ID < 0: raise Exception(\"Invalid ID!\")\n parts = name.split(' ')\n if len(parts) < 2: raise Exception('Invalid name!')\n for part in parts:\n if len(part)<3: raise Exception('Invalid name!')\n if attNr < 0: raise Exception('Invalid number of attendances!')\n if grade not in range(0,11): raise Exception('Invalid grade!')\n self.__studRepo.add(Student(ID,name,attNr,grade))", "def option1(self):\n ID = int(input(\"ID: \"))\n name = input(\"Name: \")\n attNr = int(input(\"Number of attendances: \"))\n grade = int(input(\"Grade: \"))\n self.__srv.addStud(ID,name,attNr,grade)", "def subChoose():\n\n print \"Please Choose Subject:\\n\\n1, Carms\\n2, Runes\\n3, Defence Against Dark Arts\\n4, Astronomy\\n5, Bonus\"", "def formatName(self):\r\n return self.title.getVal() + \" \" + self.first.getVal() + \" \" + self.last.getVal()", "def assessment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"assessment_name\")", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n return long_name.title()", "def display(self):\n return f'{self._last_name},{self._first_name}:({self._student_id}) {self._major} gpa:{self._gpa}'", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")" ]
[ "0.5798856", "0.5777781", "0.5663284", "0.5635707", "0.5615205", "0.55926704", "0.5520119", "0.5467409", "0.5458762", "0.5406007", "0.5394709", "0.5367752", "0.53436214", "0.5338537", "0.53357697", "0.5332841", "0.5325494", "0.5315696", "0.53049135", "0.53027743", "0.52977306", "0.5264618", "0.52377564", "0.5221814", "0.5190809", "0.5177082", "0.5158515", "0.51531357", "0.5149631", "0.5142698" ]
0.66135615
0
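
The addExamName view above follows a revive-or-create pattern: a soft-deleted duplicate is undeleted instead of inserting a second row, and a History entry records the change. A minimal, model-agnostic sketch of that duplicate-handling step, assuming only a boolean soft_delete field like the one used throughout these records:

# Sketch only: `model` is any Django model with a boolean soft_delete field,
# e.g. the ExamName model used by addExamName above.
def revive_or_create(model, **lookup):
    """Return (obj, created); undelete a soft-deleted match or create a new row."""
    existing = model.objects.filter(**lookup).first()
    if existing:
        if existing.soft_delete:
            existing.soft_delete = False
            existing.save(update_fields=["soft_delete"])
        return existing, False
    return model.objects.create(**lookup), True

A call such as revive_or_create(models.ExamName, name=name) would cover the duplicate check and the creation branch in one place.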
View to edit an existing student in the database, identified by student_id.
def edit_student(request, student_id): emp = models.Teacher.objects.get(user=request.user) if not emp.student_permit: raise Http404 student = models.Student.objects.filter( pk=student_id, soft_delete=False ).first() if not student: raise Http404 context_dict = { "all_courses": context_helper.course_helper(), "blood_groups": context_helper.blood_group_helper(), "guardian_types": context_helper.guardian_type_helper(), "gender_types": context_helper.gender_helper(), 'student_id': student_id } if request.method == 'POST': update_fields = [] activity = '' sname = request.POST.get('sname') roll = request.POST.get('rno') dob = request.POST.get('dob') gender = request.POST.get('gender_picker') bgroup = request.POST.get('blood_group_picker') if bgroup == 'Choose option': bgroup = None phone = request.POST.get('phone') curradd = request.POST.get('curradd') permadd = request.POST.get('permadd') gname = request.POST.get('gname') course = request.POST.get('course_picker') batch = request.POST.get('batch') gtype = request.POST.get('guardian_type_picker') gphone = request.POST.get('gphone') email = request.POST.get('email') address_flag = request.POST.get('address_flag') print (address_flag) address_flag = True if address_flag == 'on' else False if address_flag == True: permadd = curradd try: if "profile-img" in request.FILES: student.photo = request.FILES["profile-img"] update_fields.append('photo') activity += 'Changed photo.\n' if student.name != sname: student.name = sname update_fields.append('name') activity += 'Changed name to '+ str(sname) +'.\n' if student.roll_no != roll: student.roll_no = roll update_fields.append('roll_no') activity += 'Changed roll number to '+ str(roll) +'.\n' if str(student.dob) != str(dob): student.dob = dob update_fields.append('dob') activity += 'Changed DOB to ' + str(dob) + '.\n' if student.gender != gender: student.gender = gender update_fields.append('gender') activity += 'Changed gender to ' + str(gender) + '.\n' if student.blood_group != bgroup: student.blood_group = bgroup update_fields.append('blood_group') activity += 'Changed blood group to ' + str(bgroup) + '.\n' if student.phone != phone: student.phone = phone update_fields.append('phone') activity += 'Changed phone number to ' + str(phone) + '.\n' if student.curr_address != curradd: student.curr_address = curradd update_fields.append('curr_address') activity += 'Changed current address to ' + str(curradd) + '.\n' if student.perm_address != permadd: student.perm_address = permadd update_fields.append('perm_address') activity += 'Changed permanent address to ' + str(permadd) + '.\n' if student.curr_address != curradd: student.curr_address = curradd update_fields.append('curr_address') activity += 'Changed current address to ' + str(curradd) + '.\n' if student.guardian_name != gname: student.guardian_name = gname update_fields.append('guardian_name') activity += 'Changed current address to ' + str(gname) + '.\n' if student.guardian_phone != gphone: student.guardian_phone = gphone update_fields.append('guardian_phone') activity += 'Changed guardian phone to ' + str(gphone) + '.\n' if student.guardian_type != gtype: student.guardian_type = gtype update_fields.append('guardian_type') activity += 'Changed current address to ' + str(gtype) + '.\n' if str(student.course.pk) != str(course): student.course = models.Course.objects.get(pk=course) update_fields.append('course') activity += 'Changed course to ' + str(course) + '.\n' if student.batch != batch: student.batch = batch update_fields.append('batch') activity += 
'Changed batch to' + str(batch) + '.\n' if student.email != email: student.email = email update_fields.append('email') activity += 'Changed email to ' + str(email) + '.\n' if student.address_flag != address_flag: student.address_flag = address_flag update_fields.append('address_flag') activity += 'Changed address flag.' student.save(update_fields=update_fields) history = models.History( user=emp, activity=activity, activity_type="edit student" ) history.save() context_dict["message"] = 'Successfully updated student.' context_dict["success"] = True except Exception as e: context_dict["message"] = str(e) context_dict["success"] = False print(e) context_dict.update(context_helper.get_student_info(student)) if type(context_dict['dob']) == str: context_dict['dob'] = datetime.strptime(context_dict['dob'], '%Y-%m-%d') for i in context_dict['course']: try: del context_dict['all_courses'][i] except: pass for i in context_dict['blood_group']: try: context_dict['blood_groups'].remove(i) except: pass for i in context_dict['guardian_type']: try: context_dict['guardian_types'].remove(i) except: pass for i in context_dict['gender_type']: try: context_dict['gender_types'].remove(i) except: pass if context_dict.get('success', False): return HttpResponseRedirect('/view-students') return render( request, "editStudent.html", context_dict )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_student(request, s_id):\n user = CustomUser.objects.get(id=s_id)\n student = Student.objects.get(user_id=s_id)\n\n if request.method == 'POST':\n user_edit_form = EditUserForm(request.POST, instance=user)\n student_edit_form = EditStudentForm(request.POST, instance=student)\n\n if user_edit_form.is_valid() and student_edit_form.is_valid():\n user_edit_form.save()\n student_edit_form.save()\n messages.success(request, \"The student's account has been edited successfully\")\n return redirect('student_account', s_id=s_id)\n else:\n messages.error(request, \"The form has not been filled correctly\")\n\n else:\n user_edit_form = EditUserForm(instance=user)\n student_edit_form = EditStudentForm(instance=student)\n\n context = {\n 'user_edit_form': user_edit_form,\n 'student_edit_form': student_edit_form\n }\n return render(request, 'main/edit_student.html', {'user_edit_form': context['user_edit_form'],\n 'student_edit_form': context['student_edit_form']})", "def student(identificator):\n student_table = db.get_table('student')\n student = student_table.get(identificator)\n if student is None:\n abort(404)\n discipline = db.get_table('discipline')\n disciplines = discipline.get()\n scores = student_table.get_scores(identificator)\n for each in disciplines:\n if each['id'] not in scores:\n scores[each['id']] = {'score': '', 'id': 0}\n form = StudentForm()\n return render_template(\n 'student.html', student=student,\n form=form, disciplines=disciplines,\n scores=scores\n )", "def on_edit_students_select(self):\n edit_window = Students()\n edit_window.exec_()", "def editStudent(s, number):\n nname = input(\"New Name: \")\n nnumber = input(\"New Number: \")\n ngpa = input(\"New GPA: \")\n nfield = input(\"New Field: \")\n\n deleteStudent(s, number)\n student = Student(nname, nnumber, ngpa, nfield)\n if t.insert(nnumber, student):\n ht.insert(student)\n print(nname, \"edited successfully.\")\n else:\n print(\"new student number is not valid.\")", "def __ui_update_student(self):\n student_id = input(\"student id: \")\n student_name = input(\"student discipline_name: \")\n disciplines_list = []\n\n discipline_name = None\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.update_student(student_id, student_name, disciplines_list)\n print(\"Update student successful\\n\")\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return", "def edit_person(self, pk):", "def editSport(sport_id):\n\n editedSport = session.query(Sport).filter_by(id=sport_id).one()\n if request.method == 'POST':\n if request.form['sportName']:\n editedSport.sportName = request.form['sportName']\n editedSport.user_id = login_session['user_id']\n session.add(editedSport)\n session.commit()\n return redirect(url_for('showSports'))\n else:\n return render_template('editsport.html', sport=editedSport)", "def add_student():\n\n return render_template(\"student_add.html\")", "def student_detail(request, pk):\n try:\n students = student.objects.get(pk=pk)\n except students.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = studentSerializer(students)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n 
data = JSONParser().parse(request)\n serializer = studentSerializer(students, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n students.delete()\n return HttpResponse(status=204)", "def updateStudents(request):\n\n return updateRole('gsoc_student')", "def edit_attendance(request, attendance_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tattendance = models.Attendance.objects.filter(\n\t\tpk=attendance_id, soft_delete=False\n\t).first()\n\tprint(\"1\")\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t'attendance_id': attendance_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tobtained = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tif not student:\n\t\t\tcontext_dict[\"message\"] = 'Student at does not exist / Roll number has not been alloted.'\n\t\t\treturn render(request, \"editAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tif attendance.student != student:\n\t\t\t\tattendance.student = student\n\t\t\t\tupdate_fields.append('student')\n\t\t\t\tactivity += 'Changed student to ' + str(student) + '.\\n'\n\t\t\tif attendance.total_attendance != total:\n\t\t\t\tattendance.total_attendance = total\n\t\t\t\tupdate_fields.append('total_attendance')\n\t\t\t\tactivity += 'Changed total attendance to ' + str(total) + '.\\n'\n\t\t\tif attendance.obtained_attendance != obtained:\n\t\t\t\tattendance.obtained_attendance = obtained\n\t\t\t\tupdate_fields.append('obtained_attendance')\n\t\t\t\tactivity += 'Changed obtained attendance to' + str(obtained) + '.\\n'\n\t\t\tif str(attendance.subject.pk) != str(subject):\n\t\t\t\tattendance.subject = models.Subject.objects.get(pk=subject)\n\t\t\t\tupdate_fields.append('subject')\n\t\t\t\tactivity += 'Changed subject to ' + str(subject) + '.\\n'\n\t\t\tattendance.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_attendance_info(attendance))\n\tfor i in context_dict['subjects']:\n\t\t# use for dynamic\n\t\ttry: del context_dict['all_subjects'][i]\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-attendance')\n\treturn render(\n\t\trequest, \"editAttendance.html\", context_dict\n\t)", "def document_edit_view(document_id):\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n return render_template('admin/documents/edit.html', document=doc, path='/admin/documents')", "def edit(request, observation_id, summary_id):\n\n if request.method == 'POST':\n if observation_id and summary_id:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n form = Observation(request.POST,instance=o)\n else:\n form = Observation(request.POST)\n if form.is_valid():\n form.save()\n return 
render_to_response(\"obsform_form.html\",\n {'form': form,\n 'success' : 'Your observation was saved'},\n context_instance=RequestContext(request))\n else:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n\n form = Observation(instance=o)\n\n return render_to_response('obsform_form.html', {'form' : form},\n context_instance=RequestContext(request))", "def edit(self, id=None):\n rock_q = model.meta.Session.query(model.Rock)\n c.rock = rock_q.filter_by(id=id).first()\n if c.rock:\n return render('/derived/rock/edit.mako')\n else:\n abort(404)", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def edit(self):\n\n pass", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def add_student():\n if request.method == 'POST':\n db.add_student(request.form)\n return redirect('/registry')\n else:\n return render_template('add.html')", "def edit_subject(request,subject_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.subject_permit:\n\t\traise Http404\n\tsubject = models.Subject.objects.filter(\n\t\tpk=subject_id, soft_delete=False\n\t).first()\n\tif not subject:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"subject_types\": context_helper.subject_type_helper(),\n\t\t'subject_id': subject_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tcourse = request.POST.get('course_picker')\n\t\tname = request.POST.get('sname')\n\t\tsid = request.POST.get('sid')\n\t\tstype = request.POST.get('subject_picker')\n\t\tmaxmarks = request.POST.get('marks')\n\t\ttry:\n\t\t\tif str(subject.course.pk) != str(course):\n\t\t\t\tsubject.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif subject.s_type != stype:\n\t\t\t\tsubject.s_type = stype\n\t\t\t\tupdate_fields.append('s_type')\n\t\t\t\tactivity += 'Changed subject type to ' + str(stype) + '.\\n'\n\t\t\tif subject.name != name:\n\t\t\t\tsubject.name = name\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed subject name to' + str(name) + '.\\n'\n\t\t\tif subject.s_id != sid:\n\t\t\t\tsubject.s_id = sid\n\t\t\t\tupdate_fields.append('s_id')\n\t\t\t\tactivity += 'Changed subject ID to' + str(sid) + '.\\n'\n\t\t\tif subject.max_marks != maxmarks:\n\t\t\t\tsubject.max_marks = maxmarks\n\t\t\t\tupdate_fields.append('max_marks')\n\t\t\t\tactivity += 'Changed maximum marks to' + str(maxmarks) + '.\\n'\n\t\t\tsubject.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit subject\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_subject_info(subject))\n\tfor i in context_dict['courses']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['subject_type']:\n\t\ttry: context_dict['subject_types'].remove(i)\n\t\texcept: pass\n\tif 
context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-subjects')\n\treturn render(\n\t\trequest, \"editSubject.html\", context_dict\n\t)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"users/edit_user.html\", user=user)", "def edit_book(request, pk):\n\tbook = get_object_or_404(Book, pk=pk)\n\tif request.method == \"POST\":\n\t\tif book.author == request.user:\n\t\t\tform = BookForm(request.POST, instance=book)\n\t\t\tif form.is_valid():\n\t\t\t\tbook = form.save(commit=False)\n\t\t\t\tbook.author = request.user\n\t\t\t\tbook.save()\n\t\t\t\treturn redirect('book_list')\n\t\telse:\n\t\t\tform = BookForm(instance=book)\n\t\t\terror_msg = \"Your are not the auther of this book, only the actual author can edit this book\"\n\t\t\tmessages.error(request, error_msg)\n\telse:\n\t\tform = BookForm(instance=book)\n\n\tmessages.info(request, \"Editing an existing book\")\n\treturn render(request, 'BookManagement/new_book.html', {'form': form})", "def added_student():\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n first, last, github = hackbright.get_student_by_github(github)\n\n html = render_template(\"student_added.html\", first=first, last=last, github=github)\n\n return html", "def getStudent(request, pk):\n try:\n student = Student.objects.get(nuid=pk)\n\n except Student.DoesNotExist:\n return Response(status=HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = StudentSerializer(student, context={'request': request})\n return Response({'data': serializer.data})\n\n elif request.method == 'PUT':\n serializer = StudentSerializer(student, data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n student.delete()\n return Response(status=HTTP_204_NO_CONTENT)", "def show_add_student_form():\n\n return render_template(\"add_student_form.html\")", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template('edit-user.html', user=user)", "def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def addStudent(request):\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_type\": context_helper.gender_helper(),\n\t}\n\tif request.method == 'POST':\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\tduplicate_student = 
models.Student.objects.filter(\n\t\t\tname=sname, dob=dob, guardian_name=gname,\n\t\t\tguardian_type=gtype, phone=phone, email=email\n\t\t).first()\n\t\tif duplicate_student:\n\t\t\tcontext_dict[\"message\"] = 'Student already exist.'\n\t\t\tduplicate_student.soft_delete=False\n\t\t\tduplicate_student.save()\n\t\t\treturn render(request, \"AddStudent.html\", context_dict)\n\t\taddress_flag = request.POST.get('address_flag')\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tstudent = models.Student(\n\t\t\t\tname=sname,\n\t\t\t\troll_no=roll,\n\t\t\t\tdob=dob,\n\t\t\t\tgender=gender,\n\t\t\t\tblood_group=bgroup,\n\t\t\t\tphone=phone,\n\t\t\t\tcurr_address=curradd,\n\t\t\t\tperm_address=permadd,\n\t\t\t\tguardian_name=gname,\n\t\t\t\tguardian_type=gtype,\n\t\t\t\tguardian_phone=gphone,\n\t\t\t\tcourse=models.Course.objects.get(pk=course),\n\t\t\t\tbatch=batch,\n\t\t\t\temail=email,\n\t\t\t\taddress_flag=address_flag\n\t\t\t)\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\tstudent.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added roll number' + str(roll) +'.\\n',\n\t\t\t\tactivity_type=\"add student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added new student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(\n\t\trequest, \"addStudent.html\", context_dict\n\t)", "def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)", "def edit(self, **kwargs):\n ..." ]
[ "0.74575925", "0.6492118", "0.64062715", "0.6391587", "0.6389621", "0.63496494", "0.6339974", "0.6285115", "0.6241227", "0.6230551", "0.62216437", "0.62023526", "0.6146335", "0.6138599", "0.6107638", "0.6090783", "0.60800827", "0.60740596", "0.60250974", "0.6019445", "0.59975696", "0.5989995", "0.5975467", "0.59738874", "0.5951865", "0.5950739", "0.5926014", "0.5908226", "0.59035707", "0.58703357" ]
0.7398913
1
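
The edit_student view above repeats the same three steps for every field: compare the posted value with the stored one, record the field name in update_fields, and append a line to the activity log before a single save(update_fields=...). A condensed sketch of that bookkeeping, assuming only that the instance supports save(update_fields=...):

def apply_field_changes(instance, changes):
    """Apply only the fields that differ, save once, and return the activity log.

    Sketch of the update_fields / activity bookkeeping written out inline in
    edit_student above; `changes` maps field name -> new value.
    """
    update_fields, activity = [], []
    for name, new_value in changes.items():
        if getattr(instance, name) != new_value:
            setattr(instance, name, new_value)
            update_fields.append(name)
            activity.append(f"Changed {name} to {new_value}.")
    if update_fields:
        instance.save(update_fields=update_fields)
    return "\n".join(activity)

The other edit views in these records (subject, attendance) follow the same shape and differ only in the list of fields they compare.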
Edit details and metadata related to the subject.
def edit_subject(request,subject_id): emp = models.Teacher.objects.get(user=request.user) if not emp.subject_permit: raise Http404 subject = models.Subject.objects.filter( pk=subject_id, soft_delete=False ).first() if not subject: raise Http404 context_dict = { "all_courses": context_helper.course_helper(), "subject_types": context_helper.subject_type_helper(), 'subject_id': subject_id, } if request.method == 'POST': update_fields = [] activity = '' course = request.POST.get('course_picker') name = request.POST.get('sname') sid = request.POST.get('sid') stype = request.POST.get('subject_picker') maxmarks = request.POST.get('marks') try: if str(subject.course.pk) != str(course): subject.course = models.Course.objects.get(pk=course) update_fields.append('course') activity += 'Changed course to ' + str(course) + '.\n' if subject.s_type != stype: subject.s_type = stype update_fields.append('s_type') activity += 'Changed subject type to ' + str(stype) + '.\n' if subject.name != name: subject.name = name update_fields.append('name') activity += 'Changed subject name to' + str(name) + '.\n' if subject.s_id != sid: subject.s_id = sid update_fields.append('s_id') activity += 'Changed subject ID to' + str(sid) + '.\n' if subject.max_marks != maxmarks: subject.max_marks = maxmarks update_fields.append('max_marks') activity += 'Changed maximum marks to' + str(maxmarks) + '.\n' subject.save(update_fields=update_fields) history = models.History( user=emp, activity=activity, activity_type="edit subject" ) history.save() context_dict["message"] = 'Successfully updated Result Data.' context_dict["success"] = True except Exception as e: context_dict["message"] = str(e) context_dict["success"] = False print(e) context_dict.update(context_helper.get_subject_info(subject)) for i in context_dict['courses']: try: del context_dict['all_courses'][i] except: pass for i in context_dict['subject_type']: try: context_dict['subject_types'].remove(i) except: pass if context_dict.get('success', False): return HttpResponseRedirect('/view-subjects') return render( request, "editSubject.html", context_dict )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, subject: Subject) -> None:\n pass", "def update(self, subject: Subject) -> None:\n pass", "def update_by_id(self, subject_id: str, new_subject_data: any) -> any:\n pass", "def update_subject(self):\n subject = extract_subject(self.events)\n self.events.subject = subject", "def change_subject(self, new_subject):\n raise NotImplementedError", "def update_subject(self, subject):\n if not subject or (self.currentSubject == subject):\n self.currentSubject = \"\"\n self.lbSubject.setText(\"<b>{}</b>\".format(NO_FOCAL_SUBJECT))\n self.lbFocalSubject.setText(NO_FOCAL_SUBJECT)\n else:\n self.currentSubject = subject\n self.lbSubject.setText(\"Subject: <b>{}</b>\".format(self.currentSubject))\n self.lbFocalSubject.setText(\" Focal subject: <b>{}</b>\".format(self.currentSubject))", "def setSubject(self,value): \n self.PDFreactorConfiguration.in1[\"subject\"] = value", "def edit_topic():\n topic = db.topic(request.args(0))\n form = SQLFORM(db.topic, record=topic)\n form.vars.description = text_store_read(topic.description)\n if form.validate():\n topic.update_record(\n name=form.vars.name,\n )\n text_store_write(form.vars.description, key=topic.description)\n session.flash = T('The topic has been created')\n redirect(URL('default', 'index'))\n return dict(form=form)", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def subject(self, value):\n self.set_property(\"subject\", value)", "def edit_data(request, modal_data_pk, section_pk, item_pk, subject_pk):\n section = get_object_or_404(ModalSection, pk=section_pk)\n subject = get_object_or_404(Subject, pk=subject_pk)\n item = get_object_or_404(Item, pk=item_pk)\n try:\n modal_data = get_object_or_404(ModalData, pk=modal_data_pk)\n except:\n modal_data = []\n\n if request.method == \"POST\":\n data = format_data_for_database(request)\n ModalData.add_modal_data(request, section, data)\n return redirect('modal', subject.id, item.id)\n\n context = {\n 'modal_data': modal_data,\n 'subject': subject,\n 'section': section,\n 'item': item,\n }\n \n return render(request, 'modal/edit_data.html', context)", "def subject(self, subject: \"str\"):\n self._attrs[\"subject\"] = subject", "def subject(self, subject: \"str\"):\n self._attrs[\"subject\"] = subject", "def subject(self, subject):\n\n self._subject = subject", "def subject(self, subject):\n\n self._subject = subject", "def subject(self, subject):\n\n self._subject = subject", "def test_update_subject(self):\n conv = G(Conversation, type=CONVERSATION_TYPE_CHAT, creator=self.user1)\n conv.users.add(self.user2)\n self.login(self.user1)\n resp = self.client.patch(self.get_url(conv.pk), {'subject': '-'})\n self.assert200(resp)\n self.assertEqual(Conversation.objects.get(pk=conv.pk).subject, '-')", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return 
dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def set_subject(self, subject):\n self._subject = subject", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n \n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def edit(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n # TODO: It would be nice to use the metadata layer for this, but\n # this code handles empty values differently than other metadata\n # sources. When a staff member deletes a value, that indicates\n # they think it should be empty. This needs to be indicated in the\n # db so that it can overrule other data sources that set a value,\n # unlike other sources which set empty fields to None.\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n changed = False\n\n staff_data_source = DataSource.lookup(\n self._db, DataSource.LIBRARY_STAFF)\n primary_identifier = work.presentation_edition.primary_identifier\n staff_edition, is_new = get_one_or_create(\n self._db, Edition,\n primary_identifier_id=primary_identifier.id,\n data_source_id=staff_data_source.id\n )\n self._db.expire(primary_identifier)\n\n new_title = flask.request.form.get(\"title\")\n if new_title and work.title != new_title:\n staff_edition.title = str(new_title)\n changed = True\n\n new_subtitle = flask.request.form.get(\"subtitle\")\n if work.subtitle != new_subtitle:\n if work.subtitle and not new_subtitle:\n new_subtitle = NO_VALUE\n staff_edition.subtitle = str(new_subtitle)\n changed = True\n\n # The form data includes roles and names for contributors in the same order.\n new_contributor_roles = flask.request.form.getlist(\"contributor-role\")\n new_contributor_names = [\n str(n) for n in flask.request.form.getlist(\"contributor-name\")]\n # The first author in the form is considered the primary author, even\n # though there's no separate MARC code for that.\n for i, role in enumerate(new_contributor_roles):\n if role == Contributor.AUTHOR_ROLE:\n new_contributor_roles[i] = Contributor.PRIMARY_AUTHOR_ROLE\n break\n roles_and_names = list(\n zip(new_contributor_roles, new_contributor_names))\n\n # Remove any contributions that weren't in the form, and remove contributions\n # that already exist from the list so they won't be added again.\n deleted_contributions = False\n for contribution in staff_edition.contributions:\n if (contribution.role, contribution.contributor.display_name) not in roles_and_names:\n self._db.delete(contribution)\n deleted_contributions = True\n changed = True\n else:\n roles_and_names.remove(\n (contribution.role, contribution.contributor.display_name))\n if deleted_contributions:\n # Ensure the staff edition's contributions are up-to-date when\n # calculating the presentation edition later.\n self._db.refresh(staff_edition)\n\n # Any remaining roles and names are new contributions.\n for role, name in roles_and_names:\n # There may be one extra role at the end from the input for\n # adding a contributor, in which case it will have no\n # corresponding name and can be ignored.\n if name:\n if role not in list(Contributor.MARC_ROLE_CODES.keys()):\n self._db.rollback()\n return UNKNOWN_ROLE.detailed(\n _(\"Role %(role)s is not one of the known contributor roles.\",\n 
role=role))\n contributor = staff_edition.add_contributor(\n name=name, roles=[role])\n contributor.display_name = name\n changed = True\n\n new_series = flask.request.form.get(\"series\")\n if work.series != new_series:\n if work.series and not new_series:\n new_series = NO_VALUE\n staff_edition.series = str(new_series)\n changed = True\n\n new_series_position = flask.request.form.get(\"series_position\")\n if new_series_position != None and new_series_position != '':\n try:\n new_series_position = int(new_series_position)\n except ValueError:\n self._db.rollback()\n return INVALID_SERIES_POSITION\n else:\n new_series_position = None\n if work.series_position != new_series_position:\n if work.series_position and new_series_position == None:\n new_series_position = NO_NUMBER\n staff_edition.series_position = new_series_position\n changed = True\n\n new_medium = flask.request.form.get(\"medium\")\n if new_medium:\n if new_medium not in list(Edition.medium_to_additional_type.keys()):\n self._db.rollback()\n return UNKNOWN_MEDIUM.detailed(\n _(\"Medium %(medium)s is not one of the known media.\",\n medium=new_medium))\n staff_edition.medium = new_medium\n changed = True\n\n new_language = flask.request.form.get(\"language\")\n if new_language != None and new_language != '':\n new_language = LanguageCodes.string_to_alpha_3(new_language)\n if not new_language:\n self._db.rollback()\n return UNKNOWN_LANGUAGE\n else:\n new_language = None\n if new_language != staff_edition.language:\n staff_edition.language = new_language\n changed = True\n\n new_publisher = flask.request.form.get(\"publisher\")\n if new_publisher != staff_edition.publisher:\n if staff_edition.publisher and not new_publisher:\n new_publisher = NO_VALUE\n staff_edition.publisher = str(new_publisher)\n changed = True\n\n new_imprint = flask.request.form.get(\"imprint\")\n if new_imprint != staff_edition.imprint:\n if staff_edition.imprint and not new_imprint:\n new_imprint = NO_VALUE\n staff_edition.imprint = str(new_imprint)\n changed = True\n\n new_issued = flask.request.form.get(\"issued\")\n if new_issued != None and new_issued != '':\n try:\n new_issued = strptime_utc(new_issued, '%Y-%m-%d')\n except ValueError:\n self._db.rollback()\n return INVALID_DATE_FORMAT\n else:\n new_issued = None\n if new_issued != staff_edition.issued:\n staff_edition.issued = new_issued\n changed = True\n\n # TODO: This lets library staff add a 1-5 rating, which is used in the\n # quality calculation. However, this doesn't work well if there are any\n # other measurements that contribute to the quality. The form will show\n # the calculated quality rather than the staff rating, which will be\n # confusing. 
It might also be useful to make it more clear how this\n # relates to the quality threshold in the library settings.\n changed_rating = False\n new_rating = flask.request.form.get(\"rating\")\n if new_rating != None and new_rating != '':\n try:\n new_rating = float(new_rating)\n except ValueError:\n self._db.rollback()\n return INVALID_RATING\n scale = Measurement.RATING_SCALES[DataSource.LIBRARY_STAFF]\n if new_rating < scale[0] or new_rating > scale[1]:\n self._db.rollback()\n return INVALID_RATING.detailed(\n _(\"The rating must be a number between %(low)s and %(high)s.\",\n low=scale[0], high=scale[1]))\n if (new_rating - scale[0]) / (scale[1] - scale[0]) != work.quality:\n primary_identifier.add_measurement(\n staff_data_source, Measurement.RATING, new_rating, weight=WorkController.STAFF_WEIGHT)\n changed = True\n changed_rating = True\n\n changed_summary = False\n new_summary = flask.request.form.get(\"summary\") or \"\"\n if new_summary != work.summary_text:\n old_summary = None\n if work.summary and work.summary.data_source == staff_data_source:\n old_summary = work.summary\n\n work.presentation_edition.primary_identifier.add_link(\n Hyperlink.DESCRIPTION, None,\n staff_data_source, content=new_summary)\n\n # Delete previous staff summary\n if old_summary:\n for link in old_summary.links:\n self._db.delete(link)\n self._db.delete(old_summary)\n\n changed = True\n changed_summary = True\n\n if changed:\n # Even if the presentation doesn't visibly change, we want\n # to regenerate the OPDS entries and update the search\n # index for the work, because that might be the 'real'\n # problem the user is trying to fix.\n policy = PresentationCalculationPolicy(\n classify=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True,\n calculate_quality=changed_rating,\n choose_summary=changed_summary,\n )\n work.calculate_presentation(policy=policy)\n\n return Response(\"\", 200)", "def edit(self):\n\n pass", "def put(self, subject: any) -> any:\n pass", "def edit_announcement():\n # Implement me!\n\n announcement = get_announcement(request.vars.announcement_id, auth.user.email)\n\n announcement.description = request.vars.description\n announcement.name = request.vars.name\n announcement.updated_on = datetime.datetime.utcnow()\n announcement.update_record()\n return response.json(announcement)", "def showEditContact(self):", "def edit(self, **kwargs):\n ...", "def update_mission_info(self, tv, cid, mission):\n\t\ttv.set(cid, \"state\", STATE[mission.state])\n\t\ttv.set(cid, \"name\", safe_tk(mission.title))", "def edit(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n title = self.request.get('title')\n if not title:\n return JSONResponse(self.request).error(\n _('agenda_item_update_empty_string',\n default=u\"Agenda Item title must not be empty.\")).proceed().dump()\n\n title = title.decode('utf-8')\n if self.agenda_item.has_proposal:\n if len(title) > ISubmittedProposal['title'].max_length:\n return JSONResponse(self.request).error(\n _('agenda_item_update_too_long_title',\n default=u\"Agenda Item title is too long.\")\n ).proceed().dump()\n\n self.agenda_item.set_title(title)\n return JSONResponse(self.request).info(\n _('agenda_item_updated',\n default=u\"Agenda Item updated.\")).proceed().dump()", "def edit_person(self, pk):", "def edit_details(specimen_id):\n specimen = Details.query.filter_by(specimen_id=specimen_id).first()\n form = CollectionDetailsForm(obj=specimen)\n\n if current_user.id == 
specimen.specimens.user_id:\n if form.validate_on_submit():\n specimen.date = form.date.data\n specimen.location = form.location.data\n specimen.county = form.county.data\n specimen.state = form.state.data\n specimen.habitat = form.habitat.data\n specimen.notes = form.notes.data\n\n db.session.commit()\n\n return redirect(f\"/specimen/{specimen_id}\")\n\n else:\n return render_template(\n \"editspecimen.html\",\n form=form,\n specimen=specimen,\n step=\"details\",\n )\n else:\n return (\"\", 403)" ]
[ "0.65650016", "0.65650016", "0.6508217", "0.6415158", "0.6376233", "0.6271657", "0.60244745", "0.6020632", "0.59968424", "0.59689504", "0.59328204", "0.59253275", "0.59253275", "0.5892029", "0.5892029", "0.5892029", "0.58708006", "0.5866075", "0.585368", "0.58293235", "0.5769355", "0.57559144", "0.57119995", "0.57108426", "0.56986505", "0.5691432", "0.5682045", "0.56726515", "0.5655958", "0.5646336" ]
0.7351874
0
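
Both edit views above finish by pruning the record's current selections (course, subject type, blood group, and so on) out of the picker collections so the template does not render duplicates. The same idea as one small helper covering the dict-shaped and list-shaped pickers seen above; the function name is illustrative:

def exclude_selected(options, selected):
    """Drop already-selected entries from a picker's options.

    Mirrors the try/del and remove() loops at the end of the edit views above;
    handles both the dict-shaped and list-shaped option collections they use.
    """
    if isinstance(options, dict):
        return {key: value for key, value in options.items() if key not in selected}
    return [option for option in options if option not in selected]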
View students using data tables.
def view_students(request): context_dict = { 'title': 'All Students', } return render(request, "viewStudent.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.first_name,\n s.Last_name,\n s.slack_handle,\n s.cohort_id,\n c.name\n from students s\n join cohorts c on s.cohort_id = c.id\n order by s.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Students***')\n\n for student in all_students:\n print(student)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.StudentId,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.CohortId\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n for student in all_students:\n print(student)", "def all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n for student in all_students:\n print(student)", "def student_summary() -> str:\n db_path: str = \"810_startup.db\"\n\n try:\n db: sqlite3.Connection = sqlite3.connect(db_path)\n except sqlite3.OperationalError:\n return f'Error: Unable to open database at path {db_path}'\n else:\n query: str = \"select students.Name, students.CWID, grades.Course, grades.Grade, instructors.Name from students,grades,instructors where students.CWID=StudentCWID and InstructorCWID=instructors.CWID order by students.Name\"\n data: Dict[str, str] = [{'Name': name, 'CWID': cwid, 'Course': course, 'Grade': grade, 'Instructor': instructor} for name, cwid, course, grade, instructor in db.execute(query)]\n\n db.close()\n\n return render_template(\n 'students.html',\n title = 'Stevens Repository',\n table_title = 'Students Summary',\n students = data)", "def data_table_page( table_type ) :\r\n 
logger.debug( f\"table_type={table_type}\" )\r\n model = session_info.get_user_model(session)\r\n\r\n # select table type's corresponding data\r\n if table_type == \"x\" :\r\n df = model._dfX\r\n elif table_type== \"y\" :\r\n df = model._dfY\r\n elif table_type == \"merged\" :\r\n df = model.dfMerged\r\n elif table_type == \"param\" :\r\n param = request.args[\"param\"]\r\n logger.debug(f\"param={param}\")\r\n df = model.dfMerged[[ model.id_col , f\"{param}_x\", f\"{param}_y\"]]\r\n else :\r\n logger.debug()\r\n raise ValueError( f\"Unrecognized table_type={table_type}\" )\r\n \r\n return f\"<pre>{df.to_string()}</pre>\" # TODO replace with template\r", "def initDataView(self,student_cols):\n self.notebook.initStudentsView(student_cols)\n #self.dataView.initStudentsView(student_cols)", "def show_all():\n\n QUERY = \"\"\"\n SELECT first_name, last_name, github\n FROM students\n \"\"\"\n\n db_cursor = db.session.execute(QUERY)\n\n rows = db_cursor.fetchall()\n\n return rows", "def Table(request, key):\n test = models.user_test.Test.get_mem(key)\n if not test:\n msg = 'No test was found with test_key %s.' % key\n return http.HttpResponseServerError(msg)\n\n params = {\n 'hide_nav': True,\n 'hide_footer': True,\n 'test': test,\n }\n\n return util.GetResults(request, 'user_test_table.html', params,\n test.get_test_set())", "def __ui_list_students(self):\n try:\n print(str(self.__student_controller))\n except RepositoryException as re:\n print(re)\n return", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n\n\n rows = hackbright.list_projects(github)\n\n return render_template (\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows\n )", "def select_all_students(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM students\")\n\n rows = cur.fetchall()\n print(rows)\n for row in rows:\n print(row)", "def browse_students(request):\n students = Student.objects.filter(current_mentor=None)\\\n .exclude(Q(status='drop-out') | Q(status='unresponsive') | Q(status='retainer')\n | Q(status='alum') | Q(status='paused'))\n return render(request, 'match/browse_students.html', {'students': students})", "def student_view():\n sessions = []\n user_id = session.get('user_id')\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT sessions.course_id, sessions.location, sessions.days, sessions.class_time,\n courses.name AS class_name, roster.session_id\n FROM sessions JOIN courses on sessions.course_id = courses.course_id\n JOIN roster on roster.session_id = sessions.id\n JOIN users on users.id = roster.student_id\n WHERE users.id = %s\"\"\",\n (user_id,))\n student_classes = cur.fetchall()\n\n cur.execute(\"\"\"SELECT major FROM users\n WHERE id = %s\"\"\",\n (user_id,))\n student_major = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"layouts/student-home.html\", student_classes=student_classes, student_major=student_major)", "def list_dams(request):\n dams = get_all_dams()\n table_rows = []\n\n for dam in dams:\n table_rows.append(\n (\n dam['name'], dam['owner'],\n dam['river'], dam['date_built']\n )\n )\n\n dams_table = DataTableView(\n column_names=('Name', 'Owner', 'River', 'Date Built'),\n rows=table_rows,\n searching=False,\n orderClasses=False,\n lengthMenu=[ [10, 25, 50, -1], [10, 25, 50, \"All\"] ],\n )\n\n context = {\n 'dams_table': dams_table\n }\n\n return render(request, 'co_drought/list_dams.html', context)", "def get_student():\n\n github = 
request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def show_from_database(self, table_model):\n arr = [4, 1]\n # TODO", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def view_all(entities, table, db):\n print \n print \"TABLE:\",table\n for ii in entities:\n print ii\n print", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def table():\n if \"username\" in session:\n return render_template(\"table.html\")\n return abort(401)", "def get_students(self):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\"SELECT * FROM studenten\")\n self.cur.close()\n\n return self.cur.fetchall()", "def views(\n path, counts, nl, arrays, csv, no_headers, table, fmt, json_cols, columns, schema,\n):\n tables.callback(\n path=path,\n fts4=False,\n fts5=False,\n counts=counts,\n nl=nl,\n arrays=arrays,\n csv=csv,\n no_headers=no_headers,\n table=table,\n fmt=fmt,\n json_cols=json_cols,\n columns=columns,\n schema=schema,\n views=True,\n )", "def main():\n national_university_table()", "def show_data_table(self, show_data_table):\n\n self.container['show_data_table'] = show_data_table", "def student(identificator):\n student_table = db.get_table('student')\n student = student_table.get(identificator)\n if student is None:\n abort(404)\n discipline = db.get_table('discipline')\n disciplines = discipline.get()\n scores = student_table.get_scores(identificator)\n for each in disciplines:\n if each['id'] not in scores:\n scores[each['id']] = {'score': '', 'id': 0}\n form = StudentForm()\n return render_template(\n 'student.html', student=student,\n form=form, disciplines=disciplines,\n scores=scores\n )", "def display_student(s_info):\n print('')\n print('Your information:')\n print(f'{s_info.student_id} - {s_info.first_name} {s_info.last_name}')", "def index():\n response = \"\"\n for table in config.TABLE_SCHEMA.keys():\n response = response + disco.examples(table)\n return response" ]
[ "0.7032715", "0.62924534", "0.62218237", "0.6200149", "0.61551464", "0.6154113", "0.6104839", "0.61026675", "0.6043875", "0.6013538", "0.5988465", "0.5984755", "0.5973147", "0.59658676", "0.59630364", "0.58710605", "0.5852589", "0.5826267", "0.58052284", "0.5802828", "0.57896984", "0.57841414", "0.5758803", "0.5757901", "0.57494164", "0.5747357", "0.57406145", "0.5712988", "0.5706115", "0.569602" ]
0.6582367
1
To add results of students / link results and students
def addResultMain(request): emp = models.Teacher.objects.get(user=request.user) if not emp.result_permit: raise Http404 context_dict = { "result_type": context_helper.result_type_helper(), "all_subjects": context_helper.subject_helper(), "all_exam_name": context_helper.exam_name_helper(), } if request.method == 'POST': roll = request.POST.get('rno') result_type = request.POST.get('result_type_picker') exam = request.POST.get('exam_name_picker') subject = request.POST.get('subject_picker') marks = request.POST.get('marks') student = models.Student.objects.filter( roll_no=roll ).first() result_data_check = models.Result.objects.filter( result_type=result_type, batch=student.batch, exam_name=exam, subject=subject ).first() if not result_data_check: context_dict["message"] = "Result Data does not exist, first create data for result" return render(request, 'addResultMain.html', context_dict) duplicate_check = models.ResultMain.objects.filter( student=models.Student.objects.filter(roll_no=student).first(), result=result_data_check, ).first() if duplicate_check: context_dict["message"]= 'Result already exist.' duplicate_check.soft_delete=False duplicate_check.save() return render(request, "addResultMain.html", context_dict) try: result = models.ResultMain( student=student, result=result_data_check, marks_obtained=marks, ) result.save() history = models.History( user=emp, activity='Added result ' + str(result) + '-' + str(marks) +'.\n', activity_type="add result " ) history.save() context_dict["message"] = 'Successfully added new result.' context_dict["success"] = True except Exception as e: context_dict["message"] = str(e) context_dict["success"] = False print(e) return render(request, 'addResultMain.html', context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addMultiResults(self, results, index):\n # if no return from site, seed the results with an empty list\n if results is None or len(results) == 0:\n self._results[index] = None\n else:\n self._results[index] = results", "def get_results(self, stud_name):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\n \"SELECT c.naam, e.cijfer, e.ex_datum \"\n \"FROM studenten s \"\n \"INNER JOIN examens e ON e.stud_id = s.stud_id \"\n \"INNER JOIN cursussen c ON c.cur_id = e.cur_id WHERE s.naam = '{0}' \"\n \"ORDER BY e.ex_datum DESC\".format(stud_name))\n self.cur.close()\n\n return self.cur.fetchall()", "def admin_search_student_query(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n if request.session['type'] == 'S' or request.session['type'] == 'R': return redirect(reverse(URL_FORBIDDEN))\n\n if request.method == \"POST\":\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n email = request.POST['email']\n type = request.POST['type']\n dict = {}\n\n for user in User.objects.all():\n user_type = _get_user_type(user)\n\n if user_type is None or user_type == 'A':\n continue # for user who are not S, G, F, D, R, A\n\n user_first_name = None\n user_last_name = None\n user_email = None\n \n votes = 0\n\n if user_type == type:\n votes += 2\n\n if user_type == 'S':\n user_first_name = user.student.first_name\n user_last_name = user.student.last_name\n user_email = user.student.email\n elif user_type == 'G' or user_type == 'D':\n user_first_name = user.faculty.first_name\n user_last_name = user.faculty.last_name\n user_email = user.faculty.email\n elif user_type == 'R':\n user_first_name = user.first_name\n user_last_name = user.last_name\n user_email = user.email\n\n if first_name.upper() in user_first_name.upper():\n votes += 1\n\n if last_name.upper() in user_last_name.upper():\n votes += 1\n\n if email.upper() in user_email.upper():\n votes += 1\n\n dict[user] = votes\n \n sorted_results = sorted(dict.items(), key = operator.itemgetter(1))\n sorted_results.reverse()\n result = _clean_user_info_results(sorted_results)\n\n return HttpResponse(json.dumps(result), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def update_results(self, results):\n pass", "def add_result(self, test_ids, status, comment):\n for test_id in test_ids:\n data = {\n 'case_id': test_id,\n 'comment': comment,\n 'status_id': status,\n }\n self.results.append(data)", "def score_list_student(request):\n\n takes = Take.objects.filter(student__username=request.data[\"sid\"])\n serializer = TakeSerializer(takes, many=True)\n return Response(serializer.data)", "def addResults(self, results):\n if results is None or len(results) == 0:\n self._results = None\n else:\n self._results = results", "def getResults():", "def dummy_scores(result):\n\n # Get students assigned to this year\n req_year = result['subject']['year_id']\n req_user = result['user_id']\n sinfo = models.Students.query.filter_by(year_id=req_year, user_id=req_user).all()\n\n # Loop through and add dummy scores\n for i, student in enumerate(sinfo):\n print(student)\n models.Scores.add_dummy(student.id, result['id'], req_user)", "def _update_result(self, results, clf_numbers):\n # ToDo make results of scoring values dynamic\n names_results = ['Accuracy']\n for number in clf_numbers:\n for name in names_results:\n if name not in self.results:\n self.results[name] = [results[number][name + \"_test_score_\" + str(number)]]\n else:\n 
self.results[name].append(results[number][name + \"_test_score_\" + str(number)])", "def save_results(self, instagram_results):", "def _AddResult(self):\n if not self._results:\n result = analyzer_result.AnalyzerResult()\n result.attribute_name = 'test_result'\n result.attribute_value = 'is_vegetable'\n self._results.append(result)", "def extend(self, results):\r\n \r\n docs = self.docs\r\n self.scored_list.extend(docnum for docnum in results.scored_list\r\n if docnum not in docs)\r\n self.docs = docs | results.docs\r\n \r\n # TODO: merge the query terms?\r", "def results(self, results):\n self._results = results", "def results(self, results):\n self._results = results", "def results(self):\r\n pass", "def processSearchResult(self):", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def added_student():\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n first, last, github = hackbright.get_student_by_github(github)\n\n html = render_template(\"student_added.html\", first=first, last=last, github=github)\n\n return html", "def add_series_by_search_result(self, result):\n return (yield self.add_series_by_id(result.id))", "def results(self):\n pass", "def addResultData(request):\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.result_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"result_type\": context_helper.result_type_helper(),\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"all_exam_name\": context_helper.exam_name_helper(),\n\t}\n\tif request.method == \"POST\":\n\t\tcourse = request.POST.get('course_picker')\n\t\tresult_type = request.POST.get('result_type_picker')\n\t\texam_name = request.POST.get('exam_name_picker')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tduplicate_check = models.Result.objects.filter(\n\t\t\tcourse=course, result_type=result_type, exam_name=exam_name,\n\t\t\tsubject=subject, batch=batch\n\t\t).first()\n\t\tif duplicate_check:\n\t\t\tcontext_dict[\"message\"] = 'Result Data already exist.'\n\t\t\tduplicate_check.soft_delete=False\n\t\t\tduplicate_check.save()\n\t\t\treturn render(request, \"addResultData.html\", context_dict)\n\t\ttry:\n\t\t\tresult_data = models.Result(\n\t\t\t\tcourse=models.Course.objects.get(pk=course),\n\t\t\t\tresult_type=result_type, \n\t\t\t\texam_name=models.ExamName.objects.get(pk=course),\n\t\t\t\tsubject=models.Subject.objects.get(pk=course), \n\t\t\t\tbatch=batch\n\t\t\t)\n\t\t\tresult_data.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added result data' + str(course)+ str(result_type)+ \n\t\t\t\t\tstr(exam_name)+ str(subject)+ str(batch) +'.\\n',\n\t\t\t\tactivity_type=\"add result data\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added new Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(request, \"addResultData.html\", context_dict)", "def printSearchResults(results):\n Log.Debug('Search produced %d 
results:' % len(results))\n index = 0\n for result in results:\n Log.Debug(' ... %d: id=\"%s\", name=\"%s\", year=\"%s\", score=\"%d\".' %\n (index, result.id, result.name, str(result.year), result.score))\n index += 1", "def add_student():\n\n return render_template(\"student_add.html\")", "def __save_relevants_in_results(self, exec_result, total: bool = False) -> None:\n current_idx = self.num_res\n # print(\"Current index: {}\".format(current_idx))\n self.num_res += len(exec_result['search-results']['entry'])\n # print(\"[Before saving in results dict] Number of current results: {}\".format(self.num_res))\n if total:\n self.results[\"total_results\"] = int(exec_result['search-results']['opensearch:totalResults'])\n for i, doc in enumerate(exec_result['search-results']['entry']):\n date_parts = self.__split_date(doc['prism:coverDate'][0]['$'])\n if \"authors\" in doc.keys():\n authors = self.__convert_authors(doc[\"authors\"])\n else:\n authors = \"\"\n self.results[\"documents\"].append(dict())\n self.results[\"documents\"][current_idx+i][\"eid\"] = doc['eid']\n self.results[\"documents\"][current_idx+i][\"title\"] = self.__prepare_title(doc[\"dc:title\"])\n self.results[\"documents\"][current_idx+i][\"authors\"] = authors\n self.results[\"documents\"][current_idx+i][\"date\"] = doc['prism:coverDate'][0]['$']\n self.results[\"documents\"][current_idx+i][\"year\"] = date_parts[0]\n self.results[\"documents\"][current_idx+i][\"month\"] = date_parts[1]\n self.results[\"documents\"][current_idx+i][\"day\"] = date_parts[2]", "def add_to_results(self, result_id: str, result):\n\n self.results[result_id] = result", "def __ui_search_student_by_name(self, search):\n try:\n result = self.__student_controller.search_by_name(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return", "def get_results(self):\n\n super().get_results()", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def results(self, results):\n\n self._results = results" ]
[ "0.6114725", "0.59435695", "0.5933007", "0.58812654", "0.5834557", "0.57925606", "0.57653034", "0.5724111", "0.56746095", "0.5673749", "0.56612015", "0.565572", "0.5644683", "0.5600812", "0.5600812", "0.5592871", "0.55797255", "0.55652213", "0.5562192", "0.5561268", "0.55395174", "0.5538853", "0.55295867", "0.55080205", "0.5503344", "0.5500893", "0.54996336", "0.5478121", "0.54713196", "0.54567665" ]
0.6044379
1
View Main results of students.
def view_result_main(request): context_dict = { 'title': 'All Results Main', } return render(request, "viewResultMain.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_students(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Students',\n\t}\n\treturn render(request, \"viewStudent.html\", context_dict)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n\n\n rows = hackbright.list_projects(github)\n\n return render_template (\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows\n )", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def view_test(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Students'\n\t}\n\treturn render(\n\t\trequest,\n\t\t'viewTest.html',\n\t\tcontext_dict\n\t)", "def display_results():\n pass", "def student_view():\n sessions = []\n user_id = session.get('user_id')\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT sessions.course_id, sessions.location, sessions.days, sessions.class_time,\n courses.name AS class_name, roster.session_id\n FROM sessions JOIN courses on sessions.course_id = courses.course_id\n JOIN roster on roster.session_id = sessions.id\n JOIN users on users.id = roster.student_id\n WHERE users.id = %s\"\"\",\n (user_id,))\n student_classes = cur.fetchall()\n\n cur.execute(\"\"\"SELECT major FROM users\n WHERE id = %s\"\"\",\n (user_id,))\n student_major = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"layouts/student-home.html\", student_classes=student_classes, student_major=student_major)", "def student_summary() -> str:\n db_path: str = \"810_startup.db\"\n\n try:\n db: sqlite3.Connection = sqlite3.connect(db_path)\n except sqlite3.OperationalError:\n return f'Error: Unable to open database at path {db_path}'\n else:\n query: str = \"select students.Name, students.CWID, grades.Course, grades.Grade, instructors.Name from students,grades,instructors where students.CWID=StudentCWID and InstructorCWID=instructors.CWID order by students.Name\"\n data: Dict[str, str] = [{'Name': name, 'CWID': cwid, 'Course': course, 'Grade': grade, 'Instructor': instructor} for name, cwid, course, grade, instructor in db.execute(query)]\n\n db.close()\n\n return render_template(\n 'students.html',\n title = 'Stevens Repository',\n table_title = 'Students Summary',\n students = data)", "def view_results(self):\n self.master.switch_frame(ResultsView)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n 
github=github,\n title_grade_list=title_grade_list)\n\n return html", "def __ui_list_students(self):\n try:\n print(str(self.__student_controller))\n except RepositoryException as re:\n print(re)\n return", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html", "def get_student():\n\n github = request.args.get('github')\n\n # print (\"aaaaaa\",hackbright.get_student_by_github(github))\n\n # if hackbright.get_student_by_github(github):\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # html = render_template(\"student_info.html\",\n # first = first,\n # last = last,\n # github=github)\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github)", "def main():\n student_info = prompt_student()\n display_student(student_info)", "def browse_students(request):\n students = Student.objects.filter(current_mentor=None)\\\n .exclude(Q(status='drop-out') | Q(status='unresponsive') | Q(status='retainer')\n | Q(status='alum') | Q(status='paused'))\n return render(request, 'match/browse_students.html', {'students': students})", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n html = render_template('student_info.html',\n first=first,\n last=last,\n github=github)\n return html", "def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)", "def show_results(self):\n print(\"Survey results:\")\n for response in self.responses:\n print('- ' + response)", "def score_list_student(request):\n\n takes = Take.objects.filter(student__username=request.data[\"sid\"])\n serializer = TakeSerializer(takes, many=True)\n return Response(serializer.data)", "def Dashboard(user=None):\n\n\tif user == None:\n\t\tuser= defaultUser\n\n\ttable = user.htmlTable(head=5)\n\t\n\n\tphysics_score = user.subjectAccuracy(\"Physics\")\n\tbiology_score = user.subjectAccuracy(\"Biology\")\n\n\tbiology_numerator = biology_score[1]\n\tbiology_denominator = biology_score[0]\n\tbiology_accuracy = int(np.round(biology_score[2], 2) * 100)\n\n\tphysics_numerator = physics_score[1]\n\tphysics_denominator = physics_score[0]\n\tphysics_accuracy = int(np.round(physics_score[2], 2) * 100)\n\n\ttotal_questions = biology_denominator + physics_denominator\n\n\n\twikifier_results = {}\n\twikifier_results[\"Oski\"] = \"https://en.wikipedia.org/wiki/Oski_the_Bear\"\n\twikifier_results[\"Mitosis\"] = \"https://en.wikipedia.org/wiki/Mitosis\"\n\twikifier_results[\"Gravity\"] = \"https://en.wikipedia.org/wiki/Gravity\"\n\n\treturn render_template('indexStudent.html', user=user.name, table=table, wikifier_results=wikifier_results, \n\t\tphysics_numerator = physics_numerator, physics_denominator = physics_denominator, physics_accuracy = physics_accuracy, \n\t\tbiology_accuracy = biology_accuracy, biology_numerator = biology_numerator, biology_denominator = biology_denominator, total_questions=total_questions)", "def view_student_gradebook():\n\n user_id = session.get('user_id')\n courses = []\n grades = []\n con = db.get_db()\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT DISTINCT courses.course_id, 
(ROUND(sum(grades.points_received)/sum(grades.total_points), 2 )*100)\n as total_grade, roster.session_id as class_session,\n courses.name as class_name, users.name AS teacher_name, grades.student_id\n FROM courses JOIN sessions on courses.course_id = sessions.course_id\n\t\t\t\t JOIN users on courses.teacherid= users.id\n JOIN assignments on assignments.session_id = sessions.id\n JOIN grades on grades.assignment_id = assignments.assignment_id\n JOIN roster on roster.session_id = sessions.id\n WHERE grades.student_id = %s\n\t GROUP BY grades.student_id, roster.session_id, courses.course_id, users.id\"\"\",\n (user_id,))\n courses = cur.fetchall()\n\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/student_view.html\", courses=courses)", "def vis_results():\n try:\n uuid = session['uuid']\n dm = DatabaseManager()\n genes, diseases, uuiddb, query, genpanel, date \\\n = dm.retreieve_zoekopdracht(uuid)\n\n return render_template(\"results.html\", genes=genes, diseases=diseases,\n uuid=uuid, query=query, genpanel=genpanel,\n date=date)\n except KeyError:\n flash(\"Please run a search or retrieve one from the archived \"\n \"searches before visiting this page!\")\n return redirect(url_for('search'))", "def student_view(self, context=None):\n if self.is_course_staff():\n return self.staff_view()\n gea_assessment = GeaAssessment(User.objects.get(id=self.xmodule_runtime.user_id), self)\n frag = Fragment(loader.render_template(\"templates/edx_gea/student.html\",\n {'score' : gea_assessment.score,\n 'comment' : gea_assessment.comment}))\n return frag", "def results():\n\n queryName = request.form['query']\n queryStars = request.form['stars']\n \n datasource = DataSource()\n listOfRestaurantNames = datasource.searchRestaurantsByNameAndMinimumStars(queryName, queryStars)\n restaurants = datasource.generateRestaurantObjects(listOfRestaurantNames[:15])\n\n return render_template('results.html', restaurants=restaurants)", "def display_student(s_info):\n print('')\n print('Your information:')\n print(f'{s_info.student_id} - {s_info.first_name} {s_info.last_name}')", "def index():\n if request.method == 'POST':\n sample_ids = request.form.getlist(\"sample_ids\")\n best = request.form.get(\"best\")\n worst = request.form.get(\"worst\")\n if best is not None and worst is not None:\n mdr.submit_results(sample_ids, best, worst)\n \n sample = mdr.get_sample()\n \n return render_template('index.html', sample=sample[\"items\"])", "def show_results():\n\n if request.args.get('cheery'):\n msg_type = \"cheery\"\n message = \"\"\"\n Life’s like a movie. Write your own ending.\n \"\"\"\n auth = \"— Kermit the Frog\"\n elif request.args.get('dreary'):\n msg_type = \"dreary\"\n message = \"\"\"\n Bad days happen to everyone, but when one happens to you, just\n keep doing your best and never let a bad day make you feel bad\n about yourself.\n \"\"\"\n author = \"— Big Bird\"\n elif request.args.get('honest'):\n msg_type = \"honest\"\n message = \"\"\"\n Who care if me eat carrot or collard greens? Me also like broccoli\n and lettuce and lima beans. Me still Cookie Monster. 
That not a sham.\n \"\"\"\n author = \"— Cookie Monster\"\n\n return render_template(\"results.html\",\n message=message,\n author=author,\n msg_type=msg_type)", "def student_summary(self, student_id, request, activity):\n try:\n student = User.objects.get(id=student_id)\n except User.DoesNotExist:\n return HttpResponseNotFound(\"Cet étudiant ne fait pas partie de ce cours\")\n\n if not activity.is_member(student):\n return HttpResponseNotFound(\"Cet étudiant ne fait pas partie de ce cours\")\n\n activities = [acti for acti in activity.indexed_activities() if acti.open]\n indexed_pl = {a: a.indexed_pl() for a in activities}\n all_pl = []\n for indexed in indexed_pl.values():\n all_pl += list(indexed)\n teacher_list = activity.teacher.all()\n tl_id = [t.id for t in teacher_list]\n student_list = activity.student.exclude(id__in=tl_id)\n nb_student = len(student_list) if student_list else 1\n\n grades_query = HighestGrade.objects.filter(activity__in=activities,\n pl__in=all_pl,\n user__in=student_list)\n d_grade = dict()\n for g in grades_query:\n if g.grade is not None:\n d_grade[(g.user.id, g.pl.id)] = int(g.grade)\n\n tp = list()\n for a in activities:\n question = list()\n for pl in a.indexed_pl():\n all_mark = list()\n for s in student_list:\n if (s.id, pl.id) in d_grade:\n ms = max([0, d_grade[(s.id, pl.id)]])\n else:\n ms = 0\n all_mark.append(ms)\n if (student.id, pl.id) not in d_grade:\n mark_student = 0\n else:\n mark_student = max([0, d_grade[(student.id, pl.id)]])\n state = Answer.pl_state(pl, student)\n question.append({\n 'state': state,\n 'name': pl.json['title'],\n 'all_mark': all_mark,\n 'mark': mark_student,\n 'mean': round(sum(all_mark) / (5*nb_student), 2),\n 'min': round(min(all_mark) / 5, 2),\n 'max': round(max(all_mark) / 5, 2),\n })\n len_tp = len(question) if question else 1\n all_grouped_mark = list()\n for i in range(nb_student):\n all_grouped_mark.append(sum([q['all_mark'][i] for q in question]) / len_tp)\n tp.append({\n 'name': a.activity_data['title'],\n 'activity_name': a.name,\n 'id': a.id,\n 'width': str(100 / len_tp),\n 'pl': question,\n 'all_mark': all_grouped_mark,\n 'mark': round(sum([q['mark'] for q in question]) / (5*len_tp), 2),\n 'mean': round(sum(all_grouped_mark) / (5*nb_student), 2),\n 'min': round(min(all_grouped_mark) / 5, 2),\n 'max': round(max(all_grouped_mark) / 5, 2),\n })\n\n len_act = sum([len(t['pl']) for t in tp]) if [len(t['pl']) for t in tp] else 1\n all_act_mark = list()\n for i in range(nb_student):\n sum_mark = 0\n for t in tp:\n sum_mark += sum([e['all_mark'][i] for e in t['pl']])\n all_act_mark.append(sum_mark / len_act)\n course_mark = sum([sum([e['mark'] for e in t['pl']]) for t in tp]) / len_act\n return render(request, 'activity/activity_type/course/student_summary.html', {\n 'state': [i for i in State if i != State.ERROR],\n 'course_name': activity.name,\n 'student': student,\n 'activities': tp,\n 'course_id': activity.id,\n 'mark': round(course_mark / 5, 2),\n 'mean': round(sum(all_act_mark) / (5*nb_student), 2),\n 'min': round(min(all_act_mark) / 5, 2),\n 'max': round(max(all_act_mark) / 5, 2),\n 'nb_more': sum([1 for m in all_act_mark if m > course_mark]),\n 'nb_less': sum([1 for m in all_act_mark if m < course_mark]),\n })", "def student_view(self, context=None):\n context = {\n 'title': self.display_name,\n 'question': self.question,\n 'description': self.description,\n 'max_score': self.weight,\n 'score': self.score,\n 'is_done': self.is_done\n }\n\n html = self.render_template(\"static/html/student_view.html\", 
context)\n\n frag = Fragment(html)\n frag.add_css(self.load_resource(\"static/css/qnet.css\"))\n frag.add_javascript(self.load_resource(\"static/js/src/student_view.js\"))\n frag.initialize_js('QnetXBlock')\n return frag" ]
[ "0.7259597", "0.69562906", "0.6889267", "0.68556446", "0.67449915", "0.6720009", "0.67108893", "0.67076385", "0.66855097", "0.6681005", "0.6644783", "0.6635739", "0.65707105", "0.6508019", "0.6491504", "0.6467003", "0.644743", "0.64383495", "0.6376366", "0.6297787", "0.62705606", "0.6246395", "0.62429756", "0.62223023", "0.6186169", "0.6181443", "0.6171522", "0.6126429", "0.6112584", "0.61099046" ]
0.72396094
1
Add result of attendance to the particular subject. Can also use this by taking subject ID not giving drop down for subjects.
def add_attendance(request): emp = models.Teacher.objects.get(user=request.user) if not emp.student_permit: raise Http404 context_dict = { "all_subjects": context_helper.subject_helper(), } if request.method == "POST": roll = request.POST.get('roll') subject = request.POST.get('subject_picker') attendance = request.POST.get('attendance') total = request.POST.get('total') student = models.Student.objects.filter( roll_no=roll ).first() duplicate_check = models.Attendance.objects.filter( student=student, subject=subject, ).first() if duplicate_check: context_dict["message"] = 'Attendance already exist.' duplicate_check.soft_delete=False duplicate_check.save() return render(request, "addAttendance.html", context_dict) try: attendance_data = models.Attendance( student=student, subject=models.Subject.objects.get(pk=subject), total_attendance=total, obtained_attendance=attendance ) attendance_data.save() history = models.History( user=emp, activity='Added attendance of ' + str(student) + str(subject) +'.\n', activity_type="add attendance" ) history.save() context_dict["message"] = 'Successfully added Attendance.' context_dict["success"] = True except Exception as e: context_dict["message"] = str(e) context_dict["success"] = False print(e) return render(request, "addAttendance.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_attendance(request, attendance_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tattendance = models.Attendance.objects.filter(\n\t\tpk=attendance_id, soft_delete=False\n\t).first()\n\tprint(\"1\")\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t'attendance_id': attendance_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tobtained = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tif not student:\n\t\t\tcontext_dict[\"message\"] = 'Student at does not exist / Roll number has not been alloted.'\n\t\t\treturn render(request, \"editAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tif attendance.student != student:\n\t\t\t\tattendance.student = student\n\t\t\t\tupdate_fields.append('student')\n\t\t\t\tactivity += 'Changed student to ' + str(student) + '.\\n'\n\t\t\tif attendance.total_attendance != total:\n\t\t\t\tattendance.total_attendance = total\n\t\t\t\tupdate_fields.append('total_attendance')\n\t\t\t\tactivity += 'Changed total attendance to ' + str(total) + '.\\n'\n\t\t\tif attendance.obtained_attendance != obtained:\n\t\t\t\tattendance.obtained_attendance = obtained\n\t\t\t\tupdate_fields.append('obtained_attendance')\n\t\t\t\tactivity += 'Changed obtained attendance to' + str(obtained) + '.\\n'\n\t\t\tif str(attendance.subject.pk) != str(subject):\n\t\t\t\tattendance.subject = models.Subject.objects.get(pk=subject)\n\t\t\t\tupdate_fields.append('subject')\n\t\t\t\tactivity += 'Changed subject to ' + str(subject) + '.\\n'\n\t\t\tattendance.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_attendance_info(attendance))\n\tfor i in context_dict['subjects']:\n\t\t# use for dynamic\n\t\ttry: del context_dict['all_subjects'][i]\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-attendance')\n\treturn render(\n\t\trequest, \"editAttendance.html\", context_dict\n\t)", "def add_user_subject(self, user, date_time):\n self._update_user_activity(self.num_subjects_per_user, user, date_time)", "def addResultMain(request):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.result_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"result_type\": context_helper.result_type_helper(),\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t\"all_exam_name\": context_helper.exam_name_helper(),\n\t}\n\tif request.method == 'POST':\n\t\troll = request.POST.get('rno')\n\t\tresult_type = request.POST.get('result_type_picker')\n\t\texam = request.POST.get('exam_name_picker')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tmarks = request.POST.get('marks')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tresult_data_check = models.Result.objects.filter(\n\t\t\tresult_type=result_type, batch=student.batch,\n\t\t\texam_name=exam, 
subject=subject\n\t\t).first()\n\t\tif not result_data_check:\n\t\t\tcontext_dict[\"message\"] = \"Result Data does not exist, first create data for result\"\n\t\t\treturn render(request, 'addResultMain.html', context_dict)\n\t\tduplicate_check = models.ResultMain.objects.filter(\n\t\t\tstudent=models.Student.objects.filter(roll_no=student).first(), result=result_data_check,\n\t\t).first()\n\t\tif duplicate_check:\n\t\t\tcontext_dict[\"message\"]= 'Result already exist.'\n\t\t\tduplicate_check.soft_delete=False\n\t\t\tduplicate_check.save()\n\t\t\treturn render(request, \"addResultMain.html\", context_dict)\n\t\ttry:\n\t\t\tresult = models.ResultMain(\n\t\t\t\tstudent=student,\n\t\t\t\tresult=result_data_check, \n\t\t\t\tmarks_obtained=marks,\n\t\t\t)\n\t\t\tresult.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added result ' + str(result) + \n\t\t\t\t\t'-' + str(marks) +'.\\n',\n\t\t\t\tactivity_type=\"add result \"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added new result.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(request, 'addResultMain.html', context_dict)", "def addResultData(request):\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.result_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"result_type\": context_helper.result_type_helper(),\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"all_exam_name\": context_helper.exam_name_helper(),\n\t}\n\tif request.method == \"POST\":\n\t\tcourse = request.POST.get('course_picker')\n\t\tresult_type = request.POST.get('result_type_picker')\n\t\texam_name = request.POST.get('exam_name_picker')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tduplicate_check = models.Result.objects.filter(\n\t\t\tcourse=course, result_type=result_type, exam_name=exam_name,\n\t\t\tsubject=subject, batch=batch\n\t\t).first()\n\t\tif duplicate_check:\n\t\t\tcontext_dict[\"message\"] = 'Result Data already exist.'\n\t\t\tduplicate_check.soft_delete=False\n\t\t\tduplicate_check.save()\n\t\t\treturn render(request, \"addResultData.html\", context_dict)\n\t\ttry:\n\t\t\tresult_data = models.Result(\n\t\t\t\tcourse=models.Course.objects.get(pk=course),\n\t\t\t\tresult_type=result_type, \n\t\t\t\texam_name=models.ExamName.objects.get(pk=course),\n\t\t\t\tsubject=models.Subject.objects.get(pk=course), \n\t\t\t\tbatch=batch\n\t\t\t)\n\t\t\tresult_data.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added result data' + str(course)+ str(result_type)+ \n\t\t\t\t\tstr(exam_name)+ str(subject)+ str(batch) +'.\\n',\n\t\t\t\tactivity_type=\"add result data\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added new Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(request, \"addResultData.html\", context_dict)", "def add_subject(csv_data, subj, timepoint, orged, orgedwhen, orgedby, conved,\n convedwhen, convedby, notes):\n row = pd.DataFrame([dict(Subject=subj, Timepoint=timepoint,\n Organized=orged, Date_Organized=orgedwhen,\n Organized_by=orgedby, Converted=conved,\n Date_Converted=convedwhen, Converted_by=convedby,\n 
Notes=notes)])\n csv_data = csv_data.append(row, ignore_index=False)\n return csv_data", "def add_teacher_data(connection,name,tsc_no,subjects,type_of_teacher):\r\n with connection:\r\n connection.execute(INSERT_TEACHER,(name,tsc_no,subjects,type_of_teacher))", "def option1(self):\n ID = int(input(\"ID: \"))\n name = input(\"Name: \")\n attNr = int(input(\"Number of attendances: \"))\n grade = int(input(\"Grade: \"))\n self.__srv.addStud(ID,name,attNr,grade)", "def update_subject(self):\n subject = extract_subject(self.events)\n self.events.subject = subject", "def edit_subject(request,subject_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.subject_permit:\n\t\traise Http404\n\tsubject = models.Subject.objects.filter(\n\t\tpk=subject_id, soft_delete=False\n\t).first()\n\tif not subject:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"subject_types\": context_helper.subject_type_helper(),\n\t\t'subject_id': subject_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tcourse = request.POST.get('course_picker')\n\t\tname = request.POST.get('sname')\n\t\tsid = request.POST.get('sid')\n\t\tstype = request.POST.get('subject_picker')\n\t\tmaxmarks = request.POST.get('marks')\n\t\ttry:\n\t\t\tif str(subject.course.pk) != str(course):\n\t\t\t\tsubject.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif subject.s_type != stype:\n\t\t\t\tsubject.s_type = stype\n\t\t\t\tupdate_fields.append('s_type')\n\t\t\t\tactivity += 'Changed subject type to ' + str(stype) + '.\\n'\n\t\t\tif subject.name != name:\n\t\t\t\tsubject.name = name\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed subject name to' + str(name) + '.\\n'\n\t\t\tif subject.s_id != sid:\n\t\t\t\tsubject.s_id = sid\n\t\t\t\tupdate_fields.append('s_id')\n\t\t\t\tactivity += 'Changed subject ID to' + str(sid) + '.\\n'\n\t\t\tif subject.max_marks != maxmarks:\n\t\t\t\tsubject.max_marks = maxmarks\n\t\t\t\tupdate_fields.append('max_marks')\n\t\t\t\tactivity += 'Changed maximum marks to' + str(maxmarks) + '.\\n'\n\t\t\tsubject.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit subject\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_subject_info(subject))\n\tfor i in context_dict['courses']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['subject_type']:\n\t\ttry: context_dict['subject_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-subjects')\n\treturn render(\n\t\trequest, \"editSubject.html\", context_dict\n\t)", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n 
return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def add_subject(self, subject):\n super(PCAGroup, self).add_subject(subject)\n\n # Adds subjects pc data to group data dictionary\n for i in range(0, len(subject.values)):\n self.data_dict[i].append(subject.values[i])", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def subject_a(self, subject_a):\n\n self._subject_a = subject_a", "def update_by_id(self, subject_id: str, new_subject_data: any) -> any:\n pass", "def _start_rec_by_subject(self, subject):\r\n self.recording = True\r\n time = time_to_int(self.time.get())\r\n if subject == 'Audio':\r\n name = short_name(self.audio_name.get())\r\n self.recordings.append(record_audio(name=name, max_time=time))\r\n else:\r\n name = short_name(self.video_name.get())\r\n self.recordings.append(record_video(name=name, max_time=time))\r\n self._recoding_beep()\r\n print(\"Started recording \" + subject)", "def get_student_data(act_df, subject, add_list):\n subject_rows = act_df.loc[:, \"TestSubject\"] == subject\n if np.sum(subject_rows) != 1:\n print(\"Error with getting data for %s\" % subject)\n return pd.Series()\n else:\n subject_data = act_df.loc[subject_rows, add_list]\n return subject_data.squeeze()", "def subject(self, value):\n self.set_property(\"subject\", value)", "def update(self, subject: Subject) -> None:\n pass", "def update(self, subject: Subject) -> None:\n pass", "def update_subject(self, subject):\n if not subject or (self.currentSubject == subject):\n self.currentSubject = \"\"\n self.lbSubject.setText(\"<b>{}</b>\".format(NO_FOCAL_SUBJECT))\n self.lbFocalSubject.setText(NO_FOCAL_SUBJECT)\n else:\n self.currentSubject = subject\n self.lbSubject.setText(\"Subject: <b>{}</b>\".format(self.currentSubject))\n self.lbFocalSubject.setText(\" Focal subject: <b>{}</b>\".format(self.currentSubject))", "def add_grades(self, subject_name, grade_list, attendance=True): \n\t\n\t\tif (isinstance(subject_name, str) and isinstance(grade_list, list)):\n\t\t\tfor grade in grade_list:\n\t\t\t\tself.grades.setdefault(subject_name, []).append(grade)\n\t\t\tself.attendance += 1 if attendance else 0", "def subject(self, subject):\n\n self._subject = subject", "def subject(self, subject):\n\n self._subject = subject", "def subject(self, subject):\n\n self._subject = subject", "def subjectFormSubmit(request, experiment_id):\n experiment = get_object_or_404(Experiment, pk=experiment_id)\n form = SubjectDataForm(request.POST, experiment=experiment)\n\n if form.is_valid():\n response = form.save()\n if experiment.instrument: # administer CDI if instrument is defined\n return HttpResponseRedirect(reverse('experiments:vocabChecklist', args = (str(response.id),)))\n else:\n return proceedToExperiment(experiment, str(response.id))\n t = Template(experiment.demographic_data_page_tpl)\n c = RequestContext(request, {'subject_data_form': form, 'experiment': experiment})\n return HttpResponse(t.render(c))", "def generate_report_sheet(self, subjects):\n\t\tif self.is_student:\n\t\t\treport_sheet = []\n\t\t\t# For each subject, find all student assessments\n\t\t\tfor subject in subjects:\n\t\t\t\tsubject_data = {\n\t\t\t\t\t'subject': subject.name\n\t\t\t\t}\n\t\t\t\tsubject_grades = {}\n\t\t\t\tassessment_types = 
AssessmentType.objects.filter(student_assessments__subject=subject).annotate(\n\t\t\t\t\tnumber=models.Count('student_assessments'), max_score=models.Sum('student_assessments__max_score'))\n\t\t\t\tfor assessment_type in assessment_types:\n\t\t\t\t\t# Probably will optimize this later, but ...\n\t\t\t\t\ttype_weight = StudentAssessmentTypeWeight.objects.filter(subject=subject, assessment_type=assessment_type)[0]\n\t\t\t\t\tsubject_grades[assessment_type.name] = {\n\t\t\t\t\t\t'max_score': assessment_type.max_score,\n\t\t\t\t\t\t'actual_score': 0,\n\t\t\t\t\t\t'max_percentage': type_weight.weight,\n\t\t\t\t\t\t'actual_percentage': 0,\n\t\t\t\t\t}\n\t\t\t\t\tassessments = subject.student_assessments.filter(assessment_type=assessment_type)\n\t\t\t\t\tfor assessment in assessments:\n\t\t\t\t\t\t# Assuming only one grade for now\n\t\t\t\t\t\tstudent_grade = assessment.grades.filter(student=self)[0]\n\t\t\t\t\t\tsubject_grades[assessment_type.name]['actual_score'] += student_grade.score\n\t\t\t\t\tactual_score = subject_grades[assessment_type.name]['actual_score']\n\t\t\t\t\tmax_score = subject_grades[assessment_type.name]['max_score']\n\t\t\t\t\tmax_percentage = type_weight.weight\n\t\t\t\t\tsubject_grades[assessment_type.name]['actual_percentage'] = (float(actual_score)/max_score)*max_percentage\n\t\t\t\tsubject_data['grades'] = subject_grades\n\t\t\t\treport_sheet.append(subject_data)\n\t\t\t# Use final grades to to determine score out of (weight) for each type\n\t\t\t# Determine final grade for the subject\n\t\t\t# Determine final grade (average) overall\n\t\t\tprint('Generated report sheet: {}'.format(report_sheet))\n\t\t\treturn report_sheet\n\t\telse:\n\t\t\tprint('Cannot generate a report sheet for a non-student')" ]
[ "0.5862118", "0.57935876", "0.55824333", "0.5474855", "0.54743844", "0.5459701", "0.5446154", "0.54187894", "0.54110545", "0.53656995", "0.529508", "0.52636135", "0.52636135", "0.52636135", "0.52636135", "0.52636135", "0.5260383", "0.52456486", "0.5243062", "0.520754", "0.5170386", "0.51644814", "0.51644814", "0.5154626", "0.5135344", "0.51311004", "0.51311004", "0.51311004", "0.5125364", "0.51067746" ]
0.66575384
0
Edit attendance of students subject wise.
def edit_attendance(request, attendance_id): emp = models.Teacher.objects.get(user=request.user) if not emp.student_permit: raise Http404 attendance = models.Attendance.objects.filter( pk=attendance_id, soft_delete=False ).first() print("1") context_dict = { "all_subjects": context_helper.subject_helper(), 'attendance_id': attendance_id, } if request.method == 'POST': update_fields = [] activity = '' roll = request.POST.get('roll') subject = request.POST.get('subject_picker') obtained = request.POST.get('attendance') total = request.POST.get('total') student = models.Student.objects.filter( roll_no=roll ).first() if not student: context_dict["message"] = 'Student at does not exist / Roll number has not been alloted.' return render(request, "editAttendance.html", context_dict) try: if attendance.student != student: attendance.student = student update_fields.append('student') activity += 'Changed student to ' + str(student) + '.\n' if attendance.total_attendance != total: attendance.total_attendance = total update_fields.append('total_attendance') activity += 'Changed total attendance to ' + str(total) + '.\n' if attendance.obtained_attendance != obtained: attendance.obtained_attendance = obtained update_fields.append('obtained_attendance') activity += 'Changed obtained attendance to' + str(obtained) + '.\n' if str(attendance.subject.pk) != str(subject): attendance.subject = models.Subject.objects.get(pk=subject) update_fields.append('subject') activity += 'Changed subject to ' + str(subject) + '.\n' attendance.save(update_fields=update_fields) history = models.History( user=emp, activity=activity, activity_type="edit attendance" ) history.save() context_dict["message"] = 'Successfully updated Attendance.' context_dict["success"] = True except Exception as e: context_dict["message"] = str(e) context_dict["success"] = False print(e) context_dict.update(context_helper.get_attendance_info(attendance)) for i in context_dict['subjects']: # use for dynamic try: del context_dict['all_subjects'][i] except: pass if context_dict.get('success', False): return HttpResponseRedirect('/view-attendance') return render( request, "editAttendance.html", context_dict )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_subject(request,subject_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.subject_permit:\n\t\traise Http404\n\tsubject = models.Subject.objects.filter(\n\t\tpk=subject_id, soft_delete=False\n\t).first()\n\tif not subject:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"subject_types\": context_helper.subject_type_helper(),\n\t\t'subject_id': subject_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tcourse = request.POST.get('course_picker')\n\t\tname = request.POST.get('sname')\n\t\tsid = request.POST.get('sid')\n\t\tstype = request.POST.get('subject_picker')\n\t\tmaxmarks = request.POST.get('marks')\n\t\ttry:\n\t\t\tif str(subject.course.pk) != str(course):\n\t\t\t\tsubject.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif subject.s_type != stype:\n\t\t\t\tsubject.s_type = stype\n\t\t\t\tupdate_fields.append('s_type')\n\t\t\t\tactivity += 'Changed subject type to ' + str(stype) + '.\\n'\n\t\t\tif subject.name != name:\n\t\t\t\tsubject.name = name\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed subject name to' + str(name) + '.\\n'\n\t\t\tif subject.s_id != sid:\n\t\t\t\tsubject.s_id = sid\n\t\t\t\tupdate_fields.append('s_id')\n\t\t\t\tactivity += 'Changed subject ID to' + str(sid) + '.\\n'\n\t\t\tif subject.max_marks != maxmarks:\n\t\t\t\tsubject.max_marks = maxmarks\n\t\t\t\tupdate_fields.append('max_marks')\n\t\t\t\tactivity += 'Changed maximum marks to' + str(maxmarks) + '.\\n'\n\t\t\tsubject.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit subject\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_subject_info(subject))\n\tfor i in context_dict['courses']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['subject_type']:\n\t\ttry: context_dict['subject_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-subjects')\n\treturn render(\n\t\trequest, \"editSubject.html\", context_dict\n\t)", "def edit_student(request, student_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tstudent = models.Student.objects.filter(\n\t\tpk=student_id, soft_delete=False\n\t).first()\n\tif not student:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_types\": context_helper.gender_helper(),\n\t\t'student_id': student_id\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = 
request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\taddress_flag = request.POST.get('address_flag')\n\t\tprint (address_flag)\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\t\tupdate_fields.append('photo')\n\t\t\t\tactivity += 'Changed photo.\\n'\n\t\t\tif student.name != sname:\n\t\t\t\tstudent.name = sname\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed name to '+ str(sname) +'.\\n'\n\t\t\tif student.roll_no != roll:\n\t\t\t\tstudent.roll_no = roll\n\t\t\t\tupdate_fields.append('roll_no')\n\t\t\t\tactivity += 'Changed roll number to '+ str(roll) +'.\\n'\n\t\t\tif str(student.dob) != str(dob):\n\t\t\t\tstudent.dob = dob\n\t\t\t\tupdate_fields.append('dob')\n\t\t\t\tactivity += 'Changed DOB to ' + str(dob) + '.\\n'\n\t\t\tif student.gender != gender:\n\t\t\t\tstudent.gender = gender\n\t\t\t\tupdate_fields.append('gender')\n\t\t\t\tactivity += 'Changed gender to ' + str(gender) + '.\\n'\n\t\t\tif student.blood_group != bgroup:\n\t\t\t\tstudent.blood_group = bgroup\n\t\t\t\tupdate_fields.append('blood_group')\n\t\t\t\tactivity += 'Changed blood group to ' + str(bgroup) + '.\\n'\n\t\t\tif student.phone != phone:\n\t\t\t\tstudent.phone = phone\n\t\t\t\tupdate_fields.append('phone')\n\t\t\t\tactivity += 'Changed phone number to ' + str(phone) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.perm_address != permadd:\n\t\t\t\tstudent.perm_address = permadd\n\t\t\t\tupdate_fields.append('perm_address')\n\t\t\t\tactivity += 'Changed permanent address to ' + str(permadd) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.guardian_name != gname:\n\t\t\t\tstudent.guardian_name = gname\n\t\t\t\tupdate_fields.append('guardian_name')\n\t\t\t\tactivity += 'Changed current address to ' + str(gname) + '.\\n'\n\t\t\tif student.guardian_phone != gphone:\n\t\t\t\tstudent.guardian_phone = gphone\n\t\t\t\tupdate_fields.append('guardian_phone')\n\t\t\t\tactivity += 'Changed guardian phone to ' + str(gphone) + '.\\n'\n\t\t\tif student.guardian_type != gtype:\n\t\t\t\tstudent.guardian_type = gtype\n\t\t\t\tupdate_fields.append('guardian_type')\n\t\t\t\tactivity += 'Changed current address to ' + str(gtype) + '.\\n'\n\t\t\tif str(student.course.pk) != str(course):\n\t\t\t\tstudent.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif student.batch != batch:\n\t\t\t\tstudent.batch = batch\n\t\t\t\tupdate_fields.append('batch')\n\t\t\t\tactivity += 'Changed batch to' + str(batch) + '.\\n'\n\t\t\tif student.email != email:\n\t\t\t\tstudent.email = email\n\t\t\t\tupdate_fields.append('email')\n\t\t\t\tactivity += 'Changed email to ' + str(email) + '.\\n'\n\t\t\tif student.address_flag != 
address_flag:\n\t\t\t\tstudent.address_flag = address_flag\n\t\t\t\tupdate_fields.append('address_flag')\n\t\t\t\tactivity += 'Changed address flag.'\n\t\t\tstudent.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_student_info(student))\n\tif type(context_dict['dob']) == str:\n\t\tcontext_dict['dob'] = datetime.strptime(context_dict['dob'], '%Y-%m-%d')\n\tfor i in context_dict['course']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['blood_group']:\n\t\ttry: context_dict['blood_groups'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['guardian_type']:\n\t\ttry: context_dict['guardian_types'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['gender_type']:\n\t\ttry: context_dict['gender_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-students')\n\treturn render(\n\t\trequest, \"editStudent.html\", context_dict\n\t)", "def add_attendance(request):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t}\n\tif request.method == \"POST\":\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tattendance = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tduplicate_check = models.Attendance.objects.filter(\n\t\t\tstudent=student, subject=subject,\n\t\t).first()\n\t\tif duplicate_check:\n\t\t\tcontext_dict[\"message\"] = 'Attendance already exist.'\n\t\t\tduplicate_check.soft_delete=False\n\t\t\tduplicate_check.save()\n\t\t\treturn render(request, \"addAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tattendance_data = models.Attendance(\n\t\t\t\tstudent=student,\n\t\t\t\tsubject=models.Subject.objects.get(pk=subject),\n\t\t\t\ttotal_attendance=total,\n\t\t\t\tobtained_attendance=attendance\n\t\t\t)\n\t\t\tattendance_data.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added attendance of ' + str(student) + \n\t\t\t\t\tstr(subject) +'.\\n',\n\t\t\t\tactivity_type=\"add attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(request, \"addAttendance.html\", context_dict)", "def __ui_update_student(self):\n student_id = input(\"student id: \")\n student_name = input(\"student discipline_name: \")\n disciplines_list = []\n\n discipline_name = None\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.update_student(student_id, 
student_name, disciplines_list)\n print(\"Update student successful\\n\")\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return", "def update_attendance(selection):\n\n t = date.today()\n today = str(t) + '.csv'\n name = list(selection)\n with open(today, 'a', newline='') as attendance:\n fieldnames = ['last_name', 'first_name', 'status']\n writer = writer(attendance, fieldnames=fieldnames)\n for row in reader:\n if row['last_name'] == name[0]:\n writer.writerow({'status': 'PRESENT'})\n else:\n print('Student could not be found on the roster.')", "def edit_student(request, s_id):\n user = CustomUser.objects.get(id=s_id)\n student = Student.objects.get(user_id=s_id)\n\n if request.method == 'POST':\n user_edit_form = EditUserForm(request.POST, instance=user)\n student_edit_form = EditStudentForm(request.POST, instance=student)\n\n if user_edit_form.is_valid() and student_edit_form.is_valid():\n user_edit_form.save()\n student_edit_form.save()\n messages.success(request, \"The student's account has been edited successfully\")\n return redirect('student_account', s_id=s_id)\n else:\n messages.error(request, \"The form has not been filled correctly\")\n\n else:\n user_edit_form = EditUserForm(instance=user)\n student_edit_form = EditStudentForm(instance=student)\n\n context = {\n 'user_edit_form': user_edit_form,\n 'student_edit_form': student_edit_form\n }\n return render(request, 'main/edit_student.html', {'user_edit_form': context['user_edit_form'],\n 'student_edit_form': context['student_edit_form']})", "def modify_an_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n opt = self.input_options(['midterm', 'finalterm'], 1, 'Which test do you want to modify?')\n score = self.input_score()\n\n if opt.upper() == 'MIDTERM':\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'midterm'] = score\n else:\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'finalterm'] = score", "def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n SET course_id = ?, course_section_id = ?\n WHERE student_id = ?\n (?,?,?)\"\"\",\n (course_id, course_section_id, student_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1", "def updateStudents(request):\n\n return updateRole('gsoc_student')", "def modify_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n if request.method == 'POST':\n form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n if form.is_valid():\n form.save()\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n else:\n form = LightAssignmentypeForm(instance=assignmentype)\n context = {}\n context['assignmentype_id'] = assignmentype.id\n context['message'] = 'Modify details of your assignment '\\\n '(keep current student list)'\n context['form'] = form\n context['type_post'] = 'modify'\n return render(request, 'gradapp/assignmentype_form.html', context)\n else:\n return redirect('gradapp:index')", "def edit_appointment(request, id):\n users = User.objects.all()\n appointment = get_object_or_404(Appointment, pk=id)\n if request.POST:\n post = request.POST\n date_string = post.get(\"date\") + \"-\" 
+ post.get(\"time\")\n try:\n date = datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M')\n appointment.date = date\n except ValueError:\n pass\n the_user = request.user\n notes = post.get(\"notes\")\n appointment.notes = notes\n\n if the_user.userprofile.is_doctor():\n try:\n patient_id = int(post.get(\"patient\", the_user.pk))\n patient = User.objects.get(pk=patient_id)\n appointment.patient = patient\n except ValueError:\n pass\n\n elif request.user.userprofile.is_patient():\n try:\n doctor_id = int(post.get(\"doctor\", the_user.pk))\n doctor = User.objects.get(pk=doctor_id)\n appointment.doctor = doctor\n except ValueError:\n pass\n\n if appointment:\n messages.add_message(request, messages.SUCCESS, 'Your changes have been saved.')\n else:\n messages.add_message(request, messages.ERROR, 'An error occurred. Please contact an admin for assistance.')\n appointment.save()\n return redirect('view_appointments')\n return render(request, 'edit_appointment.html', {'appointment': appointment,\n 'the_user': request.user,\n 'users': users})", "def editStudent(s, number):\n nname = input(\"New Name: \")\n nnumber = input(\"New Number: \")\n ngpa = input(\"New GPA: \")\n nfield = input(\"New Field: \")\n\n deleteStudent(s, number)\n student = Student(nname, nnumber, ngpa, nfield)\n if t.insert(nnumber, student):\n ht.insert(student)\n print(nname, \"edited successfully.\")\n else:\n print(\"new student number is not valid.\")", "def test15_edit_data_first_student_with_teacher(self):\n students_list_with_edit_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_edit_student_button(). \\\n enter_student_data(data['third_new_data_student']). \\\n enter_name_approved_by_custom(data['third_new_data_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). 
\\\n students_table()\n student_with_changes = \\\n data_student_for_check(data['third_new_data_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student_with_changes,\n students_list_with_edit_student)\n return self.students_page", "def update_event_attendance_from_tasks(event):\n learners = event.task_set.filter(role__name='learner').count()\n Event.objects \\\n .filter(pk=event.pk) \\\n .filter(Q(attendance__lt=learners) | Q(attendance__isnull=True)) \\\n .update(attendance=learners)", "def test_superuser_edit_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.superuser.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def update_by_id(self, subject_id: str, new_subject_data: any) -> any:\n pass", "def students_update_enrollment(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n\r\n action = request.GET.get('action')\r\n identifiers_raw = request.GET.get('identifiers')\r\n identifiers = _split_input_list(identifiers_raw)\r\n auto_enroll = request.GET.get('auto_enroll') in ['true', 'True', True]\r\n email_students = request.GET.get('email_students') in ['true', 'True', True]\r\n\r\n email_params = {}\r\n if email_students:\r\n course = get_course_by_id(course_id)\r\n email_params = get_email_params(course, auto_enroll)\r\n\r\n results = []\r\n for identifier in identifiers:\r\n # First try to get a user object from the identifer\r\n user = None\r\n email = None\r\n try:\r\n user = get_student_from_identifier(identifier)\r\n except User.DoesNotExist:\r\n email = identifier\r\n else:\r\n email = user.email\r\n\r\n try:\r\n # Use django.core.validators.validate_email to check email address\r\n # validity (obviously, cannot check if email actually /exists/,\r\n # simply that it is plausibly valid)\r\n validate_email(email) # Raises ValidationError if invalid\r\n\r\n if action == 'enroll':\r\n before, after = enroll_email(course_id, email, auto_enroll, email_students, email_params)\r\n elif action == 'unenroll':\r\n before, after = unenroll_email(course_id, email, email_students, email_params)\r\n else:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"Unrecognized action '{}'\".format(action)\r\n ))\r\n\r\n except ValidationError:\r\n # Flag this email as an error if invalid, but continue checking\r\n # the remaining in the list\r\n results.append({\r\n 'identifier': identifier,\r\n 'invalidIdentifier': True,\r\n })\r\n\r\n except Exception as exc: # pylint: disable=W0703\r\n # catch and log any exceptions\r\n # so that one error doesn't cause a 500.\r\n log.exception(\"Error while #{}ing student\")\r\n log.exception(exc)\r\n results.append({\r\n 'identifier': identifier,\r\n 'error': True,\r\n })\r\n\r\n else:\r\n results.append({\r\n 'identifier': identifier,\r\n 'before': before.to_dict(),\r\n 'after': after.to_dict(),\r\n })\r\n\r\n response_payload = {\r\n 'action': action,\r\n 'results': results,\r\n 'auto_enroll': auto_enroll,\r\n }\r\n return JsonResponse(response_payload)", "def update_views(self):\n # Get correct date format\n self.date_string = self.date.toString(self.date_format)\n \n # Clear Models\n self.availModel.clear()\n self.attendModel.clear()\n \n for student in self.db.get_attendance_for_date(self.date_string):\n if student[1] != None and student[1] 
!= '':\n name = str(student[1])\n else:\n name = str(student[2]) + ' ' + str(student[3])\n # Add student to the attended list view\n itemlist = [QtGui.QStandardItem(name), \\\n QtGui.QStandardItem(str(student[0]))]\n self.attendModel.appendRow(itemlist)\n\n\n for student in self.db.get_students():\n # Don't add them to available list if they attended given date\n if student[1] != None and student[1] != '':\n name = str(student[1])\n else:\n name = str(student[2]) + ' ' + str(student[3])\n # Add student to available list view\n itemlist = [QtGui.QStandardItem(name), \\\n QtGui.QStandardItem(str(student[0]))]\n if self.attendModel.findItems(name) == []:\n self.availModel.appendRow(itemlist)\n\n\n self.availModel.sort(0)\n self.attendModel.sort(0)", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def update(self, subject: Subject) -> None:\n pass", "def update(self, subject: Subject) -> None:\n pass", "def option1(self):\n ID = int(input(\"ID: \"))\n name = input(\"Name: \")\n attNr = int(input(\"Number of attendances: \"))\n grade = int(input(\"Grade: \"))\n self.__srv.addStud(ID,name,attNr,grade)", "def edit():", "def on_edit_students_select(self):\n edit_window = Students()\n edit_window.exec_()", "def test_superuser_edit_assessment(self):\n req, resp = data.assessment_02_request, data.assessment_02_response\n resp['contract'] = self.contract['id']\n\n response = self.superuser.put(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def updateEMPStudyData(self, study_id, study_score, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.update_emp_study_data', [study_id, study_score, web_app_user_id])", "def updateEMPStudy(self, study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, \n portal_type, study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().callproc('qiime_assets.emp_study_update', \n [study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, portal_type, \n study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries])", "def on_remove_clicked(self):\n selected_indexes = self.ui.attendListView.selectedIndexes()\n for index in selected_indexes:\n row = self.attendModel.itemFromIndex(index).row()\n student = self.attendModel.item(row, 0).text()\n sid = self.attendModel.item(row, 1).text()\n try:\n # Actually add the 
student for the date into the database\n self.db.student_deattend(sid, self.date_string)\n except KeyError:\n # Display error window if student missing\n err_msg = QtGui.QErrorMessage()\n err_msg.showMessage(\"Sid not found for student %s\" % student)\n\n self.update_views()", "def assert_can_edit_asmt(selenium, asmt):\n asmts_ui_service = webui_service.AssessmentsService(selenium)\n info_page = asmts_ui_service.open_info_page_of_obj(asmt)\n _assert_title_editable(asmt, selenium, info_page)", "def update_subject(self, subject):\n if not subject or (self.currentSubject == subject):\n self.currentSubject = \"\"\n self.lbSubject.setText(\"<b>{}</b>\".format(NO_FOCAL_SUBJECT))\n self.lbFocalSubject.setText(NO_FOCAL_SUBJECT)\n else:\n self.currentSubject = subject\n self.lbSubject.setText(\"Subject: <b>{}</b>\".format(self.currentSubject))\n self.lbFocalSubject.setText(\" Focal subject: <b>{}</b>\".format(self.currentSubject))" ]
[ "0.71086836", "0.6244524", "0.62197715", "0.56363916", "0.5607704", "0.55585027", "0.55038476", "0.55020773", "0.5450966", "0.539527", "0.5375184", "0.5331032", "0.52976197", "0.5293877", "0.5229473", "0.52075845", "0.519442", "0.5174884", "0.5143767", "0.51074713", "0.51074713", "0.50429636", "0.50405216", "0.5029527", "0.50183636", "0.5016674", "0.49936086", "0.4989607", "0.49808154", "0.49806327" ]
0.7827709
0
View attendance of students.
def view_attendance(request):
    context_dict = {
        'title': 'All Attendance',
    }
    return render(request, "viewAttendance.html", context_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_attendance(request, attendance_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tattendance = models.Attendance.objects.filter(\n\t\tpk=attendance_id, soft_delete=False\n\t).first()\n\tprint(\"1\")\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t'attendance_id': attendance_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tobtained = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tif not student:\n\t\t\tcontext_dict[\"message\"] = 'Student at does not exist / Roll number has not been alloted.'\n\t\t\treturn render(request, \"editAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tif attendance.student != student:\n\t\t\t\tattendance.student = student\n\t\t\t\tupdate_fields.append('student')\n\t\t\t\tactivity += 'Changed student to ' + str(student) + '.\\n'\n\t\t\tif attendance.total_attendance != total:\n\t\t\t\tattendance.total_attendance = total\n\t\t\t\tupdate_fields.append('total_attendance')\n\t\t\t\tactivity += 'Changed total attendance to ' + str(total) + '.\\n'\n\t\t\tif attendance.obtained_attendance != obtained:\n\t\t\t\tattendance.obtained_attendance = obtained\n\t\t\t\tupdate_fields.append('obtained_attendance')\n\t\t\t\tactivity += 'Changed obtained attendance to' + str(obtained) + '.\\n'\n\t\t\tif str(attendance.subject.pk) != str(subject):\n\t\t\t\tattendance.subject = models.Subject.objects.get(pk=subject)\n\t\t\t\tupdate_fields.append('subject')\n\t\t\t\tactivity += 'Changed subject to ' + str(subject) + '.\\n'\n\t\t\tattendance.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_attendance_info(attendance))\n\tfor i in context_dict['subjects']:\n\t\t# use for dynamic\n\t\ttry: del context_dict['all_subjects'][i]\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-attendance')\n\treturn render(\n\t\trequest, \"editAttendance.html\", context_dict\n\t)", "def update_views(self):\n # Get correct date format\n self.date_string = self.date.toString(self.date_format)\n \n # Clear Models\n self.availModel.clear()\n self.attendModel.clear()\n \n for student in self.db.get_attendance_for_date(self.date_string):\n if student[1] != None and student[1] != '':\n name = str(student[1])\n else:\n name = str(student[2]) + ' ' + str(student[3])\n # Add student to the attended list view\n itemlist = [QtGui.QStandardItem(name), \\\n QtGui.QStandardItem(str(student[0]))]\n self.attendModel.appendRow(itemlist)\n\n\n for student in self.db.get_students():\n # Don't add them to available list if they attended given date\n if student[1] != None and student[1] != '':\n name = str(student[1])\n else:\n name = str(student[2]) + ' ' + str(student[3])\n # Add student to available list view\n itemlist = [QtGui.QStandardItem(name), \\\n QtGui.QStandardItem(str(student[0]))]\n if self.attendModel.findItems(name) == 
[]:\n self.availModel.appendRow(itemlist)\n\n\n self.availModel.sort(0)\n self.attendModel.sort(0)", "def view_students(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Students',\n\t}\n\treturn render(request, \"viewStudent.html\", context_dict)", "def student_dashboard(self, request, activity, session):\n if request.method == \"GET\" and request.GET.get(\"studentid\"):\n if int(request.GET.get(\"studentid\")) == request.user.id:\n return self.student_summary(request.GET.get(\"studentid\"), request, activity)\n raise PermissionDenied()", "def student_view():\n sessions = []\n user_id = session.get('user_id')\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT sessions.course_id, sessions.location, sessions.days, sessions.class_time,\n courses.name AS class_name, roster.session_id\n FROM sessions JOIN courses on sessions.course_id = courses.course_id\n JOIN roster on roster.session_id = sessions.id\n JOIN users on users.id = roster.student_id\n WHERE users.id = %s\"\"\",\n (user_id,))\n student_classes = cur.fetchall()\n\n cur.execute(\"\"\"SELECT major FROM users\n WHERE id = %s\"\"\",\n (user_id,))\n student_major = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"layouts/student-home.html\", student_classes=student_classes, student_major=student_major)", "def add_attendance(request):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t}\n\tif request.method == \"POST\":\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tattendance = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tduplicate_check = models.Attendance.objects.filter(\n\t\t\tstudent=student, subject=subject,\n\t\t).first()\n\t\tif duplicate_check:\n\t\t\tcontext_dict[\"message\"] = 'Attendance already exist.'\n\t\t\tduplicate_check.soft_delete=False\n\t\t\tduplicate_check.save()\n\t\t\treturn render(request, \"addAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tattendance_data = models.Attendance(\n\t\t\t\tstudent=student,\n\t\t\t\tsubject=models.Subject.objects.get(pk=subject),\n\t\t\t\ttotal_attendance=total,\n\t\t\t\tobtained_attendance=attendance\n\t\t\t)\n\t\t\tattendance_data.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added attendance of ' + str(student) + \n\t\t\t\t\tstr(subject) +'.\\n',\n\t\t\t\tactivity_type=\"add attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(request, \"addAttendance.html\", context_dict)", "def teacher_dashboard(self, request, activity, session):\n if request.method == \"GET\" and request.GET.get(\"studentid\"):\n return self.student_summary(request.GET.get(\"studentid\"), request, activity)\n\n else:\n return self.course_summary(request, activity)", "def select_student_enrollment_detailed(self, student_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', 
c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN course_enrollments ce\n ON ce.course_section_id = cs.course_section_id AND ce.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE ce.student_id = ?\"\"\",\n (student_id,),\n )\n return cursor.fetchall()", "def browse_students(request):\n students = Student.objects.filter(current_mentor=None)\\\n .exclude(Q(status='drop-out') | Q(status='unresponsive') | Q(status='retainer')\n | Q(status='alum') | Q(status='paused'))\n return render(request, 'match/browse_students.html', {'students': students})", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def student_view(self, context=None):\n if self.is_course_staff():\n return self.staff_view()\n gea_assessment = GeaAssessment(User.objects.get(id=self.xmodule_runtime.user_id), self)\n frag = Fragment(loader.render_template(\"templates/edx_gea/student.html\",\n {'score' : gea_assessment.score,\n 'comment' : gea_assessment.comment}))\n return frag", "def __ui_list_students(self):\n try:\n print(str(self.__student_controller))\n except RepositoryException as re:\n print(re)\n return", "def get_attendance(employee, date):\n # empty list to append the date come from database, after convert it from tuple to string\n day = []\n # excute sql query to get list of data each date come as tuple [('2020-04-01',)]\n FetchDay = c.execute(\"SELECT day FROM Attendance where employee=:employee\", {\n 'employee': employee})\n # get all date as list of tuples\n day_as_tuple = c.fetchall()\n\n # iterate over list of tuple and append each date to day list\n for days in day_as_tuple:\n for ele in days:\n day.append(ele)\n\n # test the case to check if date in day list or not\n if date in day:\n attended = True\n else:\n attended = False\n\n # make report as dictionary\n report = {}\n report['attended'] = attended\n # Time duration function to compute time duration\n duration = TimeDuration(employee, date)\n report['duration'] = str(duration)[:5]\n return report", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n\n\n rows = hackbright.list_projects(github)\n\n return render_template (\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows\n )", "def view_student_gradebook():\n\n user_id = session.get('user_id')\n courses = []\n grades = []\n con = db.get_db()\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT DISTINCT courses.course_id, (ROUND(sum(grades.points_received)/sum(grades.total_points), 2 )*100)\n as total_grade, 
roster.session_id as class_session,\n courses.name as class_name, users.name AS teacher_name, grades.student_id\n FROM courses JOIN sessions on courses.course_id = sessions.course_id\n\t\t\t\t JOIN users on courses.teacherid= users.id\n JOIN assignments on assignments.session_id = sessions.id\n JOIN grades on grades.assignment_id = assignments.assignment_id\n JOIN roster on roster.session_id = sessions.id\n WHERE grades.student_id = %s\n\t GROUP BY grades.student_id, roster.session_id, courses.course_id, users.id\"\"\",\n (user_id,))\n courses = cur.fetchall()\n\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/student_view.html\", courses=courses)", "def find_students(self):\n from quizzer.models.attendance import Attendance\n from quizzer.models.semester import Semester\n\n semester = Semester.get_current()\n\n for attendance in Attendance.objects: # TODO: Use indexed query later.\n if attendance.semester == semester and attendance.class_ == self:\n yield attendance.student", "def student(identificator):\n student_table = db.get_table('student')\n student = student_table.get(identificator)\n if student is None:\n abort(404)\n discipline = db.get_table('discipline')\n disciplines = discipline.get()\n scores = student_table.get_scores(identificator)\n for each in disciplines:\n if each['id'] not in scores:\n scores[each['id']] = {'score': '', 'id': 0}\n form = StudentForm()\n return render_template(\n 'student.html', student=student,\n form=form, disciplines=disciplines,\n scores=scores\n )", "def mark_attendance(employee):\r\n # loads date from computer\r\n today = datetime.datetime.now()\r\n mark = today.strftime(\"%d/%m/%Y %H:%M\")\r\n # adds to attendance list in object\r\n employee.attendance.append(mark)\r\n return employee.attendance", "def student_view(self, context=None):\n # pylint: disable=no-member\n log.info(\"Studnent view called\")\n log.info(self)\n context = {\n \"student_state\": json.dumps(self.student_state()),\n \"id\": self.location.name.replace('.', '_'),\n \"max_file_size\": getattr(\n settings, \"STUDENT_FILEUPLOAD_MAX_SIZE\",\n self.STUDENT_FILEUPLOAD_MAX_SIZE\n )\n }\n fragment = Fragment()\n fragment.add_content(\n render_template(\n 'templates/assignment/show.html',\n context\n )\n )\n fragment.add_javascript(_resource(\"static/js/src/agea.js\"))\n fragment.initialize_js('ExcelSheetAssessmentXBlock')\n return fragment", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def generate_student_report(self):\n \n period_type = self.parameter_dict.get(\"period_type\", \"monthly\")\n insert_gender_markers = self.parameter_dict.get(\n \"insert_gender_markers\", False)\n period = [(self.start_date,self.end_date)]\n for student in self.students:\n self.table_data.append(self._generate_single_student_report_line(\n student,period, False))\n self.keys_list.append(\"\")\n self.table_descriptor = \\\n [('name','string','Name'),\n ('days_present','number', 'Days Present'),\n ('percent_present', 'number', '% Present')]", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n 
github=github,\n grades=grades)\n\n return html", "def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n html = render_template('student_info.html',\n first=first,\n last=last,\n github=github)\n return html", "def test_read_attendances_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n role=STUDENT,\n organization=self.organization,\n )\n\n self.assert_user_cannot_read_attendances(organization_access.user, self.live)", "def display_student(s_info):\n print('')\n print('Your information:')\n print(f'{s_info.student_id} - {s_info.first_name} {s_info.last_name}')", "def atten_employee(list_emp, name):\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Employee Attendance Report:\\n\")\r\n for worker in list_emp:\r\n if worker.name == name:\r\n attendance_by_emp.write(\"%s-\\n\" % worker.name)\r\n for date in worker.attendance:\r\n attendance_by_emp.write(\"\\t\" + date + '\\n')\r\n print(\"Report issued!\\n\")\r\n return\r\n print(\"%s is not in employee log\\n\" % name)\r\n return", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html", "def view_appointments(request):\n\n appointments = Appointment.objects.all().order_by('date')\n\n if request.user.userprofile.is_patient():\n appointments = Appointment.objects.filter(patient=request.user.id).order_by('date')\n\n elif request.user.userprofile.is_doctor():\n appointments = Appointment.objects.filter(doctor=request.user.id).order_by('date')\n\n return render(request, 'view_appointments.html', {'appointments': appointments,\n 'the_user': request.user})" ]
[ "0.6413403", "0.62919253", "0.62873226", "0.6202059", "0.6138102", "0.6109332", "0.60675406", "0.58709055", "0.58479583", "0.58447534", "0.5835368", "0.58113986", "0.5766857", "0.57441", "0.5713139", "0.57057875", "0.5688026", "0.5664378", "0.5640548", "0.5639207", "0.563774", "0.56354785", "0.5621009", "0.56056213", "0.55948293", "0.5582429", "0.5580051", "0.557788", "0.5535677", "0.5525109" ]
0.7802853
0
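The view_attendance document above only sets a page title, while the neighbouring add/edit views show that attendance rows live in a models.Attendance table with a soft_delete flag. A minimal sketch of a listing view that actually hands those rows to the template is given below; the model and flag names come from those neighbouring snippets, and everything else (the import style, variable names) is an assumption rather than the original code.

from django.shortcuts import render

from . import models  # assumed app-relative import, mirroring the other views

def view_attendance(request):
    # Hypothetical fleshed-out listing: exclude soft-deleted rows, as the
    # edit view does, and expose the queryset to the template.
    attendances = models.Attendance.objects.filter(soft_delete=False)
    context_dict = {
        'title': 'All Attendance',
        'attendances': attendances,
    }
    return render(request, "viewAttendance.html", context_dict)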
Test if height input without inches is extracted and converted into cm
def test_height_feet_only(self):
    result = height_to_cm("6'")
    self.assertEqual(result, 183)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inches_valid(self):\n result = inch_to_cm(\"72\\\"\")\n self.assertEqual(result, 183)", "def test_height_valid(self):\n result = height_to_cm(\"5' 10\\\"\")\n self.assertEqual(result, 178)", "def test_height_invalid_input(self):\n result = height_to_cm(\"--\")\n self.assertIsNone(result)", "def test_inches_invalid_input(self):\n result = inch_to_cm(\"--\")\n self.assertIsNone(result)", "def height_conversion(height):\n num_list = re.findall(r\"\\d+\", height)\n inches = int(num_list[0]) * 12 + int(num_list[1])\n return inches", "def HeightTest(string):\n\t# pull out the last 2 characters of the string\n\tunit = string[-2:]\n\n\t# if there are no units (unit is not 'in' or 'cm')\n\t# return False\n\tif (unit != 'in') & (unit != 'cm'):\n\t\treturn False\n\n\t# pull out the measurement, cast as integer\n\tmeasurement = int(string[:-2])\n\n\tif unit == 'cm':\n\t\treturn NumberTest(measurement, 150, 193)\n\tif unit == 'in':\n\t\treturn NumberTest(measurement, 59, 76)\n\treturn False", "def cm2inch(size):\n return size / 2.54", "def cm2inch(size):\n return size / 2.54", "def cm2inch(size):\n return size / 2.54", "def is_valid_height(height: str) -> bool:\n split_height_at_centimeters = height.split(\"cm\")\n if len(split_height_at_centimeters) == 1:\n split_height_at_inches = height.split(\"in\")\n return split_height_at_inches[0].isnumeric() and 59 <= int(split_height_at_inches[0]) <= 76\n else:\n return split_height_at_centimeters[0].isnumeric() and 150 <= int(split_height_at_centimeters[0]) <= 193", "def athlete_height(a):\r\n if pd.isnull(a):\r\n return np.nan\r\n if \"cm\" in a:\r\n temp = round(int(a.replace(\" cm\",\"\")),0)\r\n if \"'\" in a:\r\n temp = a.split('\"')[0]\r\n feet = int(temp.split(\"'\")[0])\r\n inches = int(temp.split(\"'\")[1])\r\n cm = round(feet*30.48 + 2.54*inches,0)\r\n temp = cm\r\n if \"in\" in a:\r\n inches = round(int(a.replace(\" in\",\"\")), 0)\r\n kg = round(2.54 * inches,0)\r\n temp = kg \r\n \r\n if temp < 120 or temp > 225:\r\n return np.nan\r\n else:\r\n return temp", "def cm(feet = 0, inches = 0):\n inches_to_cm = inches * 2.54 # first convert inches to centimeters\n feet_to_cm = feet * 12 * 2.54 # second convert feet to centimeters\n return inches_to_cm + feet_to_cm # return the combined value", "def inches(n):\n return n * 25.4", "def inches_to_mm(inches):\n\tmm=inches*25.4\n\treturn mm", "def inches_to_mm(inches):\n mm = inches * 25.4\n return mm", "def __conv_inch(length_mm):\n return length_mm / 25.4", "def mm_to_inches(rainfall_in_mm):\r\n rainfall_in_inches = rainfall_in_mm * 0.0393701\r\n return rainfall_in_inches", "def getRangeInches(self) -> float:\n ...", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:", "def validate_height(passport: map) -> bool:\n if passport.get('hgt'):\n if passport['hgt'].count('cm'):\n val = int(passport['hgt'][:passport['hgt'].find('cm')])\n if 150 <= val <= 193:\n return True\n elif passport['hgt'].count('in'):\n val = int(passport['hgt'][:passport['hgt'].find('in')])\n if 59 <= val <= 76:\n return True\n else:\n return False\n\n return False", "def cm2inch(*tupl):\n inch = 2.54\n if type(tupl[0]) == tuple:\n return tuple(i/inch for i in tupl[0])\n else:\n return 
tuple(i/inch for i in tupl)", "def xl_row_height(cm):\n return 28.35 * cm", "def _get_cm_per_pixel(self, diameter):\n CM_PER_INCH = 2.54\n pixels_per_inch = 2 / diameter\n cm_per_pixel = CM_PER_INCH * pixels_per_inch\n return cm_per_pixel", "def footprint_height():", "def cm2inch(x: Union[float, Sequence[float], NDArray]) -> Sequence[float]:\n return list(np.array(x) / 2.54)", "def valid_hgt(cls, hgt):\n check = re.search(r\"^(\\d+)(in|cm)$\", hgt)\n if not check:\n raise ValueError(f\"Invalid hgt {hgt}\")\n n, unit = check.groups()\n n = int(n)\n if unit == \"in\":\n if not (76 >= n >= 59):\n raise ValueError(\"Invalid hgt\")\n if unit == \"cm\":\n if not (193 >= n >= 150):\n raise ValueError(\"Invalid hgt\")\n return hgt", "def unitsDetector(self, num):\n try:\n num = int(num)\n except:\n sys.exit('Invalid input! Method only takes ints or floats.')\n \n digits = 0\n while num > 1:\n num /= 10\n digits += 1\n \n digits -= 1\n ind = digits // 3\n units = {3: 'B', 2: 'M', 1: 'K', 0: ''}[ind]\n \n return 10 ** (ind * 3), units" ]
[ "0.79813945", "0.79194003", "0.7545475", "0.7360076", "0.72949666", "0.67772716", "0.6567673", "0.6567673", "0.6567673", "0.6473662", "0.63965285", "0.63748944", "0.6205345", "0.616127", "0.6080251", "0.59634614", "0.58540994", "0.58296794", "0.57459766", "0.56851465", "0.56851465", "0.56851465", "0.5681328", "0.5643984", "0.5627788", "0.5579725", "0.55178195", "0.54943067", "0.54877526", "0.5410548" ]
0.81370246
0
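The tests collected in this row characterise a height_to_cm helper ("6'" -> 183, "5' 10\"" -> 178, "--" -> None) without showing it. A minimal sketch that satisfies those expectations follows; the body is reverse-engineered from the asserted values and is an assumption, not the original implementation.

import re

def height_to_cm(height):
    # Accepts feet-only ("6'") or feet-and-inches ("5' 10\"") notation;
    # anything else returns None, matching the invalid-input test.
    match = re.match(r"^(\d+)'(?:\s*(\d+)\")?$", height.strip())
    if not match:
        return None
    feet = int(match.group(1))
    inches = int(match.group(2)) if match.group(2) else 0
    return round((feet * 12 + inches) * 2.54)

Rounding to the nearest whole centimetre is what makes 6 feet come out as 183 rather than 182.88.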
Test if valid height input with feet and inches provided is extracted and converted into cm
def test_inches_valid(self):
    result = inch_to_cm("72\"")
    self.assertEqual(result, 183)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_height_feet_only(self):\n result = height_to_cm(\"6'\")\n self.assertEqual(result, 183)", "def test_height_valid(self):\n result = height_to_cm(\"5' 10\\\"\")\n self.assertEqual(result, 178)", "def test_height_invalid_input(self):\n result = height_to_cm(\"--\")\n self.assertIsNone(result)", "def test_inches_invalid_input(self):\n result = inch_to_cm(\"--\")\n self.assertIsNone(result)", "def height_conversion(height):\n num_list = re.findall(r\"\\d+\", height)\n inches = int(num_list[0]) * 12 + int(num_list[1])\n return inches", "def cm(feet = 0, inches = 0):\n inches_to_cm = inches * 2.54 # first convert inches to centimeters\n feet_to_cm = feet * 12 * 2.54 # second convert feet to centimeters\n return inches_to_cm + feet_to_cm # return the combined value", "def HeightTest(string):\n\t# pull out the last 2 characters of the string\n\tunit = string[-2:]\n\n\t# if there are no units (unit is not 'in' or 'cm')\n\t# return False\n\tif (unit != 'in') & (unit != 'cm'):\n\t\treturn False\n\n\t# pull out the measurement, cast as integer\n\tmeasurement = int(string[:-2])\n\n\tif unit == 'cm':\n\t\treturn NumberTest(measurement, 150, 193)\n\tif unit == 'in':\n\t\treturn NumberTest(measurement, 59, 76)\n\treturn False", "def is_valid_height(height: str) -> bool:\n split_height_at_centimeters = height.split(\"cm\")\n if len(split_height_at_centimeters) == 1:\n split_height_at_inches = height.split(\"in\")\n return split_height_at_inches[0].isnumeric() and 59 <= int(split_height_at_inches[0]) <= 76\n else:\n return split_height_at_centimeters[0].isnumeric() and 150 <= int(split_height_at_centimeters[0]) <= 193", "def athlete_height(a):\r\n if pd.isnull(a):\r\n return np.nan\r\n if \"cm\" in a:\r\n temp = round(int(a.replace(\" cm\",\"\")),0)\r\n if \"'\" in a:\r\n temp = a.split('\"')[0]\r\n feet = int(temp.split(\"'\")[0])\r\n inches = int(temp.split(\"'\")[1])\r\n cm = round(feet*30.48 + 2.54*inches,0)\r\n temp = cm\r\n if \"in\" in a:\r\n inches = round(int(a.replace(\" in\",\"\")), 0)\r\n kg = round(2.54 * inches,0)\r\n temp = kg \r\n \r\n if temp < 120 or temp > 225:\r\n return np.nan\r\n else:\r\n return temp", "def inches(n):\n return n * 25.4", "def inches_to_mm(inches):\n\tmm=inches*25.4\n\treturn mm", "def inches_to_mm(inches):\n mm = inches * 25.4\n return mm", "def cm2inch(size):\n return size / 2.54", "def cm2inch(size):\n return size / 2.54", "def cm2inch(size):\n return size / 2.54", "def validate_height(passport: map) -> bool:\n if passport.get('hgt'):\n if passport['hgt'].count('cm'):\n val = int(passport['hgt'][:passport['hgt'].find('cm')])\n if 150 <= val <= 193:\n return True\n elif passport['hgt'].count('in'):\n val = int(passport['hgt'][:passport['hgt'].find('in')])\n if 59 <= val <= 76:\n return True\n else:\n return False\n\n return False", "def valid_hgt(cls, hgt):\n check = re.search(r\"^(\\d+)(in|cm)$\", hgt)\n if not check:\n raise ValueError(f\"Invalid hgt {hgt}\")\n n, unit = check.groups()\n n = int(n)\n if unit == \"in\":\n if not (76 >= n >= 59):\n raise ValueError(\"Invalid hgt\")\n if unit == \"cm\":\n if not (193 >= n >= 150):\n raise ValueError(\"Invalid hgt\")\n return hgt", "def conversion():\r\n\tdistance_feet = int(input(f\"Enter the distance in feet: \"))\r\n\tdistance_inches = distance_feet * 12\r\n\tdistance_yards = distance_feet / 3\r\n\tdistance_miles = distance_feet / 5280\r\n\treturn distance_inches, distance_yards, distance_miles", "def getRangeInches(self) -> float:\n ...", "def mm_to_inches(rainfall_in_mm):\r\n rainfall_in_inches = 
rainfall_in_mm * 0.0393701\r\n return rainfall_in_inches", "def convert(input):\n\n #constants\n m2cm = 100.\n inch2cm = 2.54\n foot2inch = 12\n yard2foot = 3\n mile2yard = 1760\n\n #calculations\n inches = input * m2cm / inch2cm\n feet = inches / foot2inch\n yards = feet / yard2foot\n miles = yards / mile2yard\n \n \n output = \"\"\"\n {input} meters equals:\n {inches: .2f} inches\n {feet: .2f} feet\n {yards: .2f} yards\n {miles: .2f} miles\"\"\".format(input=input, inches=inches, feet=feet, yards=yards, miles=miles)\n \n print(output)", "def cm2inch(*tupl):\n inch = 2.54\n if type(tupl[0]) == tuple:\n return tuple(i/inch for i in tupl[0])\n else:\n return tuple(i/inch for i in tupl)", "def _mps_to_mph(self) -> None:\n if self.units == \"m/s\":\n self.units = \"mph\"\n self.value = (self.value * 2.236936).__round__(2)\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'm/s' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def feet2m(feet):\n return feet * 0.3048", "def __conv_inch(length_mm):\n return length_mm / 25.4", "def feet_to_meters(feet: float) -> float:\n feet = float(feet)\n meter = float(float(feet) * 0.3048)\n return round(meter, 2)", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def pa_to_inches(pressure_in_pa):\r\n pressure_in_inches_of_m = pressure_in_pa * 0.02953\r\n return pressure_in_inches_of_m", "def meters_from(feet):\n try:\n meters = float(feet) / 3.28\n meters = round(meters, 2) # Round to two decimal places\n return str(meters)\n except ValueError:\n return \"invalid input\"" ]
[ "0.8042959", "0.7631555", "0.739575", "0.7370734", "0.72036153", "0.66799337", "0.66381806", "0.6505398", "0.64125216", "0.60870993", "0.5969806", "0.596189", "0.59046865", "0.59046865", "0.59046865", "0.57828355", "0.5710599", "0.5627367", "0.55650413", "0.55526185", "0.55437326", "0.53487855", "0.5337463", "0.5306356", "0.5280187", "0.5254158", "0.5240671", "0.5186911", "0.51575625", "0.5154385" ]
0.7818981
1
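The companion inch_to_cm helper is likewise only pinned down by its tests ("72\"" -> 183, "--" -> None). A sketch consistent with them, again an assumption rather than the original code:

import re

def inch_to_cm(inches):
    # Expects a quoted inch value such as 72"; returns None on anything else.
    match = re.match(r'^(\d+)"$', inches.strip())
    if not match:
        return None
    return round(int(match.group(1)) * 2.54)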
Start Hikvision event stream thread.
def start_hik(self, event):
    self.camdata.start_stream()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n gv.logger.info(\"Videoqualityprobe healthcheck thread started\")\n thread = Thread(target=self.run, args=())\n thread.daemon = True\n self.thread = thread\n thread.start()", "def start(self):\n\t\tself.stream.start_stream()", "def start(self):\n self.has_event = False\n self.running = True\n self._condition.acquire()\n self._thread = threading.Thread(target=read_input, args=(self,))\n self._thread.start()", "def start(self):\n self.stream.start()\n self.running = True\n self.update()", "def _start_event_stream(self):\r\n\r\n # Register with an event queue, which will be used as event source:\r\n self._event_queue = self._call_factory(\"subscribe\")\r\n if self._event_queue is None:\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): no queue, \"\r\n \"stopping this thread\",\r\n threading.current_thread().ident)\r\n # As per http://dev.w3.org/html5/eventsource/, a response code\r\n # of 204 tells the browser not to reconnect:\r\n self.send_response(204)\r\n return\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): registered queue, \"\r\n \"start sending events\", threading.current_thread().ident)\r\n\r\n # Send HTTP headers:\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/event-stream\")\r\n self.end_headers()\r\n\r\n # Start event serving loop:\r\n self._send_events()", "def start_stream(self):\n pass", "def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()", "def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )", "def start_stream(self):\n self.handle = lt.add_magnet_uri(self.lt_ses, self.queue[0].magnet_link, # pylint: disable=no-member\n self.params)\n self.handle.set_sequential_download(True)\n\n self.stream_thread = threading.Thread(target=self._stream,\n name='stream')\n self.stream_thread.start()", "def start(self):\n self.thread.start()", "def start(self):\r\n start_thread(self._extract_thread_func, \"message sorter thread\")\r\n self.debug(\"### initialized stream sorter with %g s time window\"\r\n % (self.delay))", "def start(self) -> None:\n self._stream.start()", "def start(self):\n self._setup_thread()\n self.thread.start()", "def start(self):\n self._thread.start()", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. 
\"\"\"\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)\n\n # subscribe to personal channel and fire up the message handler\n self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})\n self._proc = Thread(target=pubsub_thread)\n self._proc.daemon = True\n self._proc.start()", "def start(self):\n self.log('Start capturing.')\n # ---\n try:\n self.setup()\n # run camera thread\n self._worker = Thread(target=self.run)\n self._worker.start()\n except StopIteration:\n self.log('Exception thrown.')", "def run(self):\n t = Thread(target=self._listen)\n t.start()", "def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()", "def start(self):\n # Start listening for messages\n self.connect_to_presentation()\n\n # Start the heartbeat\n self.heartbeat_thread.start()", "def run(self):\n\n # Start the video stream process\n self._process.start()", "def start(self):\n self.stop() # Stop current process\n self._watchdog = threading.Thread(target=self._watch)\n self._defunctdog = threading.Thread(target=self._defunct)\n self._stdin_thread = threading.Thread(target=self.stdin_thread)\n logfile = settings.get_path(\"logs\") + '/' + self.name + '.log'\n try:\n self.stderr = open(logfile, 'w')\n except IOError:\n log.warning(\"There is no where ({0}) to put log of {1}\".format(logfile, self.name))\n self.stderr = None\n self._running.set()\n # if self.name == 'vlcvideo':\n # log.debug('HIGH PRIORITY')\n # self._popen = Popen( 'chrt -r 80 '+self.command, bufsize=0, executable=None, stdin=PIPE, stdout=PIPE, stderr=self.stderr,\n # close_fds=False, shell=True, cwd=None, env=None,\n # universal_newlines=False, startupinfo=None, creationflags=0, preexec_fn=None) \n # # preexec_fn=lambda : os.nice(-20)\n # else: \n self._popen = Popen(shlex.split(self.command), bufsize=0, executable=None, stdin=PIPE, stdout=PIPE,\n stderr=self.stderr,\n close_fds=False, shell=False, cwd=None, env=None,\n universal_newlines=False, startupinfo=None, creationflags=0, preexec_fn=None)\n # preexec_fn=lambda : os.nice(-20)\n self._watchdog.start()\n self._defunctdog.start()\n self._stdin_thread.start()\n register_thread(self)\n if self.onOpen:\n self.onEvent([self.onOpen])", "def _start_in_thread(self):\n return spawn_waitready(self._listen, self.start)[0]", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def start(self, event):\n return", "def start(self):\n gevent.spawn(self.run)", "def start(self):\n gv.logger.info(\"Started playing new playlist\")\n thread = Thread(target=self.run, args=())\n thread.daemon = True\n self.thread = thread\n thread.start()", "def start(self):\n logging.info(\"ICMPecho health monitor plugin: Starting to watch \"\n \"instances.\")\n\n self.monitor_thread = threading.Thread(target = self.start_monitoring,\n name = self.thread_name)\n self.monitor_thread.daemon = True\n self.monitor_thread.start()", "def create_listen_thread(self):\n self.listen_thread = threading.Thread(target=self.listen, daemon=True)\n self.listen_thread.start()\n print('Started listener thread')" ]
[ "0.6741748", "0.6689916", "0.6685221", "0.66381896", "0.6620423", "0.64558554", "0.64370614", "0.6398764", "0.63183093", "0.62978", "0.6290173", "0.6284828", "0.62792015", "0.623901", "0.6146902", "0.6136318", "0.61089164", "0.6103689", "0.60574526", "0.6047557", "0.6042904", "0.6005118", "0.5975319", "0.5949132", "0.5933245", "0.59246886", "0.5913913", "0.5902432", "0.59020674", "0.5895973" ]
0.7037271
0
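start_hik takes an event argument and simply starts the camera stream, which suggests it is meant to run as an event-bus callback. The sketch below shows how such a callback is commonly wired in a Home-Assistant-style platform setup; only start_hik and start_stream come from the snippet, while HikvisionData, its constructor and the setup_platform body are assumptions.

from homeassistant.const import EVENT_HOMEASSISTANT_START

class HikvisionData:
    """Assumed wrapper around the camera client exposed as camdata."""

    def __init__(self, camdata):
        self.camdata = camdata

    def start_hik(self, event):
        self.camdata.start_stream()

def setup_platform(hass, config, add_entities, discovery_info=None):
    camdata = config["camera_client"]  # placeholder for the real camera object
    data = HikvisionData(camdata)
    # Defer starting the stream until Home Assistant itself has started.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, data.start_hik)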
Return attribute list for sensor/channel.
def get_attributes(self, sensor, channel):
    return self.camdata.fetch_attributes(sensor, channel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sensors(self) -> List[dict]:\n return self.items_by_domain(\"sensor\")", "def listattribute(self, varName):\n fName = \"\"\n if varName in self.statVars:\n fName = self.statVars[varName][0]\n elif varName in self.timeVars:\n fName = self.timeVars[varName][0][0]\n if fName:\n var = cdms2.open(fName, 'r')(varName)\n return var.listattributes()\n else:\n return []", "def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes", "def feature_list(self):\n components = self._device_info.get(device_data_constants.KEY_COMPONENT, {})\n # Set is_rma_device.\n components['is_rma_device'] = self._is_rma_device\n return self._feature_list.Encode(components)", "def get_attribute_list(self):\n return self.dp.get_attribute_list()", "def get_channelbox_attributes(input_object):\n attr_list = input_object.listAttr(keyable = True, scalar = True, multi = True)\n attr_list.extend(input_object.listAttr(channelBox = True))\n return attr_list", "def ordered_channel_names(self):\n channel_list = []\n for k in self.__dict__.keys():\n if k.startswith('channel_'):\n channel_list.append(\n [int(k.split('channel_')[1]), self.__dict__[k]]\n )\n channel_list.sort()\n if len(channel_list) == 0:\n print('********* warning!! empty channel list - are there ay channel_N attributes? ')\n return [i[1] for i in channel_list]", "def get_metric_list(self) -> List[str]:\n ...", "def get_meas_list(self, **kwargs):\n channel = kwargs.get(\"channel\", self.active_channel)\n meas_list = self.scpi.query_meas_name_list(channel)\n if len(meas_list) == 1:\n return None # if there isnt a single comma, then there arent any measurments\n return [(meas_list[k], meas_list[k + 1]) for k in range(0, len(meas_list) - 1, 2)]", "def sensor(self):\n return ProxyList(self, OxfordITC503.Sensor, range(3))", "def get_attributes(cls):\r\n return []", "def get_sensors(self) -> tuple:\n return self.sensors", "def channels(self):\n return [cc for cc in list(self.dataset.data_vars)]", "def get_sensor_data(self):\n sensor_config = self.load_sensor_config()\n now_utc = datetime.utcnow()\n now = datetime.now()\n sensor_data = []\n for sensor, address in sensor_config.items():\n sensor_data.append(self.fetch_data(sensor, address, now_utc, now))\n return sensor_data", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def get_devices(self):\n devices = self.get(\"event/device\")", "def get_physical_output_channels(self):\r\n bufsize = 1024\r\n buf = ctypes.create_string_buffer(bufsize)\r\n NIDAQ_dll.DAQmxGetDevAOPhysicalChans(self.dev_id.encode('ascii'),\r\n ctypes.byref(buf), bufsize)\r\n channel_list = buf_to_list(buf)\r\n channel_list = [channel.lstrip(self.dev_id+'/') for channel in channel_list]\r\n return channel_list", "def sensors(self):\n return self._sensors", "def getSensors(self):\n return [float(self.current_state),]", "def _attrlist(self,obj, attrs):\n vlist = [obj.__getattribute__(attr) for attr in attrs]\n return vlist", "def list_devices(self):\n return [x for x in self.devices.keys()]", "def sensorsItems(self):\n return self.settingsDb.allSensors()", "def so_attributes(self):\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n \n dfs = ['sofiltEEG', 'spsofiltEEG']\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.so_events = {}\n self.so_rejects 
= {}", "def attributes(self):\n\n return list(self._attributes.values())", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices", "def get_all_data(self):\n\t\ttemp = self.get_temp()\n\t\taccel = self.get_accel_data()\n\t\tgyro = self.get_gyro_data()\n\t\treturn [accel, gyro, temp]", "def binary_sensors(self) -> List[dict]:\n return self.items_by_domain(\"binary_sensor\")", "def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]", "def get(self, channels): \n data = []\n for chan, d in zip(self.channels, self.data):\n if chan not in channels:\n continue\n data.append(d)\n data = np.stack(data)\n return data" ]
[ "0.6570343", "0.64977187", "0.6259698", "0.6129214", "0.60648686", "0.6006154", "0.5944662", "0.5927698", "0.5921689", "0.5897804", "0.5869058", "0.58233976", "0.5816986", "0.57921696", "0.57769096", "0.57692206", "0.57688844", "0.5752122", "0.575147", "0.5749902", "0.5747109", "0.57411236", "0.57375705", "0.57211953", "0.5715307", "0.5703943", "0.5677684", "0.5672722", "0.5672001", "0.5659868" ]
0.78213686
0
Extract sensor last update time.
def _sensor_last_update(self):
    return self._cam.get_attributes(self._sensor, self._channel)[3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_update_time(self):\n return self.last_update_time", "def _get_last_meas_time(self):\n\n #if flag for whole data regeneration is set\n if self._process_type == 'full_gen':\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n \n \n res = self._db.Query(\"\"\"SELECT last_measurement_time\n FROM last_dashboard_element_segment_value\n WHERE\n element_id = %s\n AND segment_value_id = %s\n \"\"\",(self._id, self._segment_value_id))\n if not res:\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n item = self._db.record[0]\n if item['last_measurement_time']:\n return item['last_measurement_time']\n return datetime.datetime(1900, 1, 1, 0, 0, 0)", "def last_update_time(self):\n return self._last_update_time", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def last_update(self):\n date, time = self.data.get(\"update_date\"), self.data.get(\"update_time\")\n if date is not None and time is not None:\n return datetime.strptime(date + time, \"%d-%m-%Y%H:%M\").replace(\n tzinfo=VIENNA_TIME_ZONE\n )", "def last_updated_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_updated_time\")", "def last_updated(self):\n try:\n return max(self.station_usage, key=lambda x: x.last_update).dt_last_update\n except ValueError:\n return datetime.fromtimestamp(0)", "def last_updated_time(self) -> str:\n return pulumi.get(self, \"last_updated_time\")", "def dt_last_update(self):\n return self.last_update", "def last_update_time_in_minutes(self):\n return self._last_update_time_in_minutes", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> str:\n return pulumi.get(self, \"update_time\")", "def get_last_update(self):\n facility_data = self.get_raw_facilities_data()\n if facility_data is None:\n return None\n else:\n return datetime.strptime(facility_data['lastUpdate'], \"%Y-%m-%dT%H:%M:%SZ\")", "def last_update(self):\n serie = self._root.find('Series')\n return int(serie.find('lastupdated').text)", "def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time", "def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time", "def last_update(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_update')", "def last_update(self):\r\n request = http.Request('GET', '/metadata/last_update.json')\r\n return request, parsers.parse_json", "def get_last_time(self):\n \n return self._last", "def last_update(self):\n return self._last_update", "def last_update(self):\n return self._last_update", "def update_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"update_time\")", "def update_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"update_time\")" ]
[ "0.7647438", "0.7511165", "0.74607396", "0.7425206", "0.7425206", "0.7414086", "0.73201466", "0.7316108", "0.7272959", "0.7233726", "0.7118026", "0.7056613", "0.7056613", "0.7056613", "0.7056613", "0.7056613", "0.7056613", "0.7056613", "0.7056613", "0.70526713", "0.7018561", "0.70051134", "0.70051134", "0.68973905", "0.68832237", "0.68613285", "0.68402576", "0.68402576", "0.67959785", "0.67959785" ]
0.7972229
0
Return true if sensor is on.
def is_on(self): return self._sensor_state()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_on(self):\n return getattr(self.coordinator.data[0], self._sensor) is True", "def is_on(self):\n data = self.sensor_data\n if data and data[\"model\"] == \"SML\" and data[\"changed\"]:\n return data[\"state\"] == STATE_ON\n return False", "def is_on(self) -> bool:\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n if self._sensor_type != DEVICE_TYPE_DOORBELL:\n return self._camera_data[\"event_on\"]\n return self._camera_data[\"event_ring_on\"]", "def is_on(self):\n return self._device.state", "def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True", "def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True", "def is_on(self) -> bool:\n raise NotImplementedError(\"Device subclass needs to implement this.\")", "def is_on(self) -> bool:\n return self._device.fan_on", "def is_on(self):\n if self._power_state == HYSEN_POWERON :\n return True\n else:\n return False", "def is_on(self):\n return True if self._device.device_data[self._uuid]['streaming_state'] == \\\n 'streaming-enabled' or \\\n self._device.device_data[self._uuid]['streaming_state'] == \\\n 'online-disabled' else False", "def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)", "def is_on(self) -> bool:\n return self._state == STATE_ON", "def is_on(self):\n return getattr(self._node, STICK_API[USB_MOTION_ID][ATTR_STATE])", "def is_on(self):\n return self._light_on", "def is_on(self) -> bool:\n return self.coordinator.data.get_metric(METRIC_KEY_MODE) == MODE_ON", "def is_on(self) -> bool:\n return self._is_on", "def is_on(self):\n return bool(self.arest.data.get('state'))", "def is_on(self):\n return self._device.state == SHCShutterContact.ShutterContactService.State.OPEN", "def is_on(self):\n return self.car.data[DATA_PLUGGED_IN]", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._on", "def is_on(pin):\n return GPIO.input(pin) == GPIO.HIGH" ]
[ "0.8668819", "0.83718693", "0.8321358", "0.82086754", "0.82086754", "0.82086754", "0.8072303", "0.79461884", "0.78692335", "0.78692335", "0.78290933", "0.77697575", "0.7760602", "0.76900584", "0.7629474", "0.7626312", "0.7625873", "0.757377", "0.7554073", "0.75531846", "0.7529294", "0.7516886", "0.75165766", "0.75033826", "0.75033826", "0.75033826", "0.75033826", "0.75033826", "0.7472258", "0.74612755" ]
0.88761306
0
Process the client reports by aggregating their weights.
async def process_reports(self):
    await self.aggregate_weights(self.updates)

    # Testing the global model accuracy
    if Config().clients.do_test:
        # Compute the average accuracy from client reports
        self.average_accuracy = self.accuracy_averaging(self.updates)
        logging.info(
            '[Server #{:d}] Average client accuracy: {:.2f}%.'.format(
                os.getpid(), 100 * self.average_accuracy))

    if hasattr(Config().server, 'do_test'):
        if not Config().clients.do_test or Config().server.do_test:
            # Test the updated model directly at the server
            self.accuracy = self.trainer.test(self.testset)
            logging.info(
                '[Server #{:d}] Global model accuracy: {:.2f}%\n'.format(
                    os.getpid(), 100 * self.accuracy))
    else:
        self.accuracy = self.average_accuracy

    await self.wrap_up_processing_reports()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _aggregate(self, *params): \n serialized_params = np.array([self._serialize(client) for client in params])\n serialized_aggregation = self._aggregate(*serialized_params)\n aggregated_weights = self._deserialize(serialized_aggregation)\n \n return aggregated_weights", "async def process_reports(self):\n features = [features for (__, features) in self.updates]\n\n # Faster way to deep flatten a list of lists compared to list comprehension\n feature_dataset = list(chain.from_iterable(features))\n\n # Training the model using all the features received from the client\n sampler = all_inclusive.Sampler(feature_dataset)\n self.algorithm.train(feature_dataset, sampler,\n Config().algorithm.cut_layer)\n\n # Test the updated model\n self.accuracy = self.trainer.test(self.testset)\n logging.info('[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n\n await self.wrap_up_processing_reports()", "async def wrap_up_processing_reports(self):\n if hasattr(Config(), 'results'):\n new_row = []\n for item in self.recorded_items:\n item_value = {\n 'global_round':\n self.current_global_round,\n 'round':\n self.current_round,\n 'accuracy':\n self.accuracy * 100,\n 'average_accuracy':\n self.average_accuracy * 100,\n 'edge_agg_num':\n Config().algorithm.local_rounds,\n 'local_epoch_num':\n Config().trainer.epochs,\n 'training_time':\n max([\n report.training_time for (report, __) in self.updates\n ]),\n 'round_time':\n time.perf_counter() - self.round_start_time\n }[item]\n new_row.append(item_value)\n\n if Config().is_edge_server():\n result_csv_file = f'{Config().result_dir}result_{Config().args.id}.csv'\n else:\n result_csv_file = f'{Config().result_dir}result.csv'\n\n csv_processor.write_csv(result_csv_file, new_row)\n\n if Config().is_edge_server():\n # When a certain number of aggregations are completed, an edge client\n # needs to be signaled to send a report to the central server\n if self.current_round == Config().algorithm.local_rounds:\n logging.info(\n '[Server #%d] Completed %s rounds of local aggregation.',\n os.getpid(),\n Config().algorithm.local_rounds)\n self.model_aggregated.set()\n\n self.current_round = 0\n self.new_global_round_begins.clear()\n # Wait until a new global round begins\n # to avoid selecting clients before a new global round begins\n await self.new_global_round_begins.wait()", "def statcalc(clients):\n\n results = {}\n results['average_conn_rate'] = 0\n results['average_reply_rate_avg'] = 0\n results['tot_requests'] = 0\n results['tot_replies'] = 0\n results['aggregate_conn_rate'] = 0\n results['average_conn_rate'] = 0\n results['aggregate_reply_rate'] = 0\n results['average_reply_rate_avg'] = 0\n\n for i, client in enumerate(clients):\n results['tot_requests'] += client.tot_requests\n results['tot_replies'] += client.tot_replies\n results['aggregate_conn_rate'] += client.conn_rate\n results['average_conn_rate'] = (results['average_conn_rate'] + client.conn_rate) / (i+1)\n results['aggregate_reply_rate'] += client.reply_rate_avg\n results['average_reply_rate_avg'] = (results['average_reply_rate_avg'] + client.reply_rate_avg) / (i+1)\n\n if not client.failed:\n results['percent_lost'] = (results['tot_requests'] - results['tot_replies']) / results['tot_requests'] * 100.0\n else:\n results['percent_lost'] = 100\n\n return results", "def calculate_weighted_results():\n pass", "def processReports(self):\n count = 0\n for r in self.reports:\n #need to change the next two lines so that the fields are not hard-coded\n self.currentCase 
= r.id\n self.currentText = r.impression.lower()\n self.analyzeReport(self.currentText,\n \"disease\",\n modFilters=['indication','probable_existence',\n 'definite_existence',\n 'historical','future','pseudoneg',\n 'definite_negated_existence',\n 'probable_negated_existence'])\n\n self.recordResults()", "def log_weights_statistics(self):\n for weight_name, weight_parameter in self._weights.items():\n for statistic_function in self._statistics_functions:\n self._weights_statistics[statistic_function.__name__][\n weight_name\n ].append(float(statistic_function(weight_parameter)))", "def FedAvg_agregation_process(model, clients_models_hist:list , weights:list):\n \n new_model=deepcopy(model)\n set_to_zero_model_weights(new_model)\n\n for k,client_hist in enumerate(clients_models_hist):\n \n for idx, layer_weights in enumerate(new_model.parameters()):\n\n contribution=client_hist[idx].data*weights[k]\n layer_weights.data.add_(contribution)\n \n return new_model", "def write_report(self):\r\n self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')\r\n server_log.info('')\r\n server_log.info('=========================================================')\r\n server_log.info('All test clients completed!')\r\n server_log.info(' Start time: {}'.format(self.start_time))\r\n server_log.info(' End time: {}'.format(self.end_time))\r\n server_log.info('')\r\n server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))\r\n for client in self.client_list.values():\r\n server_log.info('---------------------------------------------------------')\r\n server_log.info(' Client {}'.format(client.client_id))\r\n server_log.info(' Test status: {}'.format(client.status))\r\n server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran)) \r\n server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))\r\n server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))\r\n server_log.info(' Files written: {}'.format(client.files_written))\r\n server_log.info(' File size: {}'.format(client.file_size))\r\n server_log.info(' Chunk size: {}'.format(client.chunk_size))\r\n server_log.info('=========================================================')\r\n server_log.info('')", "def process(self, accumulator: [FLContext], fl_ctx: FLContext):\n # The model data is in model.params as a dict.\n model = fl_ctx.get_model()\n vars_to_aggregate = [set(item.get_model().params) for item in accumulator]\n vars_to_aggregate = set.union(*vars_to_aggregate)\n\n for v_name in vars_to_aggregate:\n n_local_iters, np_vars = [], []\n for item in accumulator:\n data = item.get_model()\n if v_name not in data.params:\n continue # this item doesn't have the variable from client\n\n # contribution is a protobuf msg\n # it has `n_iter` which represents number of local iterations \n # used to compute this contribution \n acc = item.get_prop('_contribution')\n float_n_iter = np.float(acc.n_iter)\n n_local_iters.append(float_n_iter)\n\n # weighted using local iterations\n weighted_value = proto_to_ndarray(data.params[v_name]) * float_n_iter\n np_vars.append(weighted_value)\n if not n_local_iters:\n continue # didn't receive this variable from any clients\n new_val = np.sum(np_vars, axis=0) / np.sum(n_local_iters)\n new_val += proto_to_ndarray(model.params[v_name])\n\n # Update the params in model using CopyFrom because it is a ProtoBuf structure\n model.params[v_name].CopyFrom(ndarray_to_proto(new_val))\n return False", "def _get_aggregated_results(self):\n gradients = self.gradients\n client_traj_infos = 
flatten_lists(self.client_traj_infos)\n client_opt_infos = self._combine_client_opt_infos(self.client_opt_infos)\n \n self.gradients = []\n self.client_traj_infos = []\n self.client_opt_infos = []\n\n return gradients, client_traj_infos, client_opt_infos", "def weight(self, weight_scheme, weight_name='weight', unique_key='identity',\n subset=None, report=True, path_report=None, inplace=True, verbose=True):\n if subset:\n if isinstance(subset, str):\n if self.is_filter(subset):\n subset = {subset: 0}\n else:\n raise ValueError('{} is not a valid filter_var'.format(subset))\n ds = self.filter('subset', subset, False)\n meta, data = ds.split()\n else:\n meta, data = self.split()\n engine = qp.WeightEngine(data, meta=meta)\n engine.add_scheme(weight_scheme, key=unique_key, verbose=verbose)\n engine.run()\n\n org_wname = weight_name\n if report:\n print(engine.get_report())\n print()\n if path_report:\n df = engine.get_report()\n full_file_path = '{} ({}).xlsx'.format(path_report, weight_name)\n df.to_excel(full_file_path)\n print('Weight report saved to:\\n{}'.format(full_file_path))\n s_name = weight_scheme.name\n s_w_name = 'weights_{}'.format(s_name)\n if inplace:\n weight_description = '{} weights'.format(s_name)\n data_wgt = engine.dataframe(s_name)[[unique_key, s_w_name]]\n data_wgt.rename(columns={s_w_name: org_wname}, inplace=True)\n if org_wname not in self._meta['columns']:\n self.add_meta(org_wname, 'float', weight_description)\n self.update(data_wgt, on=unique_key)\n else:\n wdf = engine.dataframe(weight_scheme.name)\n return wdf.rename(columns={s_w_name: org_wname})", "def compute_metrics(self, results: list) -> dict:", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)", "def process_report(self, edge_index, weight):\n print_level(\n 'debug', self.node_id, 'Received report from edge ' +\n str(edge_index) + ' with weight ' + str(weight))\n if edge_index != self.father:\n # Send back the reply for the initiate search message\n if weight < self.best_weight:\n self.best_weight = weight\n self.best_edge = edge_index\n self.rec += 1\n self.__report()\n else:\n # Received report from core edge - finish the search for best edge\n if self.state == State.find:\n # Node is still finding the best edge. 
Send back to the queue\n self.__edge_stub(-1, Message.report, [weight], edge_index)\n else:\n if weight > self.best_weight:\n # The other core node must update its father\n self.__changeroot()\n elif weight == INF and self.best_weight == INF:\n # Tree complete\n self.__complete()", "def compute_statistics(self):", "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def fusion_collected_responses(self, lst_model_updates, key='weights'):\n w = []\n n_k = []\n try:\n for update in lst_model_updates:\n w.append(np.array(update.get('weights')))\n n_k.append(update.get('train_counts'))\n except ModelUpdateException as ex:\n logger.exception(ex)\n raise FusionException(\"Model updates are not appropriate for this fusion method. Check local training.\")\n\n x_hat = self.aggregate(w, n_k)\n if self.round == 1:\n return x_hat\n\n x_hat_new = [[] for x in range(np.shape(x_hat)[0])]\n iter_ = 0\n error = 1\n while error >= 1e-4 and iter_ <= 10000:\n v_n = []\n for p in range(len(w)):\n v = [[] for x in range(np.shape(x_hat)[0])]\n diff = []\n p_model = w[p][1]\n for i in range(np.shape(x_hat)[0]):\n diff.append(x_hat[i].flatten() - p_model[i].flatten())\n coeff = max(0, 1 - self.rho / np.linalg.norm(np.concatenate(diff)))\n for i in range(np.shape(x_hat)[0]):\n v[i] = coeff * (p_model[i] - x_hat[i])\n v_n.append(np.array(v))\n v_n_agg = self.aggregate(v_n, n_k)\n for i in range(np.shape(x_hat)[0]):\n x_hat_new[i] = x_hat[i] - v_n_agg[i]\n errors = []\n for i in range(np.shape(x_hat)[0]):\n errors.append(x_hat_new[i].flatten() - x_hat[i].flatten())\n error = np.linalg.norm(np.concatenate(errors))\n x_hat = copy.deepcopy(x_hat_new)\n iter_ += 1\n\n self.round += 1\n return x_hat_new", "def compute_metrics(self):\n pass", "def main(self):\n\n assault_mech_df = self.get_mech_df(url=self.assault_url)\n heavy_mech_df = self.get_mech_df(url=self.heavy_url)\n medium_mech_df = self.get_mech_df(url=self.medium_url)\n light_mech_df = self.get_mech_df(url=self.light_url)\n all_weights_df = pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df, \n light_mech_df])\n\n self.save_data(assault_mech_df, \"assault\")\n self.save_data(heavy_mech_df, \"heavy\")\n self.save_data(medium_mech_df, \"medium\")\n self.save_data(light_mech_df, \"light\")\n self.save_data(all_weights_df, \"all_weights\")\n #get maximum new columns needed for splitting variants\n max_cols = all_weights_df.variants.apply(lambda x: len(x)).max()\n melt_cols = []\n\n for i in range(max_cols):\n all_weights_df[\"var_\"+str(i)] = \"\"\n melt_cols.append(\"var_\"+str(i))\n\n variant_weights_df = pd.DataFrame()\n for index, row in all_weights_df.iterrows():\n for i in range(len(row[\"variants\"])):\n #add each variant to variant weights as a row with mech, tonnage, variant\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, 
index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n for i in range(len(row[\"hero_chassis\"])):\n new_row_dict = {\n \"mech_name\":row[\"hero_names\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"hero_chassis\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n\n for i in range(len(row[\"special_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"special_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df]) \n\n #add champion variants by matching on \n for i in range(len(row[\"champion_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"champion_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n #remove duplicate rows \n variant_weights_df = variant_weights_df[variant_weights_df.duplicated(keep=\"first\")==False]\n self.save_data(variant_weights_df, \"variant_weights\")", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def _set_server_weight(self, _v):\n\n if isinstance(_v, Group):\n for _, sg in _v.subgroups.items():\n self._set_server_weight(sg)\n else:\n if self.resource.CPU_avail > 0:\n _v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail)\n else:\n _v.vCPU_weight = 1.0\n self.total_CPU += _v.vCPUs\n\n if self.resource.mem_avail > 0:\n _v.mem_weight = float(_v.mem) / float(self.resource.mem_avail)\n else:\n _v.mem_weight = 1.0\n self.total_mem += _v.mem\n\n if self.resource.local_disk_avail > 0:\n _v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail)\n else:\n if _v.local_volume_size > 0:\n _v.local_volume_weight = 1.0\n else:\n _v.local_volume_weight = 0.0\n self.total_local_vol += _v.local_volume_size", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def 
compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def run(self):\n # Cache paremeters and arrays\n allWeights = self.allWeights\n spenn, spene, spenu = self.shared.penn, self.shared.pene, self.shared.penu\n ind = self.istart\n nstat = allWeights.shape[0]\n npar = self.penn.shape[1]\n combineHorizontal = self.combineHorizontal\n \n # Choose my estimator\n if 'mean' in self.wtype:\n estimator = self.computeMean\n elif 'median' in self.wtype:\n estimator = self.computeMedian\n else:\n assert False, 'unsupported weight type. must be mean or median'\n \n # Loop over my portion of GPS stations\n for jj in range(nstat):\n # Extract weights\n #plt.semilogy(spenn[ind,:], 'b')\n #plt.semilogy(spene[ind,:], 'r')\n #plt.savefig('figures/penalties_%03d.png' % ind)\n #plt.clf()\n weights = np.tile(np.expand_dims(allWeights[jj,:], axis=1), (1,npar))\n # Compute weighted penalty\n if combineHorizontal:\n wgtPen = estimator(weights, spenn+spene, 0.5*(self.npenalty+self.epenalty))\n self.penn[ind,:] = wgtPen\n self.pene[ind,:] = wgtPen\n else:\n self.penn[ind,:] = estimator(weights, spenn, self.npenalty)\n self.pene[ind,:] = estimator(weights, spene, self.epenalty)\n self.penu[ind,:] = estimator(weights, spenu, self.upenalty)\n ind += 1\n\n return", "def _get_weighted_census_results(self, grouping, field):\n with self.engine.connect() as conn:\n q = \"SELECT census_tract, {field} FROM census\".format(\n field=field) # TODO need to add 'year' column for multiple census years when this is added to the data\n proxy = conn.execute(q)\n census_results = [dict(x) for x in proxy.fetchall()]\n\n # Transform the results\n items = [] # For storing results as we go\n\n if grouping == 'census_tract':\n # No weighting required, data already in proper format\n for r in census_results:\n output = dict({'group': r['census_tract'],\n 'value': r[field]})\n items.append(output)\n\n elif grouping in ['ward', 'neighborhood_cluster']:\n proxy = conn.execute(\n \"SELECT DISTINCT {grouping} FROM census_tract_to_{grouping}\".format(\n grouping=grouping))\n groups = [x[0] for x in proxy.fetchall()]\n\n for group in groups:\n proxy = conn.execute(\n \"SELECT * FROM census_tract_to_{grouping} WHERE {grouping} = '{group}'\".format(\n grouping=grouping, group=group))\n results = [dict(x) for x in proxy.fetchall()]\n\n count = 0\n for result in results:\n tract = result['census_tract']\n factor = result['population_weight_counts']\n matching_data = next((item for item in census_results if\n item[\"census_tract\"] == tract),\n {field: None})\n if matching_data[field] is None:\n logging.warning(\n \"Missing data for census tract when calculating 
weightings: {}\".format(\n tract))\n matching_data[field] = 0\n\n value = matching_data[field]\n count += (value * factor)\n\n output = dict({'group': group, 'value': round(count, 0)})\n items.append(output)\n else:\n # Invalid grouping\n items = None\n\n return {'items': items, 'grouping': grouping, 'data_id': field}", "def generate_reports(self):\n print(\"Generating reports.\")\n # Input user memberships and utilities\n input_mean_util = dict()\n for g in range(self.num_groups):\n input_mean_util[g] = self.group_distributions.get_group_distribution(g)[0]\n input_mean_util[g] = input_mean_util[g]-input_mean_util[g][self.num_items-1]\n input_mean_util = pd.DataFrame.from_dict(data=input_mean_util, orient='index')\n input_mean_util.columns = [\"mean_input_util_item{0:03d}\".format(i) for i in range(self.num_items)]\n input_mean_util.reset_index(inplace=True, drop=True)\n\n input_memb_prob = self.group_distributions.get_group_probabilities()\n input_memb_prob = pd.DataFrame(data=input_memb_prob, columns=['input_memb_prob'])\n input_memb_prob.reset_index(inplace=True, drop=True)\n\n # Drawn user membership probabilities\n drawn_memb_prob = self.true_user_utilities.groupby(['user_group'])[['user_group']].count() / self.num_users\n drawn_memb_prob.rename(columns={'user_group': 'sim_memb_prob'}, inplace=True)\n drawn_memb_prob.reset_index(inplace=True, drop=True)\n\n # Drawn user utilities (mean)\n drawn_mean_util = self.true_user_utilities.groupby(['user_group']).mean()\n drawn_mean_util.drop(labels=['user_id'], axis=1, inplace=True)\n drawn_mean_util.columns = [\"mean_sim_util_item{0:03d}\".format(i) for i in range(self.num_items)]\n drawn_mean_util.reset_index(inplace=True, drop=True)\n\n user_summary = pd.concat(objs=[input_memb_prob, drawn_memb_prob, input_mean_util, drawn_mean_util], axis=1)\n user_summary.to_csv(os.path.join(self.output_dir, 'drawn_users_summary.csv'), index=False)\n\n # Checking design balance per user\n balance_per_user = self.data_set.groupby(['user_id']).sum()/(self.num_trips*self.shelf_size/self.num_items)\n balance_per_user.drop(labels=['trip', 'choice'], axis=1, inplace=True)\n balance_per_user.reset_index(inplace=True)\n balance_per_user.to_csv(os.path.join(self.output_dir, 'item_balance_per_user.csv'), index=False)\n\n print(\"Done with generating reports.\")", "def main():\n print \"Parsing web log...\"\n log = parse_weblog(WEBLOG_FILEPATH)\n print \"Keeping only store page entries...\"\n store_page_entries = keep_only_store_page(log)\n print \"Grouping entries by domain...\"\n store_pages = hash_entries(store_page_entries) \n print \"Calculating bounce rates for each store page...\"\n bounce_rates = compute_bounce_rate(store_pages)\n print \"Saving results to file...\"\n save_as_csv(bounce_rates, OUTPUT_PATH)", "def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()" ]
[ "0.63763714", "0.6358904", "0.6236991", "0.60008013", "0.5937266", "0.57118726", "0.5572431", "0.55678767", "0.55606735", "0.54852104", "0.5456038", "0.54495156", "0.5415237", "0.53729385", "0.5338227", "0.5301329", "0.52975875", "0.5291869", "0.5262049", "0.5219562", "0.5192079", "0.51866734", "0.5170628", "0.5166875", "0.5166875", "0.51415116", "0.511088", "0.5108509", "0.5102907", "0.5102238" ]
0.7276421
0
Wrap up processing the reports with any additional work.
async def wrap_up_processing_reports(self):
    if hasattr(Config(), 'results'):
        new_row = []
        for item in self.recorded_items:
            item_value = {
                'global_round': self.current_global_round,
                'round': self.current_round,
                'accuracy': self.accuracy * 100,
                'average_accuracy': self.average_accuracy * 100,
                'edge_agg_num': Config().algorithm.local_rounds,
                'local_epoch_num': Config().trainer.epochs,
                'training_time': max([
                    report.training_time for (report, __) in self.updates
                ]),
                'round_time': time.perf_counter() - self.round_start_time
            }[item]
            new_row.append(item_value)

        if Config().is_edge_server():
            result_csv_file = f'{Config().result_dir}result_{Config().args.id}.csv'
        else:
            result_csv_file = f'{Config().result_dir}result.csv'

        csv_processor.write_csv(result_csv_file, new_row)

    if Config().is_edge_server():
        # When a certain number of aggregations are completed, an edge client
        # needs to be signaled to send a report to the central server
        if self.current_round == Config().algorithm.local_rounds:
            logging.info(
                '[Server #%d] Completed %s rounds of local aggregation.',
                os.getpid(),
                Config().algorithm.local_rounds)
            self.model_aggregated.set()

            self.current_round = 0
            self.new_global_round_begins.clear()
            # Wait until a new global round begins
            # to avoid selecting clients before a new global round begins
            await self.new_global_round_begins.wait()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processing(self):\n pass", "def do(self):\r\n self.dlCsvReport()\r\n self.dlXlsReport()", "def processReports(self):\n count = 0\n for r in self.reports:\n #need to change the next two lines so that the fields are not hard-coded\n self.currentCase = r.id\n self.currentText = r.impression.lower()\n self.analyzeReport(self.currentText,\n \"disease\",\n modFilters=['indication','probable_existence',\n 'definite_existence',\n 'historical','future','pseudoneg',\n 'definite_negated_existence',\n 'probable_negated_existence'])\n\n self.recordResults()", "def process(self):\n pass", "def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)", "def buildReports(self):\n pass", "async def process_reports(self):\n await self.aggregate_weights(self.updates)\n\n # Testing the global model accuracy\n if Config().clients.do_test:\n # Compute the average accuracy from client reports\n self.average_accuracy = self.accuracy_averaging(self.updates)\n logging.info(\n '[Server #{:d}] Average client accuracy: {:.2f}%.'.format(\n os.getpid(), 100 * self.average_accuracy))\n\n if hasattr(Config().server, 'do_test'):\n if not Config().clients.do_test or Config().server.do_test:\n # Test the updated model directly at the server\n self.accuracy = self.trainer.test(self.testset)\n logging.info(\n '[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n else:\n self.accuracy = self.average_accuracy\n\n await self.wrap_up_processing_reports()", "def process(self, results):\n raise NotImplementedError", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def report():\n pass", "def process_output_reports(results, analysis, date_now):\n #PLUG_INS[analysis.plug_in].set_data(analysis.title, file_path, results)\n output = PLUG_INS[analysis.plug_in]()\n file_path = settings.REPORT_PATH+\"/analysis%s_%s_%s_%s_%s_%s_%s\" % (analysis.id, date_now.year, date_now.month, date_now.day, date_now.hour, date_now.minute, date_now.second)\n output.set_data(analysis.title, file_path, results)\n\n result = AnalysisResult(analysis=analysis, output=string.split(output.get_output_file(), \"/\")[-1], run_date=date_now)\n result.save() \n analysis.last_report = date_now\n analysis.save()\n return True", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "async def process_reports(self):\n features = [features for (__, features) in self.updates]\n\n # Faster way to deep flatten a list of lists compared to list comprehension\n feature_dataset = list(chain.from_iterable(features))\n\n # Training the model using all the features received from the client\n sampler = all_inclusive.Sampler(feature_dataset)\n self.algorithm.train(feature_dataset, sampler,\n Config().algorithm.cut_layer)\n\n # Test the updated model\n self.accuracy = self.trainer.test(self.testset)\n logging.info('[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n\n await self.wrap_up_processing_reports()", "def process(self):", "def process(self):", "def process(self):", "def post_process(self):\n pass", "def 
post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def process(self):\n raise NotImplementedError", "def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])", "def do_work(self):", "def prepare(self):\n for scenario_result, scenario_pass, case_pass in self.iterate():\n for step_result in scenario_result.step_results:\n step_pass = step_result.success\n url, method = step_result.fetch.url, step_result.fetch.method\n params = step_result.fetch.kwargs.get(\"params\")\n method_report = self.get_method_report(url, method)\n if method_report:\n method_report.add(\n case_pass, scenario_pass, step_pass, params\n )", "def _postprocess(self):", "def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))", "def do_work(self):\n raise NotImplementedError", "def run(self):\r\n self.collect_data()", "def _prepare_printing(self):\n # generate PDF for the recordset\n self._generate_attachment()\n\n providers = set(self.mapped('provider_id.id'))\n for provider_id in providers: # process by provider id\n records = self.filtered(lambda r: r.provider_id.id == provider_id)\n # call provider implementation\n provider_name = records[0].provider_id.provider\n if hasattr(records, '_%s_prepare_printing' % provider_name):\n getattr(records, '_%s_prepare_printing' % provider_name)()" ]
[ "0.6657251", "0.6634495", "0.6463586", "0.640295", "0.63786864", "0.6316492", "0.6291937", "0.6238219", "0.6229885", "0.61629045", "0.6146104", "0.6141007", "0.6029224", "0.602065", "0.602065", "0.602065", "0.6010309", "0.6010309", "0.6010309", "0.6010309", "0.6010309", "0.59967524", "0.5968311", "0.59667593", "0.5953814", "0.5941825", "0.58970165", "0.58874905", "0.58826387", "0.5865823" ]
0.68476087
0
Create user and send activation emails.
def perform_create(self, serializer):
    user = serializer.save()
    signals.user_registered.send(
        sender=self.__class__, user=user, request=self.request
    )

    context = get_email_context(user)
    to = [get_user_email(user)]
    if djconf.SEND_ACTIVATION_EMAIL:
        djconf.EMAIL.activation(self.request, context).send(to)
    elif djconf.SEND_CONFIRMATION_EMAIL:
        djconf.EMAIL.confirmation(self.request, context).send(to)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_activation_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def user_activation(user):\n act_hash = random_password(32)\n user.set_hashword(act_hash)\n user.save()\n base_url = url_for('public.home', _external=True)\n act_url = url_for(\n 'auth.activate',\n userid=user.id,\n userhash=act_hash,\n _external=True)\n if not 'mailman' in current_app.extensions:\n logging.warning('E-mail extension has not been configured')\n return act_hash\n msg = EmailMessage()\n msg.subject = 'Your dribdat account'\n msg.body = \\\n \"Hello %s,\\n\" % user.username \\\n + \"Thanks for signing up at %s\\n\\n\" % base_url \\\n + \"Tap here to activate your account:\\n\\n%s\" % act_url\n msg.to = [user.email]\n logging.info('Sending activation mail to user %d' % user.id)\n logging.debug(act_url)\n msg.send(fail_silently=True)\n return act_hash", "def test_user_creation_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(len(mail.outbox), 1)", "def test_create_user_auto_activate(self, services):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertTrue(user.is_active)\n self.assertEqual(1, len(activation_token))\n\n # Test that no email was sent:\n self.assertEqual(len(mail.outbox), 0)", "def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')", "def register_new_user(user):\n user.is_active = False\n user.set_unusable_password()\n user.save()\n\n url = generate_url_reset(user)\n #TODO: mettere un body decente per l'email\n send_email(user.email, url, 'aMUX Registration Confirm')", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_create_user_activation_email(self):\n\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': 
'1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertFalse(user.is_active)\n self.assertEqual(1, len(activation_token))\n\n # Test that one message was sent:\n self.assertEqual(len(mail.outbox), 1)", "def post(self):\n requestData = request.form\n\n # Grab username and password from request\n # Generate a hash from password so its not stored in plaintext\n username = requestData['username']\n pwdhash = generate_password_hash(requestData['password'])\n\n # Check if user with given username already exists\n user = User.query.filter_by(username=username).first()\n\n # If not, create a new user and redirect to login page\n if user is None:\n try:\n user = User(username=username, pwdhash=pwdhash)\n except AssertionError:\n flash('Forbidden character detected in username', 'warning')\n return redirect(url_for('page.RegisterView:index'))\n db.session.add(user)\n db.session.commit()\n print user\n print user.get_activation_link()\n flash(\"\"\"\n We\\'ve sent you an email. Please click the link in the\n email to complete the creation of your account.\n \"\"\", 'info')\n link = user.get_activation_link()\n body = render_template(\"email.html\", link=link)\n self.send_email('Account activation',\n '[email protected]',\n [username], body)\n return redirect(url_for('page.LoginView:index'))\n\n # Otherwise show error message\n flash('Username already taken', 'info')\n return redirect(url_for('page.RegisterView:index'))", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def create_user_email(user):\n if not user.is_authenticated:\n return False\n \n user.email = \"%s@%s\" % (user.username, settings.DEFAULT_EMAIL_HOST)\n user.save()\n \n return user.email", "def new_user_form_valid(self, form):\n new_user = form.save()\n new_user.set_password(form.cleaned_data[\"password\"])\n\n h = hashlib.sha1()\n h.update(str(random.random()).encode('utf-8'))\n salt = h.hexdigest()[:5]\n\n h = hashlib.sha1()\n text = salt+new_user.name\n h.update(text.encode('utf-8'))\n\n new_user.activation_key = h.hexdigest()\n new_user.save()\n\n subject = \"Your Work Schedule: Confirm registration\"\n text = (\n \"\"\"Hi {}, \\n please confirm Your registration by clicking or\n copy-past this link \\n {}/user_account/activate/{}/ \\n\n Please confirm with in 48 houers. 
Thank You for using our app.\n \\n Your Sandbox Team\n \"\"\".format(new_user.name, HOST_NAME, new_user.activation_key))\n\n send_mail(\n subject,\n text,\n EMAIL_HOST_USER,\n [new_user.email],\n fail_silently=False\n )\n return HttpResponseRedirect(self.get_success_url())", "def send_activation_email(self, user):\n\t\tactivation_key = self.get_activation_key(user)\n\t\tcontext = self.get_email_context(activation_key)\n\t\tcontext.update({\n\t\t\t'user': user\n\t\t})\n\t\tsubject = render_to_string(self.email_subject_template,\n\t\t\t\t\t\t\t\t context)\n\t\t# Force subject to a single line to avoid header-injection\n\t\t# issues.\n\t\tsubject = ''.join(subject.splitlines())\n\t\tmessage = render_to_string(self.email_body_template,\n\t\t\t\t\t\t\t\t context)\n\t\tuser.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def create_user(name, email):\n user = register(name, email)\n add_message(user=user, text=config.MSG_WELCOME)\n add_message(user=user, text=config.MSG_UNVERIFIED, can_dismiss=False)\n return user", "def send_activation_email(self, user):\n activation_key = self.get_activation_key(user)\n context = self.get_email_context(activation_key)\n context[\"user\"] = user\n subject = render_to_string(\n template_name=self.email_subject_template,\n context=context,\n request=self.request,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = \"\".join(subject.splitlines())\n message = render_to_string(\n template_name=self.email_body_template,\n context=context,\n request=self.request,\n )\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def test_activation_email_missing_template(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "async def send_account_created(self, user_id: int, created_by_admin=False):\n async with self.pg.acquire() as conn:\n company_id, status, role = await conn.fetchrow(\n 'SELECT company, status, role FROM users WHERE id=$1', user_id\n )\n ctx = dict(events_link='/dashboard/events/', created_by_admin=created_by_admin, is_admin=role == 'admin')\n if status == 'pending':\n ctx['confirm_email_link'] = password_reset_link(user_id, auth_fernet=self.auth_fernet)\n\n await self.send_emails.direct(company_id, Triggers.account_created, [UserEmail(id=user_id, ctx=ctx)])", "def _send_registration_email(request, user, acct_type):\n current_site = get_current_site(request)\n subject = \"Activate your PuPPy Mentorship Account\"\n\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n activation_token = account_activation_token.make_token(user)\n\n url_token = uid.decode('utf-8') + '/' + activation_token\n\n message = render_to_string(\n 'mentorship_profile/activation_email.html', {\n \"user\": user,\n \"domain\": current_site.domain,\n \"account_type\": acct_type,\n \"url_token\": url_token\n }\n )\n user.email_user(subject, message)", "def create_inactive_user(self, username, password, email,\r\n send_email=True, profile_callback=None):\r\n new_user = User.objects.create_user(username, email, password)\r\n new_user.is_active = False\r\n new_user.save()\r\n \r\n registration_profile = self.create_profile(new_user)\r\n \r\n if profile_callback is not None:\r\n profile_callback(user=new_user)\r\n \r\n if send_email:\r\n from django.core.mail import send_mail\r\n 
current_site = Site.objects.get_current()\r\n \r\n subject = render_to_string('registration/activation_email_subject.txt',\r\n { 'site': current_site })\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n \r\n message = render_to_string('registration/activation_email.txt',\r\n { 'activation_key': registration_profile.activation_key,\r\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\r\n 'site': current_site })\r\n \r\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [new_user.email])\r\n return new_user", "def create_inactive_user(self, username, password, email, send_email=True, profile_callback=None):\n # Create the user.\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.save()\n \n # And finally create the registration profile.\n registration_profile = self.create_profile(new_user)\n \n # Create site-specific profile, if specified.\n if profile_callback is not None:\n profile_callback(user=new_user)\n \n if send_email:\n from django.core.mail import send_mail\n current_domain = Site.objects.get_current().domain\n subject = \"Activate your new account at %s\" % current_domain\n message_template = loader.get_template('registration/activation_email.txt')\n message_context = Context({ 'site_url': 'http://%s/' % current_domain,\n 'activation_key': registration_profile.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })\n message = message_template.render(message_context)\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [new_user.email])\n return new_user", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. 
If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "async def user_signup(\n form: SignUp,\n task: BackgroundTasks,\n db: Session = Depends(db_session)):\n user = User()\n user.name = form.name\n user.email = form.login\n user.hashed_password = PWD_CONTEXT.hash(form.password)\n user.disabled = False\n db.add(user)\n try:\n db.commit()\n except exc.IntegrityError:\n db.rollback\n return {\"success\": False, \"msg\": \"Пользователь уже зарегистрирован\"}\n\n task.add_task(send_welcome_email, user.email)\n return {\"success\": True}", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create_inactive_user(self, form):\n new_user = form.save(commit=False)\n new_user.is_active = False\n new_user.save()\n\n self.send_activation_email(new_user)\n\n return new_user", "def create(self, data):\n # Make User\n username = data['email'].split(\"@\")[0]\n user = User.objects.create_user(**data, username=username, is_verified=False, is_client=True)\n Profile.objects.create(user=user)\n send_confirmation_email.delay(user_pk=user.pk)\n return user", "def signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate Your neighwatch Account'\n message = render_to_string('registration/activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n return redirect('account_activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'registration/registration_form.html', {'form': form})", "def create(self, data):\n data.pop('password_confirmation')\n try:\n availability = data.pop(\"availability\")\n babysitter = data.pop(\"user_bbs\")\n user = User.objects.create_user(**data, is_verified=False)\n if babysitter:\n bbs = Babysitter.objects.create(user_bbs=user, **babysitter)\n for shift in availability:\n Availability.objects.create(bbs=bbs, **shift)\n except KeyError:\n logging.info('This is a instance client')\n user = User.objects.create_user(**data, is_verified=False)\n logging.info(f'User created, whit pk {user.pk}')\n client = Client.objects.create(user_client=user)\n logging.info(f'User pk is already to pass {user.pk}')\n send_confirmation_email.delay(username=user.username, email=user.email )\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n try:\n with transaction.atomic():\n user = self.model(email=email, **extra_fields)\n 
user.set_password(password)\n user.generate_activation_code()\n user.save(using=self._db)\n return user\n except:\n raise", "def test_activation_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, '[email protected]')", "def test_user_creation_no_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(),\n send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)" ]
[ "0.75173324", "0.7401911", "0.7284484", "0.722946", "0.7194599", "0.7161482", "0.7149278", "0.714638", "0.7140364", "0.7098761", "0.70938045", "0.70883733", "0.7070287", "0.70686877", "0.7038679", "0.7011149", "0.70017517", "0.6978857", "0.6974424", "0.6947503", "0.69285625", "0.68845016", "0.6877755", "0.68627197", "0.68538135", "0.6851987", "0.6846787", "0.6818591", "0.68065464", "0.6785218" ]
0.7469454
1
Append chart to list.
def add_chart(self, chart: Chart): self.charts.append(chart)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_chart(conf, entries):\r\n serie_index = 0\r\n for serie in conf['series']:\r\n data = []\r\n for entry in entries:\r\n if entry is not None:\r\n data.append(entry.datatolist(str(serie['db'])))\r\n conf['series'][serie_index]['data'] = data\r\n serie_index += 1\r\n \r\n \"\"\" Add PlotBands \"\"\" \r\n plotBands = []\r\n last_entry = len(entries)-1\r\n n = 1\r\n while n < last_entry and\\\r\n entries[n].phase is not None and\\\r\n entries[n] is not None and\\\r\n entries[n].next().phase is not None:\r\n begin = entries[n].dt\r\n phase = entries[n].phase\r\n n += 1\r\n while entries[n] is not None and\\\r\n entries[n].phase is not None and\\\r\n entries[n].phase == phase and\\\r\n n < last_entry:\r\n n += 1\r\n end = entries[n].dt\r\n plotBand = {\r\n 'color': PhaseColor[phase],\r\n 'from': datetime_to_timestamp(begin),\r\n 'to': datetime_to_timestamp(end)\r\n }\r\n plotBands.append(plotBand)\r\n conf['xAxis']['plotBands'] = plotBands\r\n \r\n \"\"\" Add Labels \"\"\" \r\n condition_flag_allumage = '((prec.phase is not None) and (prec.phase is not PHASE_ALLUMAGE))'\r\n condition_next_is_not_maintien = '((next.phase is not None) and (next.phase is not PHASE_MAINTIEN))'\r\n labels = json.loads(json.dumps(ChartLabel)) #make a copy of original object\r\n labels['name'] = 'Labels'\r\n for entry in entries:\r\n if entry is not None and entry.phase is not None:\r\n #Label Allumage \r\n if entry.event is not None:\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Allumage'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n # Label Combustion \r\n if entry.phase == PHASE_COMBUSTION and\\\r\n entry.prec() is not None and\\\r\n entry.prec().phase is not PHASE_COMBUSTION and\\\r\n entry.all_next_verify_condition(5, condition_next_is_not_maintien):\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Combustion'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n conf['series'].append(labels)\r\n\r\n \"\"\" Add Subtitle (plotbands legend) \"\"\"\r\n #conf[\"subtitle\"] = ChartLegend\r\n\r\n \"\"\" Add Title (date begin date end) \"\"\"\r\n if len(entries) > 3:\r\n begin = pretty_date(entries[0].dt)\r\n end = pretty_date(entries[len(entries)-1].dt)\r\n #conf[\"title\"][\"text\"] = 'Monitoring Chaudière du {0} au {1}'.format(begin, end)\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudière'\r\n conf[\"subtitle\"][\"text\"] = ' du {0} au {1}'.format(begin, end)\r\n\r\n else:\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudière'\r\n\r\n \"\"\" Return new conf \"\"\"\r\n return conf", "def charts(self, charts):\n\n self.container['charts'] = charts", "def AddToChart(self, chart, points, color=None, label=None):\n return chart.AddData(points, color=color, label=label)", "def addDataPoints(self):\n pass", "def addchart(self, v):\n\n ml = self.vardict[v].VariableLevel\n self.cmd.append(Achart.graphstarttemplate % {\"originandscale\" : self.scaling()})\n ###self.cmd.append(Achart.guidetemplate % {\"varlabel\": self.labelit(v)})\n self.cmd.append(Achart.guidetemplate % {\"varlabel\": v}) # use names to save space\n self.cmd.append(Achart.noyaxis)\n if ml != \"scale\":\n self.cmd.append(Achart.include0)\n\n if v in self.stats:\n self.cmd.append(self.stats[v]) # scale statement to force both charts to align i\n if ml != \"scale\":\n self.cmd.append(Achart.barelement % {\"avar\" : self.avardict.getAName(v), \"svar\": v,\n \"transparency\" : self.transparency, \n \"allcolor\" : self.alldatacolor, \"subcolor\": self.subgroupcolor, 
\"allpattern\":self.alldatapattern,\n \"subpattern\": self.subgrouppattern})\n else:\n if v in self.mins:\n themin = float(self.mins[v][0])\n themax = float(self.mins[v][1])\n option = \"binStart(%s), binWidth(%s),\" % (themin, (themax - themin)/self.bincount)\n else:\n option = \"\"\n if self.histogram != \"kernel\":\n self.cmd.append(Achart.histelement % {\"avar\" : self.avardict.getAName(v), \"svar\": v,\n \"transparency\" : self.transparency, \"histogram\" : self.histogram, \n \"allcolor\" : self.alldatacolor, \"subcolor\": self.subgroupcolor, \"bincount\": self.bincount,\n \"option\" : option, \"allpattern\":self.alldatapattern, \"subpattern\": self.subgrouppattern})\n else:\n self.cmd.append(Achart.kernelelement % {\"avar\" : self.avardict.getAName(v), \"svar\": v,\n \"transparency\" : self.transparency, \"histogram\" : self.histogram, \n \"allcolor\" : self.alldatacolor, \"subcolor\": self.subgroupcolor, \"bincount\": self.bincount,\n \"option\" : option, \"allpattern\":self.alldatapattern, \"subpattern\": self.subgrouppattern,\n \"smoothprop\": self.smoothprop, \"scaledtodata\" : scaledtodata})\n self.cmd.append(Achart.graphendtemplate)", "def on_append_plot(self, event=None):\n self._on_plot_selection()\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=True)", "def append(self, object, **style):\n\n if isinstance(object, Figure):\n self.series.append(object)\n\n elif isHistogram(object):\n if object.dimensions != self.dimensions:\n raise ValueError, \\\n \"can't plot %d-D histogram in %d-D plot\" \\\n % (object.dimensions, self.dimensions)\n if object.dimensions == 1:\n plot_series = Histogram1DPlot(object, **style)\n self.series.append(plot_series)\n elif object.dimensions == 2:\n plot_series = Histogram2DPlot(object, **style)\n self.series.append(plot_series)\n else:\n raise ValueError, \\\n \"don't know how to plot %d-D histogram\" \\\n % object.dimensions\n \n elif isScatter(object):\n if self.dimensions != 2:\n raise ValueError, \\\n \"can't plot a scatter in a %d-D plot\" \\\n % self.dimensions\n self.series.append(ScatterPlot(object, **style))\n\n elif isFunction(object):\n if self.dimensions == 1:\n self.series.append(Function1DPlot(object, **style))\n else:\n raise ValueError, \\\n \"don't know hot to plot %d-D function\" \\\n % object.dimensions\n\n elif isGraph(object):\n if self.dimensions != 2:\n raise ValueError, \\\n \"can't plot a graph in a %d-D plot\" \\\n % self.dimensions\n self.series.append(Graph(object, **style))\n else:\n raise TypeError, \"don't know how to plot %r\" % (object, )", "def _process_charts(self, data_set_instance_id):\n for chart in self._charts:\n self._db.Query(\"\"\"INSERT INTO report_data_set_chart_instance\n (report_data_set_chart_id, report_data_set_instance_id, chart_generation_time)\n VALUES(%s, %s, NOW())\n ON DUPLICATE KEY UPDATE chart_generation_time = NOW()\"\"\",(chart['report_data_set_chart_id'], data_set_instance_id))", "def charts(self, charts):\n\n self._charts = charts", "def append(self, *args):\n return _osgAnimation.VertexList_append(self, *args)", "def create_education_chart(region_list, comparison):\n print('education chart HI')\n print(comparison)\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'education')\n qty_chart = {\n 'chartType': 'bar',\n 'chartName': 'Status Pendidikan menurut Jumlah Orang',\n 'dataFields': qty_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pendidikan',\n 
'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n dataset_total = sum(qty_data['values'])\n pct_data = create_data_by_field_pct(qty_data, dataset_total)\n pct_chart = {\n 'chartType': 'doughnut',\n 'chartName': 'Status Pendidikan menurut Persentase Orang',\n 'dataFields': pct_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Persentase Orang',\n 'tooltipStringFormat': ['_', '%']\n } \n }\n\n chart_list = {'chartList': [qty_chart, pct_chart]}\n jsonprint(chart_list)\n return chart_list\n\n elif comparison == 'region':\n (qty_list, label_list) = \\\n create_data_by_region_qty(region_list, 'education')\n\n print(qty_list, label_list)\n\n dataset_total_list = get_dataset_total_list(qty_list)\n pct_list = create_data_by_region_pct(qty_list, \n dataset_total_list)\n\n chart_list = {'chartList': [], 'labelList': label_list}\n for index, chart in enumerate(qty_list):\n pct_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', '%'],\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Persentase Orang'\n }\n qty_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Jumlah Orang'\n }\n\n field = pct_list[index]['field']\n pct_list[index]['chartName'] = \\\n \"Persentase Orang dengan Status Pendidikan '\" + field + \\\n \"' menurut Kecamatan\"\n qty_list[index]['chartName'] = \\\n \"Jumlah Orang dengan Status Pendidikan '\" + \\\n field + \"' menurut Kecamatan\"\n\n chart_list['chartList'].append(pct_list[index])\n chart_list['chartList'].append(qty_list[index])\n\n jsonprint(chart_list)\n return chart_list", "def append(self, obj):\n if isinstance(obj, Series):\n self.series.append(obj)\n elif isinstance(obj, Shape):\n self.shapes.append(obj)", "def __add_to_hist(self):\n pieces, _ = self.get_pieces()\n self.hist.append([pieces, self.current_dice, self.current_player, self.round])", "def create_series(self):\n series = []\n for timeline_object in self.timeline['results']:\n count = timeline_object[\"count\"]\n series.insert(0, count)\n self.query_total = self.query_total + count\n label = self.query[0:30]\n if len(self.query) > 30:\n label = label + \"...\"\n label = label + \" (\" + str(self.query_total) + \")\"\n series.insert(0, label)\n return series", "def add_chart(self, chart, *charts):\n if _vtk.vtkRenderingContextOpenGL2 is None: # pragma: no cover\n from pyvista.core.errors import VTKVersionError\n\n raise VTKVersionError(\n \"VTK is missing vtkRenderingContextOpenGL2. 
Try installing VTK v9.1.0 or newer.\"\n )\n self._charts.add_chart(chart, *charts)", "def append(self, data):\n self.data_list.append(data)", "def test_append():\n import matplotlib.pyplot as plt\n endpoints = [\"krstc_baseline\"]\n start = \"2019-09-30\"\n end = \"now\" \n end = '2019-10-03T16:01:56.699883'\n qmon = QueueMonitor(endpoints, start, end)\n \n # a record we would have gotten from 'qmon.listen'\n record = {\n 'msgtype': 4,\n 'payload': {\n 'value_cal': -5.32175, 'value_raw': 1.056375},\n 'sender_info': {\n 'commit': 'g7190b92',\n 'exe': '/home/pi/controls/latest/bin/dragonfly',\n 'hostname': 'scannerpi',\n 'package': 'dripline',\n 'service_name': 'scannerpi_service',\n 'username': 'pi',\n 'version': 'v3.7.3'},\n 'timestamp': '2019-10-03T16:02:20.543470Z'\n }\n\n # ts = datetime.fromisoformat(record[\"timestamp\"]) # can't handle the \"Z\"\n ts = parser.parse(record[\"timestamp\"]) # this works\n val = record[\"payload\"][\"value_cal\"]\n \n # append new values\n qmon.data_lists[\"krstc_baseline_ts\"].append(ts)\n qmon.data_lists[\"krstc_baseline\"].append(val)\n \n # make the plot\n xv = qmon.data_lists[\"krstc_baseline_ts\"]\n yv = qmon.data_lists[\"krstc_baseline\"]\n plt.plot(xv, yv, \"-r\")\n\n # superimpose the new point again\n plt.plot(ts, val, \".b\", ms='10')\n\n plt.gcf().autofmt_xdate() # rotates labels\n plt.show()", "def create_marriage_chart(region_list, comparison):\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'marriage')\n qty_chart = {\n 'chartType': 'bar',\n 'chartName': 'Status Pernikahan menurut Jumlah Orang',\n 'dataFields': qty_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n dataset_total = sum(qty_data['values'])\n pct_data = create_data_by_field_pct(qty_data, dataset_total)\n pct_chart = {\n 'chartType': 'doughnut',\n 'chartName': 'Status Pernikahan menurut Persentase Orang',\n 'dataFields': pct_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Persentase Orang',\n 'tooltipStringFormat': ['_', '%']\n }\n }\n\n chart_list = {'chartList': [qty_chart, pct_chart]}\n jsonprint(chart_list)\n return chart_list\n\n elif comparison == 'region':\n (qty_list, label_list) = \\\n create_data_by_region_qty(region_list, 'marriage')\n dataset_total_list = get_dataset_total_list(qty_list)\n pct_list = create_data_by_region_pct(qty_list,\n dataset_total_list)\n\n chart_list = {'chartList': [], 'labelList': label_list}\n for index, chart in enumerate(qty_list):\n pct_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', '%'],\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Persentase Orang'\n } \n qty_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Status Pernikahan',\n 'measureAxis': 'Jumlah Orang'\n }\n\n field = pct_list[index]['field']\n if field == 'Kawin':\n pct_list[index]['chartName'] = \\\n 'Persentase Warga yang sudah ' + field + \\\n ' menurut Kecamatan'\n qty_list[index]['chartName'] = \\\n 'Jumlah Warga yang sudah ' + field + \\\n ' menurut Kecamatan'\n else:\n pct_list[index]['chartName'] = \\\n 'Persentase Warga yang ' + field + \\\n ' menurut Kecamatan'\n qty_list[index]['chartName'] = \\\n 'Jumlah Warga yang ' + field + \\\n ' menurut Kecamatan' \n\n chart_list['chartList'].append(pct_list[index])\n chart_list['chartList'].append(qty_list[index])\n\n jsonprint(chart_list)\n return chart_list", "def addPoints(self):\n numDims = 
len(self.relation.fieldNames) - 1\n datasets = self.relation.getScaledDatasets()\n for ds in datasets:\n points = []\n lines = []\n for i in range(numDims):\n p = PlotPoint(self, ds[i], ds[-1])\n p.setParentItem(self.axes[i])\n points.append(p)\n\n if 0 < i:\n lines.append(PlotLine(self, points[i - 1], p))\n if i == numDims - 1:\n lines.append(PlotLine(self, p, points[0]))\n\n group = self.scene().createItemGroup(lines)\n group.dataClassLabel = points[0].cls\n self.lineGroups.append(group)", "def append(self, curve, idx=None):\n insert = False if idx is None else True\n if isinstance(curve, Graph):\n for c in curve.iterCurves():\n if insert:\n self.data.insert(idx, c)\n idx += 1\n else:\n self.data.append(c) # c are Curves, we can do like that\n elif isinstance(curve, list):\n for c in curve:\n if insert:\n self.data.insert(idx, c)\n idx += 1\n else:\n self.append(c) # call itself, must check if c is a Curve\n elif isinstance(curve, Curve):\n if insert:\n self.data.insert(idx, curve)\n else:\n self.data.append(curve)\n elif isinstance(curve, str) and curve == 'empty':\n curve = Curve([[np.inf], [np.inf]], {})\n if insert:\n self.data.insert(idx, curve)\n else:\n self.data.append(curve)\n else:\n print('Graph.append: failed (type:', type(curve), ')')", "def addFromList(self, drawlist, canv):\n\n if self._debug: logger.debug(\"enter Frame.addFromlist() for frame %s\" % self.id)\n if self.showBoundary:\n self.drawBoundary(canv)\n\n while len(drawlist) > 0:\n head = drawlist[0]\n if self.add(head,canv,trySplit=0):\n del drawlist[0]\n else:\n #leave it in the list for later\n break", "def add_datum(self, x, fields):\n\t\n\t\tfor name, value in fields.iteritems():\n\t\t\tif name not in self.curves:\n\t\t\t\tcurve = QwtPlotCurve()\n\t\t\t\tcurve.attach(self)\n\t\t\t\tself.curves[name] = [curve, [], []]\n\t\t\t\n\t\t\tstuff = self.curves[name]\n\t\t\tstuff[1].append(x)\n\t\t\tstuff[2].append(value)", "def add(self, output_svg: Drawing) -> None:\n pass", "def append_fig(self, title, axis, fignum, name, fig):\r\n # main dictionary\r\n self.fig_dict[name] = (fig, fig.get_size_inches(), fig.dpi)\r\n # aliases dictionaries\r\n self.aliases_dict[title] = name\r\n self.aliases_dict[axis] = name\r\n self.aliases_dict[fignum] = name\r\n # figure list aliases entries\r\n self.titles.append(title)\r\n self.axes.append(axis)\r\n self.fignums.append(fignum)\r\n # figure list default entries\r\n self.listWidget.addItem(title)", "def get_list_chart_queryset(self, result_list):\n return result_list", "def add_plot(self, method, x, y, *args, **kwargs):\n self.plots.append([self.Plot(method, x, y, args, kwargs)])", "def create_religion_chart(region_list, comparison):\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'religion')\n qty_chart = {\n 'chartType': 'bar',\n 'chartName': 'Agama menurut Jumlah Penganut',\n 'dataFields': qty_data,\n 'dataOptions': {\n 'fieldAxis': 'Agama',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n dataset_total = sum(qty_data['values'])\n pct_data = create_data_by_field_pct(qty_data, dataset_total)\n pct_chart = {\n 'chartType': 'doughnut',\n 'chartName': 'Agama menurut Persentase Penganut',\n 'dataFields': pct_data,\n 'dataOptions': {\n 'fieldAxis': 'Agama',\n 'measureAxis': 'Persentase Orang',\n 'tooltipStringFormat': ['_', '%']\n }\n }\n\n chart_list = {'chartList': [qty_chart, pct_chart]}\n jsonprint(chart_list)\n return chart_list\n\n elif comparison == 'region':\n (qty_list, label_list) = \\\n 
create_data_by_region_qty(region_list, 'religion')\n\n dataset_total_list = get_dataset_total_list(qty_list)\n pct_list = create_data_by_region_pct(qty_list, \n dataset_total_list)\n\n chart_list = {'chartList': [], 'labelList': label_list}\n for index, chart in enumerate(qty_list):\n pct_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', '%'],\n 'fieldAxis': 'Agama',\n 'measureAxis': 'Persentase Orang'\n }\n qty_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Agama',\n 'measureAxis': 'Jumlah Orang'\n }\n\n field = pct_list[index]['field']\n pct_list[index]['chartName'] = \\\n 'Persentase Orang Penganut Agama ' + field + \\\n ' menurut Kecamatan'\n qty_list[index]['chartName'] = \\\n 'Jumlah Orang Penganut Agama ' + field + \\\n ' menurut Kecamatan'\n\n chart_list['chartList'].append(pct_list[index])\n chart_list['chartList'].append(qty_list[index])\n\n jsonprint(chart_list)\n return chart_list", "def parse_data(paints_list,data):\n paints_list.extend(data)", "def __iadd__(self, func):\n self.append_plot(func)\n return self", "def create_occupation_chart(region_list, comparison):\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'occupation')\n qty_data['labels'] = \\\n [label for label, value in \n sorted(zip(qty_data['labels'], qty_data['values']), \n key=lambda x: x[1], reverse=True)]\n qty_data['values'] = sorted(qty_data['values'], reverse=True)\n\n top_ten_chart = {\n 'chartType': 'bar',\n 'chartName': '10 Pekerjaan dengan Jumlah Orang Paling Banyak',\n 'dataFields': {\n 'labels': qty_data['labels'][:10],\n 'values': qty_data['values'][:10]\n },\n 'dataOptions': {\n 'fieldAxis': 'Pekerjaan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n num_jobs = len(qty_data['labels'])\n bottom_ten_chart = {\n 'chartType': 'bar',\n 'chartName': '10 Pekerjaan dengan Jumlah Orang Paling Sedikit',\n 'dataFields': {\n 'labels': qty_data['labels'][num_jobs - 10:],\n 'values': qty_data['values'][num_jobs - 10:]\n },\n 'dataOptions': {\n 'fieldAxis': 'Pekerjaan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n chart_list = {'chartList': [top_ten_chart, bottom_ten_chart]}\n\n for start in range(10, num_jobs - 10,10):\n end = (start + 10) if start != 70 else (start + 5) \n chart_list['chartList'].append({\n 'chartType': 'bar',\n 'chartName': 'Pekerjaan berdasarkan Jumlah ' + \\\n 'Orang: #' + str(start) + \\\n '-#' + str(end),\n 'dataFields': {\n 'labels': qty_data['labels'][start:end],\n 'values': qty_data['values'][start:end] \n },\n 'dataOptions': {\n 'fieldAxis': 'Pekerjaan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n } \n })\n\n jsonprint(chart_list)\n return chart_list \n\n elif comparison == 'region': \n (qty_list, label_list) = \\\n create_data_by_region_qty(region_list, 'occupation')\n\n for chart in qty_list[:]:\n if all_x(chart['dataFields']['values'], 0):\n qty_list.remove(chart)\n else:\n chart['chartName'] = 'Jumlah Orang dengan ' + \\\n 'pekerjaan ' + \\\n chart['field']\n chart['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Kecamatan',\n 'measureAxis': 'Jumlah Orang'\n }\n\n chart_list = {'chartList': qty_list, 'labelList': label_list}\n jsonprint(chart_list)\n return chart_list" ]
[ "0.65586525", "0.65164983", "0.6214184", "0.6076237", "0.59907544", "0.5842592", "0.5778565", "0.5761721", "0.5710783", "0.5676788", "0.5672405", "0.5625384", "0.561063", "0.55874395", "0.5584001", "0.55779135", "0.5530055", "0.5526744", "0.54800946", "0.5462886", "0.5395071", "0.538918", "0.53759545", "0.53728133", "0.5368841", "0.5357004", "0.53532904", "0.5343117", "0.5316785", "0.5289997" ]
0.7315353
0