query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4-10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
check to see whether an id is for a user | def is_user(id):
return id.startswith('U') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()",
"def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId",
"def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value",
"def check_id(self, id):",
"def is_self(user_id):\n query_user_id = request.args.get('user_id', default=None, type=int)\n return user_id==query_user_id and user_id is not None",
"def __contains__(self, userid):\r\n userid = int(userid)\r\n return bool(userid in self.players)",
"def user_in_session():\n return 'user_id' in login_session",
"def checkIfUserIsCurrent(self,userId : str) -> bool:\n\n if userId == userId[0]:\n return True\n else:\n return False",
"def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")",
"def same_user(user_id):\n return user_id == login_session['user_id']",
"def is_registered(user_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return str(user_id) in inventories",
"def user_exists(mail_or_id) -> bool:\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n if type(mail_or_id) is int:\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE id=?\n \"\"\", (mail_or_id,))\n else: #mail\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE mail=?\n \"\"\", (mail_or_id,))\n \n conn.commit()\n \n exists = bool(len(list(c)))\n \n conn.close()\n\n return exists",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def is_user_id_available(self,\n\t user_id,\n\t shutit_pexpect_child=None,\n\t note=None,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.is_user_id_available(user_id,\n\t\t note=note,\n\t\t loglevel=loglevel)",
"def test_user_id_get(self):\n pass",
"def user_has_permission(self, id: int, user: User) -> bool:\n return self.get_queryset().filter(pk=id).filter_for_user(user).exists()",
"def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def exists(cls, user_id):\n user_id = int(user_id)\n user = DB_USER_TABLE.get(doc_id=user_id)\n if not user:\n raise ValueError(f\"unknown user '{user_id}'\")\n return user_id",
"def checkIfUserExists(self, userID):\n return self.db.select_user(userID)",
"def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)",
"def findUniqueUserID(userID):\n connector = appEngine.connect()\n userIdentifier = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID).fetchone()\n #userIdentifier = db.session.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID)\n if type(userIdentifier) == type(None):\n return False # this means there is no user in the database yet\n else:\n return True # this means there is a user in the database",
"def check_id(server_id, user_id):\n\n # The user_id parameters here is the same as nym_id in other api calls\n\n # The method is described as a \"ping\" in the API documentation, which should\n # be called after wallet initialized. However a remote account on the server\n # is required.\n\n if hasattr(opentxs, 'OTAPI_Wrap_pingNotary'): # new api name\n retval = opentxs.OTAPI_Wrap_pingNotary(server_id, user_id)\n else: # todo: old api name, remove in due time\n retval = opentxs.OTAPI_Wrap_checkServerID(server_id, user_id)\n\n print(\"(debug) check_server_id retval=\", retval)\n\n # The return value `1` for success is defined by\n # case (OTClient::checkServerId)\n # in OTClient::ProcessUserCommand()\n\n return retval == 1",
"def check_if_user_exists(self, email):\n for user in self.users.values():\n if user['email'] == email:\n return user['id']\n else:\n return False",
"def check_user(entry_code):\n\tif len(User.objects.filter(unique_code=entry_code)) == 1:\n\t\treturn(True)\n\telse:\n\t\traise Http404('No users exist with this code.')",
"def userObjExists(self, user : bbUser.bbUser) -> bool:\n return self.userIDExists(user.id)",
"def get_user_from_uid(uid):\n id, tmp = uid.split('-')\n user = AuthUser.query.filter_by(id=id).first()\n if user and user.get_uid() == uid:\n return True\n return False"
] | [
"0.79370236",
"0.75839674",
"0.7334541",
"0.732782",
"0.72933257",
"0.7157885",
"0.71560794",
"0.70645714",
"0.70558435",
"0.7010881",
"0.6988318",
"0.69240403",
"0.69037765",
"0.6896436",
"0.6886491",
"0.6883643",
"0.6861566",
"0.6858304",
"0.6853838",
"0.6834101",
"0.68314224",
"0.68115324",
"0.67674124",
"0.67472446",
"0.6740736",
"0.67066634",
"0.66630644",
"0.66575384",
"0.6654994",
"0.66423166"
] | 0.8175753 | 0 |
broadcast a new user joining the group | def user_joined_group(cls, group, user):
text = "{} joined the group chat".format(user.username)
cls._broadcast_group(group, None, group, text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def notify_new_user(self, user):\n # join to default group\n g = self.root.get('community-general')\n if g:\n self.join_group(user, g)",
"def join_server(self, data, user):\n # User will spawn in one of following rooms\n user.room = choice((\"100\", \"300\", \"800\", \"804\"))\n user.send([\"js\", \"-1\", \"1\", \"1\", \"0\", \"0\"])\n self.add(user)",
"def on_joinuser(self, data):\n user_data = {\n 'un': data[3], # nick\n 'ml': data[4], # mod level\n 'st': data[5], # status related\n 'id': data[6], # ezcapechat user id\n 'su': data[7] # ?\n }\n if data[3] == self.users.client.nick:\n self.users.add_client_data(user_data)\n else:\n _user = self.users.add(data[3], user_data)\n print ('%s Joined the room.' % _user.nick)\n\n #BOT\n if (_user.nick.lower() in self.autogreet):\n self.send_public(\"%s, %s\" % (_user.nick, self.autogreet[_user.nick.lower()]))",
"def on_join(data):\r\n\r\n username = data[\"username\"]\r\n room = data[\"room\"]\r\n join_room(room)\r\n\r\n # Broadcast that new user has joined\r\n send({\"msg\": username + \" has joined the \" + room + \" room.\"}, room=room)",
"def _broadcast_group(cls, sender, sender_sid, group, text):\n # todo make this method async\n for recipient in group.get_users():\n if recipient == sender:\n continue\n cls._broadcast_user(sender, sender_sid, recipient, text, group.id)",
"def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()",
"def userJoined(self, user, channel):\n self.dispatch('population', 'userJoined', user, channel)",
"def user_left_group(cls, group, user):\n text = \"{} left the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)",
"def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)",
"def AddMemberToGroup(group_id,user_id):\r\n Group.AddMemberToGroup(group_id,user_id)",
"def action_add_to_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.add_user_to_group(user, group):\n info(f\"User {user} sucessfully added to {group}\")\n else:\n error(f\"Unable to add {user} to {group}, check privileges or dn\")",
"def join(self, gid, group_fields=None):\n kwargs = {}\n if group_fields:\n kwargs['data'] = group_fields\n r = self.put(\"/user/groups/{gid:d}\".format(gid=gid), **kwargs )\n if r.status_code == 204:\n return { \"status\" : True, \"message\" : \"\"}\n return { \"status\" : False, \"message\" : r.json() }",
"def broadcast_to_users(self, text: str, sending_group):\n if sending_group == \"global\":\n for user in self.__users.values():\n user.send_message(f\"broadcast from the server: {text}\")\n print(\"in broadcast to users global\")\n elif sending_group.isdigit():\n sending_group = int(sending_group)\n for user in self.__users.values():\n for station in user.stations:\n if station.line_number == sending_group:\n user.send_message(f\"broadcast from the server: {text}\")\n print(f\"in broadcast to users line{sending_group}\")",
"def __send_broadcast_to_users(self, sending_group=\"global\"):\n\n if sending_group == \"global\":\n data = self.__global_broadcast_entry.get()\n self.__global_broadcast_entry.delete(0, 'end')\n print(f\"broad casting data: {data}\")\n self.__telegram_controller.broadcast_to_users(data, sending_group = \"global\")\n\n elif sending_group == \"line\":\n line = self.__line_number_broadcast_entry.get()\n if len(line) >0 and line.isnumeric():\n data = self.__line_text_broadcast_entry.get()\n self.__line_text_broadcast_entry.delete(0, 'end')\n self.__line_number_broadcast_entry.delete(0, 'end')\n self.__telegram_controller.broadcast_to_users(data, sending_group=line)\n else:\n print(f\"line number must be a number, {line}\")\n else:\n print(f\"{sending_group} is an invalid sending group\")",
"def join_group(self, user, group, force=0):\n if not force and not group.can_join(user):\n raise NotEnoughPrivileges\n \n group.add_member(user)\n user.add_to_group(get_usergroup_database().get_usergroup(group.get_user_id()))\n if hasattr(user, 'karma_activity_credit'):\n # groups can join groups, and groups don't have karma_activity_credit\n user.karma_activity_credit()\n \n self._flush_user_data_caches(user)",
"def userJoined(self, user, channel):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"joined\", user=\"server\", channel=channel,\n nicklist=[user])",
"def add_user_to_group(user, group):\n Command.run(['usermod', '-a', '-G', user, group])",
"def add_new_member(self, event):\n body = event['body']\n body = json.loads(body)\n\n required_fields = ['group_id', 'new_user_id']\n for f in required_fields:\n if f not in body:\n return get_bad_request('POST body missing field {}'.format(f))\n\n group_id = body['group_id']\n new_user_id = body['new_user_id']\n \n user = self.mealShareUsers.get_user_cognito_data(event)\n current_user = user['user_id']\n \n # Requesting user must already be a member\n if not self.mealShareGroups.is_user_in_group(current_user, str(group_id)):\n return {\n 'statusCode': 401,\n 'statusMessage': 'User {} is not a member of the group ID {} and can not add a person to it'.format(current_user, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n \n # Check if adding was successful\n success = self.mealShareGroups.add_user_to_group(new_user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully added {} to group {}'.format(new_user_id, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to add user {} to group {} by {}'.format(new_user_id, group_id, current_user),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }",
"def userJoined(self, user, channel):\n ss = self.findSessions(channel)[0]\n user = user.decode(ss.encoding)\n r = ss.addNick(user)\n self.sendResponse(r)",
"def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node",
"async def react_join(a: Message):\n if a.action.member_id == club_id:\n await a.answer(r_register_help)\n stats.jincr()",
"def new_user(self, socket, name):\r\n for i in self.matches.values(): # Si hay creadas se buscan disponibles\r\n if i.available_to_join:\r\n i.add_new_client(socket, name)\r\n return\r\n # Si no se encontraron disponibles entonces se crea una\r\n new_match = Match(self.send_function)\r\n self.matches[new_match.match_id] = new_match\r\n new_match.add_new_client(socket, name)",
"def joined(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS + 1\n\tprint(message)\n\tsession['name'] = message['name']\n\tsession['room'] = message['room']\n\troom = session.get('room')\n\tjoin_room(room)\n\tprint('%s : joined' % session)\n\temit('_joined', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)",
"def join_room(self, data, user):\n # Filters out | to prevent string injection\n data[\"args\"] = [i.replace(\"|\", \"\") for i in data[\"args\"]]\n\n self.remove(user)\n\n user.room = data[\"args\"][1]\n user.x = data[\"args\"][2]\n user.y = data[\"args\"][3]\n user.frame = \"0\"\n\n self.add(user)",
"async def join(self, gid):\n\t\tif self.group != None:\n\t\t\tif self.group.gid == gid:\n\t\t\t\traise exceptions.ClientError('IN_GROUP')\n\n\t\tif gid and not utilities.validate_string(gid):\n\t\t\traise exceptions.ClientError('INVALID_STRING')\n\n\t\tif gid:\n\t\t\tgroup = Group.register(gid)\n\t\telse:\n\t\t\ttries = 0\n\t\t\twhile 1:\n\t\t\t\tif tries >= 5:\n\t\t\t\t\traise exceptions.ClientError('INVALID_GROUP')\n\t\t\t\tgid = utilities.random_string(16)\n\t\t\t\tgroup = Group.register(gid)\n\t\t\t\tif len(group.members) == 0:\n\t\t\t\t\tbreak\n\t\t\t\ttries += 1\n\n\t\tif group.in_game:\n\t\t\traise exceptions.ClientError('IN_GAME')\n\n\t\tawait group.add(self)",
"def add_member_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n user_id = str(args.get('user_id'))\n required_properties = {\n \"@odata.id\": f'https://graph.microsoft.com/v1.0/users/{user_id}'}\n client.add_member(group_id, required_properties)\n\n human_readable = f'User {user_id} was added to the Group {group_id} successfully.'\n return human_readable, NO_OUTPUTS, NO_OUTPUTS",
"def joinedChannel(self, channel, users):\n pass",
"def add_user_to_group(self, login, group):\n return self.request('put',\n '/groups/{}/users/{}'.format(group, login),\n msg='adding user {} to group {}'.format(login, group)\n )",
"def add_to_group(user: User, group: Group) -> Result:\n if user.pw_name in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/addgroup\", user.pw_name, group.gr_name])\n group.gr_mem.append(user.pw_name)\n return Result(State.success)",
"def invite(self,roomName,user):\n\n self.sendCommand(roomName +\" /invite\",user)"
] | [
"0.66464144",
"0.6612628",
"0.65133005",
"0.65117073",
"0.64265233",
"0.6303679",
"0.6245498",
"0.6235882",
"0.6231058",
"0.6172051",
"0.61579597",
"0.61176723",
"0.60869974",
"0.6015817",
"0.6015379",
"0.601436",
"0.5992443",
"0.59782267",
"0.59572095",
"0.59416044",
"0.5917493",
"0.5907165",
"0.58975923",
"0.58950806",
"0.58592874",
"0.58423334",
"0.5827844",
"0.5783919",
"0.5748963",
"0.5726046"
] | 0.71382284 | 0 |
broadcast a user leaving the group | def user_left_group(cls, group, user):
text = "{} left the group chat".format(user.username)
cls._broadcast_group(group, None, group, text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def leave_room(self, label):\n user = self.user\n room = await self.get_room(label)\n\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.leave',\n 'label': label,\n 'username': user.username,\n 'title': room.name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(label)\n\n # Remove client from the group so he no longer get room messages\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n await self.send_json(\n return_value(\n ACTION_LEAVE, room.label, TO_ME, MSG_LEAVE, NO_MESSAGE\n )\n )",
"def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None",
"def leave_group():\n incoming = request.get_json()\n Participant.delete_participant_with_user_id_and_room_id(session['user_id'], incoming['room_id'])\n return jsonify(results = incoming['room_id'])",
"async def leave(self):\n\t\tif self.group == None:\n\t\t\traise exceptions.ClientError('NO_GROUP')\n\n\t\tawait self.group.remove(self)\n\n\t\tself.group = None",
"def on_leave(data):\r\n\r\n username = data['username']\r\n room = data['room']\r\n leave_room(room)\r\n send({\"msg\": username + \" has left the room\"}, room=room)",
"def user_joined_group(cls, group, user):\n text = \"{} joined the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)",
"def leave(self, user):\n membership = self.check_membership(user)\n if membership is not None and membership.role != 'O':\n if membership.role == 'B':\n membership.role = 'LB'\n else:\n membership.role = 'L'\n membership.save()",
"def decline_invitation(self, user, group):\n if group.is_invited(user):\n group.remove_invitation(user)",
"def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)",
"def action_remove_from_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.remove_user_from_group(user, group):\n info(f\"User {user} sucessfully removed from {group}\")\n else:\n error(f\"Unable to remove {user} from {group}, check privileges or dn\")",
"def leave(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n for tup in self.player_queue.queue:\n if tup[0] == username:\n self.player_queue.queue.remove(tup)\n self._add_to_whisper_queue(username, \"You've left the queue.\")\n user.times_played -= 1\n break\n else:\n self._add_to_whisper_queue(username, \"You're not in the queue and must join before leaving.\")",
"def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return",
"def leave(msg: telebot.types.Message):\n if utils.in_menu(msg.from_user):\n bot.reply_to(\n msg,\n 'This command outside of game is useless.'\n )\n return\n\n game, user, opponent = utils.get_game_user_opponent(msg.from_user)\n if not game or not user:\n # todo log something\n return\n\n user.state = states.USER_IN_MENU\n user.losses += 1\n utils.update_user(user)\n bot.send_message(\n user.user_id,\n 'You surrendered.'\n )\n\n if opponent:\n opponent.state = states.USER_IN_MENU\n opponent.wins += 1\n utils.update_user(opponent)\n bot.send_message(\n opponent.user_id,\n 'Your opponent surrendered'\n )\n\n field = json.loads(game.field)\n sig = 1 if user == game.user1 else 2\n\n # changes users emojis to poop\n for i in range(len(field)):\n for j in range(len(field[i])):\n if field[i][j] == sig:\n field[i][j] = 4\n\n if opponent:\n utils.send_updated_field(bot, field, game, opponent)\n Game.delete_by_id(game.id)",
"def userLeft(self, user, channel):\n ss = self.findSessions(channel)[0]\n user = user.decode(ss.encoding)\n self.sendResponse(ss.removeNick(user))",
"def leaveMUC(self, room, nick, msg='', pfrom=None):\n if msg:\n self.xmpp.sendPresence(pshow='unavailable', pto=\"%s/%s\" % (room, nick), pstatus=msg, pfrom=pfrom)\n else:\n self.xmpp.sendPresence(pshow='unavailable', pto=\"%s/%s\" % (room, nick), pfrom=pfrom)\n del self.rooms[room]",
"def delete_group(user):\n return 'do some magic!'",
"async def chat_leave(self, event):\n await self.send_json(\n return_value(\n ACTION_WENT_OFFLINE,\n event['label'],\n event['username'],\n MSG_LEAVE,\n NO_MESSAGE\n )\n )",
"def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)",
"def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)",
"def leave(ctx, network):\n return _leave(ctx.obj['client'], network)",
"async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)",
"async def leave_room(self, room_id):\n print(\"PublicChatConsumer\", \"leave_room\")\n if self.scope[\"user\"].is_authenticated:\n try:\n room: PublicChatRoom = await get_room_or_error(room_id)\n except ClientError as e:\n await self.handle_client_error(e)\n else:\n # Remove user from room users\n await disconnect_user(room, self.scope[\"user\"])\n\n # Set room_id to None\n self.room_id = None\n\n # Remove user from the group\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n # Send the total number of connected users to the client\n connected_users_count = await get_connected_users_count(room)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"connected.users.count\",\n \"connected_users_count\": connected_users_count\n }\n )",
"def command_leavemessage(self, user, nick, channel, rest):\n\n params = rest.split(' ')\n if len(params) < 2:\n self.reply(\n channel,\n nick,\n self.formatDoc(\n \"Usage: {command_prefix}leavemessage target_nick message\"\n )\n )\n return False\n\n target = params[0]\n message = ' '.join(params[1:])\n\n with self.getDbSession() as db_session:\n db_session.add(\n Message(\n user=user,\n nick=nick,\n message_time=datetime.datetime.now(),\n to_nick=target,\n channel=channel,\n message=message\n )\n )\n\n self.reply(channel, nick, 'Message saved for %s' % target)",
"async def async_unjoin_me(self):\n if self._multiroom_wifidirect:\n for dev in self._multiroom_group:\n for device in self.hass.data[DOMAIN].entities:\n if device._is_master: ## TODO!!!\n cmd = \"multiroom:SlaveKickout:{0}\".format(self._slave_ip)\n value = await self._master.async_call_linkplay_httpapi(cmd, None)\n self._master._position_updated_at = utcnow()\n\n else:\n cmd = \"multiroom:Ungroup\"\n value = await self.async_call_linkplay_httpapi(cmd, None)\n\n if value == \"OK\":\n if self._master is not None:\n await self._master.async_remove_from_group(self)\n # await self._master.async_schedule_update_ha_state(True)\n self._multiroom_unjoinat = utcnow()\n self._master = None\n self._is_master = False\n self._slave_mode = False\n self._slave_ip = None\n self._multiroom_group = []\n # await self.async_schedule_update_ha_state(True)\n\n else:\n _LOGGER.warning(\"Failed to unjoin_me from multiroom. \" \"Device: %s, Got response: %s\", self.entity_id, value)",
"async def leaveserver(self, ctx, guild: int):\n guild = self.bot.get_guild(guild)\n await guild.leave()\n embed = discord.Embed(title=f\"left {guild.name} owned by: {guild.owner.name}\")\n embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)",
"def leave(self, *args, **kwargs):\n return self.bot.leave_chat(self.id, *args, **kwargs)",
"def left(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS - 1\n\troom = session.get('room')\n\tleave_room(room)\n\tprint('%s : left' % session)\n\temit('_left', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)",
"def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)",
"def userLeft(self, user, channel):\n self.dispatch('population', 'userLeft', user, channel)",
"def user_disappears(self, user):\n pass"
] | [
"0.68877256",
"0.6836113",
"0.6618386",
"0.6293848",
"0.62480164",
"0.6209803",
"0.61928344",
"0.61679953",
"0.61453825",
"0.6137914",
"0.61103255",
"0.6071488",
"0.59881574",
"0.59806395",
"0.5946936",
"0.592674",
"0.5914093",
"0.59117216",
"0.59084827",
"0.5901252",
"0.58909315",
"0.5847027",
"0.5838013",
"0.58291817",
"0.5789759",
"0.5788652",
"0.5755686",
"0.5749129",
"0.57347345",
"0.57271326"
] | 0.7276704 | 0 |
Start an oef node. | def _start_oef_node(self, network_node): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def launch_oef():\n script_path = os.path.join(\"scripts\", \"oef\", \"launch.py\")\n configuration_file_path = os.path.join(\"scripts\", \"oef\", \"launch_config.json\")\n print(\"Launching new OEF Node...\")\n subprocess.Popen(\n [\"python3\", script_path, \"-c\", configuration_file_path, \"--background\"],\n stdout=subprocess.PIPE,\n env=os.environ,\n cwd=ROOT_DIR,\n )\n\n # Wait for OEF\n print(\"Waiting for the OEF to be operative...\")\n wait_for_oef = subprocess.Popen(\n [os.path.join(\"sandbox\", \"wait-for-oef.sh\"), \"127.0.0.1\", \"10000\", \":\"],\n env=os.environ,\n cwd=ROOT_DIR,\n )\n\n wait_for_oef.wait(30)",
"def ex_start_node(self, node):\n # NOTE: This method is here for backward compatibility reasons after\n # this method was promoted to be part of the standard compute API in\n # Libcloud v2.7.0\n return self.start_node(node=node)",
"def start_node(self, **kwargs):\n # project_name, node_name\n\n try:\n if kwargs['project_name'] in self.data:\n project_name = kwargs['project_name']\n project_id = self.data[project_name]['project_id']\n if kwargs['node_name'] in self.data[project_name]['nodes']:\n node_name = kwargs['node_name']\n node_id = self.data[project_name]['nodes'][node_name]['node_id']\n resp = self.post_to_server('projects/{}/nodes/{}/start'.format(project_id, node_id),{})\n print('Node \\'{}\\' started.'.format(node_name))\n self.data[project_name]['nodes'][node_name]['status'] = \"running\"\n except:\n traceback_print_exc()",
"def runnode(self, node, pynode=False):\n if pynode:\n process = coreinterface.PythonNode(node, self)\n else:\n process = coreinterface.ExecutableNode(node, self)\n process.spawn()\n self.loadingnodes[node] = process",
"def __init__(self):\n self.start = Node('-1')",
"def startNode(klass):\n try:\n ws = klass('ws://localhost:8080/ws')\n ws.daemon = False\n ws.connect()\n except:\n ws.close()",
"def start_node(self, node, override_cfg_params=None):\n node.account.mkdirs(RedpandaService.DATA_DIR)\n node.account.mkdirs(os.path.dirname(RedpandaService.CONFIG_FILE))\n\n self.write_conf_file(node, override_cfg_params)\n\n if self.coproc_enabled():\n self.start_wasm_engine(node)\n\n cmd = (f\"nohup {self.find_binary('redpanda')}\"\n f\" --redpanda-cfg {RedpandaService.CONFIG_FILE}\"\n f\" --default-log-level {self._log_level}\"\n f\" --logger-log-level=exception=debug:archival=debug \"\n f\" --kernel-page-cache=true \"\n f\" --overprovisioned \"\n f\" --smp {self._num_cores} \"\n f\" --memory 6G \"\n f\" --reserve-memory 0M \"\n f\" >> {RedpandaService.STDOUT_STDERR_CAPTURE} 2>&1 &\")\n\n node.account.ssh(cmd)\n\n wait_until(\n lambda: Admin.ready(node).get(\"status\") == \"ready\",\n timeout_sec=RedpandaService.READY_TIMEOUT_SEC,\n err_msg=f\"Redpanda service {node.account.hostname} failed to start\",\n retry_on_exc=True)",
"def run(self):\n self.etcd.start()",
"async def start_node(request: web.Request) -> web.Response:\n req_ctx = RequestContext.parse_obj(request)\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n try:\n\n await projects_api.start_project_node(\n request, req_ctx.user_id, path_params.project_id, path_params.node_id\n )\n\n raise web.HTTPNoContent(content_type=MIMETYPE_APPLICATION_JSON)\n except ProjectStartsTooManyDynamicNodes as exc:\n raise web.HTTPConflict(reason=f\"{exc}\") from exc\n except ProjectNotFoundError as exc:\n raise web.HTTPNotFound(\n reason=f\"Project {path_params.project_id} not found\"\n ) from exc\n except NodeNotFoundError as exc:\n raise web.HTTPNotFound(\n reason=f\"Node {path_params.node_id} not found in project\"\n ) from exc",
"def __init__(self, start_node):\n self.start_node = start_node",
"def on_rcrnode_open_btn_clicked(self):\n # self.rcrnode.init_node()\n self.rcrnode.resume()",
"def enter(self):\n log.debug(f\"Entering context creator for PutDoer; node running {self.node.isRunning()}.\")\n if not self.node.isRunning():\n conf = configs.get(self.port)\n self.node = get_node(self.port, **conf)",
"def GachaCraftNodeExcelStart(builder):\n return Start(builder)",
"def goto_start(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))",
"def main():\n arg_fmt = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(formatter_class=arg_fmt,\n description=main.__doc__)\n\n parser.add_argument(\n '-s', '--save', metavar='PATH',\n help='save current EE config to given file'\n )\n parser.add_argument(\n '-l', '--load', metavar='PATH',\n help='load config from given file onto EE'\n )\n args = parser.parse_args(rospy.myargv()[1:])\n\n print(\"Initializing node... \")\n rospy.init_node('ee_config_editor', anonymous=True)\n\n ee = intera_interface.get_current_gripper_interface()\n if not ee:\n rospy.logerr(\"Could not detect an attached EndEffector!\")\n return\n\n if args.save:\n rospy.loginfo(\"Saving EE config to {}\".format(args.save))\n save_config(ee, args.save)\n\n if args.load:\n rospy.loginfo(\"Loading config and writing config to ClickSmart from {}\".format(args.load))\n load_config(ee, args.load)\n\n def clean_shutdown():\n print(\"\\nExiting example...\")\n\n rospy.on_shutdown(clean_shutdown)",
"def enter(self):\n log.debug(\"Entering context creator for GetDoer\")\n if not self.node.isRunning():\n conf = configs.get(self.port)\n self.node = get_node(self.port, **conf)",
"def start(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.start_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def launch(self, node):\n if not self.started:\n raise RLException(\"please start ROSLaunch first\")\n elif not isinstance(node, Node):\n raise ValueError(\"arg must be of type Node\")\n\n proc, success = self.parent.runner.launch_node(node)\n if not success:\n raise RLException(\"failed to launch %s/%s\"%(node.package, node.type))\n return proc",
"def start_kernel(self, **kw):",
"def start(self):\n #url = \"http://xapi.openstreetmap.org\" \\\n #url = \"http://osm.bearstech.com\" \\\n url = \"http://osmxapi.hypercube.telascience.org\" \\\n \"/api/0.6/node[amenity=%s][bbox=%s]\" % \\\n (self._amenity, self._location.getBox())\n\n self._has_list = False\n self._places = None\n self._osm_hand.clear_places()\n \n try:\n self._net_if.download(url)\n except Exception as inst:\n self.send_error(inst)",
"def start(cobj):\n pass",
"def start_kernel(self, kernel_name=None, **kwargs):",
"def start(self, **kwargs):\n return self.client.api.start(self.id, **kwargs)",
"def start():",
"def start():",
"def start():",
"def start():",
"def start(self, **kwargs):\n pass",
"def start(self, **kwargs):\n pass",
"def start(self):\n gevent.spawn(self.run)"
] | [
"0.73455715",
"0.67564565",
"0.6507637",
"0.5896183",
"0.5798123",
"0.56471074",
"0.5645573",
"0.5598258",
"0.5573087",
"0.556594",
"0.55550206",
"0.5545505",
"0.5535845",
"0.5517319",
"0.5511",
"0.55073994",
"0.5473905",
"0.5468678",
"0.54667735",
"0.54622465",
"0.54570323",
"0.54533374",
"0.5438118",
"0.5433023",
"0.5433023",
"0.5433023",
"0.5433023",
"0.5412878",
"0.5412878",
"0.54104924"
] | 0.82847816 | 0 |
Test that a generated protocol's serialisation + deserialisation work correctly. | def test_generated_protocol_serialisation(self):
# create a message
reply_message = {1: "number one", 2: "number two", 7: "number seven"}
# message 1
message = TwoPartyNegotiationMessage(
message_id=1,
dialogue_reference=(str(0), ""),
target=0,
performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,
reply_message=reply_message,
)
# serialise the message
encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)
# deserialise the message
decoded_message = TwoPartyNegotiationSerializer().decode(
encoded_message_in_bytes
)
# Compare the original message with the serialised+deserialised message
assert decoded_message.message_id == message.message_id
assert decoded_message.dialogue_reference == message.dialogue_reference
assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]
assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]
assert decoded_message.target == message.target
assert decoded_message.performative == message.performative
assert decoded_message.reply_message == message.reply_message | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob",
"def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2",
"def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob",
"def test_protocols(container, protocol):\n assert isinstance(container, protocol)",
"def test_buildProtocol(self):\n f = AvatarFactory('world')\n p = f.buildProtocol(None)\n self.assertEqual(p.factory, f)\n self.assertEqual(p.world, 'world')\n self.assertTrue(isinstance(p, AvatarProtocol))",
"def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2",
"def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)",
"def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy",
"def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy",
"def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))",
"def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP",
"def test_messagepack_serialization(self, molecule):\n serialized = molecule.to_messagepack()\n molecule_copy = Molecule.from_messagepack(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])",
"def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))",
"def test_buildProtocolStoresFactory(self):\n xs = self.factory.buildProtocol(None)\n self.assertIdentical(self.factory, xs.factory)",
"def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)",
"def test_messagepack_serialization(self, molecule):\n serialized = molecule.to_messagepack()\n molecule_copy = Molecule.from_messagepack(serialized)\n assert molecule == molecule_copy",
"def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)",
"def test_serialize_no_metadata(self):\n pass # pragma: no cover",
"def test_decoding_method(self):\n data = service_call.encode_call(\"foo\", [42])\n name, params = service_call.decode_call(data)\n\n self.assertEqual(name, \"foo\")\n self.assertEqual(params, [42])",
"def test_decode(self):\n pass # TODO(tlarsen)",
"def testDoNotEncodeStrangeObjects(self):\n class BogusObject(object):\n\n def check_initialized(self):\n pass\n\n self.assertRaises(TypeError,\n protojson.encode_message,\n BogusObject())",
"def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s",
"def test_convert_proto_plus_to_protobuf_if_protobuf(self):\n protobuf = ProtobufFixture()\n converted = util.convert_proto_plus_to_protobuf(protobuf)\n self.assertEqual(protobuf, converted)",
"def test_decode():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n message = dumps({\n \"data\": \"data\",\n \"mediaType\": DerivedSchema.MEDIA_TYPE,\n })\n assert_that(codec.decode(message), is_(equal_to({\n \"data\": \"data\",\n \"media_type\": DerivedSchema.MEDIA_TYPE,\n })))",
"def test_validation(protocol_registry):\n\n # pylint: disable=abstract-class-instantiated,function-redefined\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = None\n _relax_types = None\n\n with pytest.raises(TypeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = {'relax': {}}\n _relax_types = None\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = None\n _relax_types = {RelaxType.ATOMS: 'description'}\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = {'relax': {}}\n _relax_types = {'invalid-type': 'description'}\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()",
"def testNotJSON(self):\n self.assertRaises(ValueError,\n protojson.decode_message, MyMessage, '{this is not json}')",
"def testBinaryProtocolEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolFactory())",
"def test_large_msg(self):\n self.proto.makeConnection(self.transport)\n outstr = \"test\" * AMP_MAXLEN\n self.proto.data_to_server(MsgServer2Portal, 1, test=outstr)\n\n if pickle.HIGHEST_PROTOCOL == 5:\n # Python 3.8+\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#=5Z\\x0b\\xb8\\x80\\x13\\xe85h\\x80\\x8e\\xbam`Dc\\xf4><\\xf8g\"\n b\"\\x1a[\\xf8\\xda\\x97\\xa3_\\xb1\\x95\\xdaz\\xbe\\xe7\\x1a\\xde\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x1f\\x1eP\\x1d\\x02\\r\\x00\\rpacked_data.2\"\n b\"\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\"\n b\"\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03m\\xe0\\x06\"\n b\"\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\"\n b\"\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03n\\x1c\"\n b\"\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\\x03\\xa0\\xb4O\\xb0\\xf5gA\"\n b\"\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\"\n b\"\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )\n elif pickle.HIGHEST_PROTOCOL == 4:\n # Python 3.7\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#o\\x8e\\xd6\\x02-\\xe0\\x04z\\r\\x1a\\xa0\\xa3m+$\\xd2\"\n b\"\\x18\\xbe\\x0f\\x0f\\xfe\\x1d\\xdf\\x14\\xfe\\x8e\\xedjO\\xac\\xb9\\xd4v\\xf6o\\x0f\\xf3\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00X\\xc3\\x00P\\x10\\x02\\x0c\\x00\\rpacked_data.2\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03m\\xe0\\x06\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03n\\x1c\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\"\n b\"\\x03\\xa0\\xb4O\\xb0\\xf5gA\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n 
b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\"\n b\"\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )",
"def test_buildProtocol(self):\n queryData = (\"fromUser\", None, None)\n factory = irc.DccChatFactory(None, queryData)\n protocol = factory.buildProtocol(\"127.0.0.1\")\n self.assertIsInstance(protocol, irc.DccChat)\n self.assertEqual(protocol.factory, factory)",
"def test_proto_export_inverse(tmp_path, x, name):\n config = Config()\n typedef, message = x\n with tempfile.NamedTemporaryFile(\n mode=\"r+\", dir=str(tmp_path), suffix=\".proto\", delete=True\n ) as outfile:\n\n typedef_map = {name: typedef}\n\n protofile.export_proto(typedef_map, output_file=outfile)\n outfile.flush()\n\n outfile.seek(0)\n new_typedef_map = protofile.import_proto(config, input_file=outfile)\n\n config.known_types.update(new_typedef_map)\n # validate\n for name, typedef in new_typedef_map.items():\n blackboxprotobuf.validate_typedef(typedef, config=config)\n\n def _check_field_types(typedef1, typedef2):\n for field_num in typedef1.keys():\n # make sure we don't drop keys\n assert field_num in typedef2\n assert typedef1[field_num][\"type\"] == typedef2[field_num][\"type\"]\n if typedef1[field_num][\"type\"] == \"message\":\n message_typedef1 = None\n message_typedef2 = None\n if \"message_typedef\" in typedef1[field_num]:\n message_typedef1 = typedef1[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef1[field_num]:\n assert typedef1[field_num][\"message_type_name\"] in typedef_map\n message_typedef1 = typedef_map[\n typedef1[field_num][\"message_type_name\"]\n ]\n if \"message_typedef\" in typedef2[field_num]:\n message_typedef2 = typedef2[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef2[field_num]:\n assert (\n typedef2[field_num][\"message_type_name\"] in new_typedef_map\n )\n message_typedef2 = new_typedef_map[\n typedef2[field_num][\"message_type_name\"]\n ]\n\n _check_field_types(message_typedef1, message_typedef2)\n\n note(typedef_map)\n note(new_typedef_map)\n for name, typedef in typedef_map.items():\n _check_field_types(typedef, new_typedef_map[name])\n\n note(new_typedef_map[name])\n # try to actually encode a message with the typedef\n encode_forward = length_delim.encode_message(message, config, typedef_map[name])\n\n config.known_types = new_typedef_map\n encode_backward = length_delim.encode_message(\n message, config, new_typedef_map[name]\n )\n\n decode_forward, _, _, _ = length_delim.decode_message(\n encode_forward, config, new_typedef_map[name]\n )\n decode_backward, _, _, _ = length_delim.decode_message(\n encode_backward, config, typedef_map[name]\n )"
] | [
"0.6951617",
"0.68632156",
"0.67302066",
"0.661847",
"0.6527949",
"0.6503405",
"0.6458234",
"0.63932693",
"0.63932693",
"0.6388795",
"0.63429564",
"0.63004637",
"0.6290241",
"0.62630475",
"0.6233855",
"0.618288",
"0.61746454",
"0.6163066",
"0.6099586",
"0.6083544",
"0.60727656",
"0.6058211",
"0.603197",
"0.6010701",
"0.5998617",
"0.5996729",
"0.5993207",
"0.5980765",
"0.5970034",
"0.595375"
] | 0.7254605 | 0 |
Test that a generated protocol could be used in exchanging messages between two agents. | def test_generated_protocol_end_to_end(self):
# AEA components
ledger_apis = LedgerApis({}, FETCHAI)
wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})
wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})
identity_1 = Identity(
name="my_aea_1",
address=wallet_1.addresses.get(FETCHAI),
default_address_key=FETCHAI,
)
identity_2 = Identity(
name="my_aea_2",
address=wallet_2.addresses.get(FETCHAI),
default_address_key=FETCHAI,
)
oef_connection_1 = OEFConnection(
address=identity_1.address, oef_addr=HOST, oef_port=PORT
)
oef_connection_2 = OEFConnection(
address=identity_2.address, oef_addr=HOST, oef_port=PORT
)
resources_1 = Resources()
resources_2 = Resources()
# add generated protocols to resources
generated_protocol_configuration = ProtocolConfig.from_json(
yaml.safe_load(
open(
os.path.join(
self.cwd,
"tests",
"data",
"generator",
"two_party_negotiation",
"protocol.yaml",
)
)
)
)
generated_protocol = Protocol(
TwoPartyNegotiationMessage.protocol_id,
TwoPartyNegotiationSerializer(),
generated_protocol_configuration,
)
resources_1.protocol_registry.register(
TwoPartyNegotiationMessage.protocol_id, generated_protocol
)
resources_2.protocol_registry.register(
TwoPartyNegotiationMessage.protocol_id, generated_protocol
)
# create AEAs
aea_1 = AEA(identity_1, [oef_connection_1], wallet_1, ledger_apis, resources_1)
aea_2 = AEA(identity_2, [oef_connection_2], wallet_2, ledger_apis, resources_2)
inform_number = tuple((1370, 1991, 1, 4, 17, 6))
# message 1
message = TwoPartyNegotiationMessage(
message_id=1,
dialogue_reference=(str(0), ""),
target=0,
performative=TwoPartyNegotiationMessage.Performative.INFORM,
inform_number=inform_number,
)
encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)
envelope = Envelope(
to=identity_2.address,
sender=identity_1.address,
protocol_id=TwoPartyNegotiationMessage.protocol_id,
message=encoded_message_in_bytes,
)
# message 2
reply_message = {1: "number one", 2: "number two", 7: "number seven"}
message_2 = TwoPartyNegotiationMessage(
message_id=2,
dialogue_reference=(str(0), ""),
target=1,
performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,
reply_message=reply_message,
)
encoded_message_2_in_bytes = TwoPartyNegotiationSerializer().encode(message_2)
# add handlers to AEA resources
agent_1_handler = Agent1Handler(
skill_context=SkillContext(aea_1.context), name="fake_skill"
)
resources_1.handler_registry.register(
(
PublicId.from_str("fetchai/fake_skill:0.1.0"),
TwoPartyNegotiationMessage.protocol_id,
),
agent_1_handler,
)
agent_2_handler = Agent2Handler(
encoded_messsage=encoded_message_2_in_bytes,
skill_context=SkillContext(aea_2.context),
name="fake_skill",
)
resources_2.handler_registry.register(
(
PublicId.from_str("fetchai/fake_skill:0.1.0"),
TwoPartyNegotiationMessage.protocol_id,
),
agent_2_handler,
)
# add error skill to AEAs
error_skill_1 = Skill.from_dir(
os.path.join(AEA_DIR, "skills", "error"), aea_1.context
)
resources_1.add_skill(error_skill_1)
error_skill_2 = Skill.from_dir(
os.path.join(AEA_DIR, "skills", "error"), aea_2.context
)
resources_2.add_skill(error_skill_2)
# Start threads
t_1 = Thread(target=aea_1.start)
t_2 = Thread(target=aea_2.start)
try:
t_1.start()
t_2.start()
time.sleep(1.0)
aea_1.outbox.put(envelope)
time.sleep(5.0)
assert (
agent_2_handler.handled_message.message_id == message.message_id
), "Message from Agent 1 to 2: message ids do not match"
assert (
agent_2_handler.handled_message.dialogue_reference
== message.dialogue_reference
), "Message from Agent 1 to 2: dialogue references do not match"
assert (
agent_2_handler.handled_message.dialogue_reference[0]
== message.dialogue_reference[0]
), "Message from Agent 1 to 2: dialogue reference[0]s do not match"
assert (
agent_2_handler.handled_message.dialogue_reference[1]
== message.dialogue_reference[1]
), "Message from Agent 1 to 2: dialogue reference[1]s do not match"
assert (
agent_2_handler.handled_message.target == message.target
), "Message from Agent 1 to 2: targets do not match"
assert (
agent_2_handler.handled_message.performative == message.performative
), "Message from Agent 1 to 2: performatives do not match"
assert (
agent_2_handler.handled_message.inform_number == message.inform_number
), "Message from Agent 1 to 2: inform_numbers do not match"
assert (
agent_1_handler.handled_message.message_id == message_2.message_id
), "Message from Agent 1 to 2: dialogue references do not match"
assert (
agent_1_handler.handled_message.dialogue_reference
== message_2.dialogue_reference
), "Message from Agent 2 to 1: dialogue references do not match"
assert (
agent_1_handler.handled_message.dialogue_reference[0]
== message_2.dialogue_reference[0]
), "Message from Agent 2 to 1: dialogue reference[0]s do not match"
assert (
agent_1_handler.handled_message.dialogue_reference[1]
== message_2.dialogue_reference[1]
), "Message from Agent 2 to 1: dialogue reference[1]s do not match"
assert (
agent_1_handler.handled_message.target == message_2.target
), "Message from Agent 2 to 1: targets do not match"
assert (
agent_1_handler.handled_message.performative == message_2.performative
), "Message from Agent 2 to 1: performatives do not match"
assert (
agent_1_handler.handled_message.reply_message == message_2.reply_message
), "Message from Agent 1 to 2: reply_messages do not match"
time.sleep(2.0)
finally:
aea_1.stop()
aea_2.stop()
t_1.join()
t_2.join() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_identify(self):\n\n protocol_a, transport_a, tree_a, _ = self.create_protocol('protocol_a')\n protocol_b, transport_b, tree_b, _ = self.create_protocol('protocol_b')\n\n transport_a.get_extra_info.return_value = ('127.0.0.1', 1000)\n transport_b.get_extra_info.return_value = ('127.0.0.2', 1000)\n\n self.assertTrue(len(protocol_a.messages) == 0)\n\n protocol_a.identify()\n\n # Check that a message has been sent.\n self.assertTrue(transport_a.write.called)\n self.assertTrue(len(protocol_a.messages) == 1)\n\n # Get the message and check for the key.\n output = transport_a.write.call_args[0][0]\n self.assertTrue(protocol_a.self_key in output.decode())\n\n # Feed the message to the other protocol.\n protocol_b.data_received(output)\n\n # Check that the routing tree has been called to add a Node with the right key.\n self.assertTrue(tree_b.add_node.called)\n self.assertTrue(tree_b.add_node.call_args[0][0].key == 'protocol_a')\n\n # Check that the response on the identify is written to the transport.\n self.assertTrue(transport_b.write.called)\n\n # Get the response, check the key.\n output = transport_b.write.call_args[0][0]\n self.assertTrue(protocol_b.self_key in output.decode())\n\n # Feed the response to the original protocol.\n protocol_a.data_received(output)\n\n # The routing tree should've been called to add the Node with the right key.\n self.assertTrue(tree_a.add_node.called)\n self.assertTrue(tree_a.add_node.call_args[0][0].key == 'protocol_b')\n\n # The messages dict should now be empty again.\n self.assertTrue(len(protocol_a.messages) == 0)",
"def test_protocols(container, protocol):\n assert isinstance(container, protocol)",
"def test_invalid_same_peer_id2(self):\n # Disable idle timeout before creating any new peer because self.create_peer(...)\n # runs the main loop.\n self.conn.disable_idle_timeout()\n # Create new peer and disable idle timeout.\n manager3 = self.create_peer(self.network, peer_id=self.peer_id2)\n conn = FakeConnection(manager3, self.manager1)\n # Disable idle timeout.\n conn.disable_idle_timeout()\n # HELLO\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'HELLO')\n self.conn.run_one_step()\n conn.run_one_step()\n # PEER-ID\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.conn.run_one_step()\n conn.run_one_step()\n # READY\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'READY')\n self.conn.run_one_step()\n conn.run_one_step()\n # continue until messages stop\n self.conn.run_until_empty()\n conn.run_until_empty()\n self.run_to_completion()\n # one of the peers will close the connection. We don't know which one, as it depends\n # on the peer ids\n\n if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting:\n conn_dead = self.conn\n conn_alive = conn\n elif conn.tr1.disconnecting or conn.tr2.disconnecting:\n conn_dead = conn\n conn_alive = self.conn\n else:\n raise Exception('It should never happen.')\n self._check_result_only_cmd(conn_dead.peek_tr1_value() + conn_dead.peek_tr2_value(), b'ERROR')\n # at this point, the connection must be closing as the error was detected on READY state\n self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])\n # check connected_peers\n connected_peers = list(self.manager1.connections.connected_peers.values())\n self.assertEquals(1, len(connected_peers))\n self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])\n # connection is still up\n self.assertIsConnected(conn_alive)",
"def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )",
"def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message",
"def test_protocols_updated(self):\n assert self.agent_config.protocols == {self.new_protocol_id}",
"def test_multi_line():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n test_case = [\n \"48 6B 10 49 02 01 00 01 02 03 FF\",\n \"48 6B 10 49 02 02 04 05 06 07 FF\",\n \"48 6B 10 49 02 03 08 09 0A 0B FF\",\n ]\n\n correct_data = [0x49, 0x02] + list(range(12))\n\n # in-order\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)\n\n # test a few out-of-order cases\n for n in range(4):\n random.shuffle(test_case) # mix up the frame strings\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)",
"def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )",
"def test_buildProtocol(self):\n queryData = (\"fromUser\", None, None)\n factory = irc.DccChatFactory(None, queryData)\n protocol = factory.buildProtocol(\"127.0.0.1\")\n self.assertIsInstance(protocol, irc.DccChat)\n self.assertEqual(protocol.factory, factory)",
"def test_verify_connection_to_a_device():",
"def test_websocket_mechanics():\n transport = StringTransportWithDisconnection()\n service = hey_joe.WebSocketService(\"127.0.0.1\", 9000)\n protocol = service.buildProtocol(service._hey_joe_addr)\n protocol.transport = transport\n transport.protocol = protocol\n protocol.connectionMade()\n data_to_send = b'GET / HTTP/1.1\\r\\nHost: somewhere_in_the_world:9000\\r\\nConnection: keep-alive, Upgrade\\r\\nUpgrade: websocket\\r\\nSec-WebSocket-Version: 13\\r\\nSec-WebSocket-Key: F76ObkF/aCKX8WkmAgx2OQ==\\r\\n\\r\\n'\n protocol.dataReceived(data_to_send)\n assert transport.value().startswith(b'HTTP/1.1 101 Switching Protocols\\r\\nServer: hendrix')",
"def testBinaryProtocolEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolFactory())",
"def test_buildProtocol(self):\n f = AvatarFactory('world')\n p = f.buildProtocol(None)\n self.assertEqual(p.factory, f)\n self.assertEqual(p.world, 'world')\n self.assertTrue(isinstance(p, AvatarProtocol))",
"async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(text_data=\"hello\")\n response = await communicator.receive_from()\n assert response == \"hello\"\n # Test sending bytes\n await communicator.send_to(bytes_data=b\"w\\0\\0\\0\")\n response = await communicator.receive_from()\n assert response == b\"w\\0\\0\\0\"\n # Test sending JSON\n await communicator.send_json_to({\"hello\": \"world\"})\n response = await communicator.receive_json_from()\n assert response == {\"hello\": \"world\"}\n # Close out\n await communicator.disconnect()",
"def test_differentProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n protocols = []\n\n class FakeProtocol(object):\n def __init__(self):\n self.transport = StubPort()\n\n def query(self, address, query, timeout=10, id=None):\n protocols.append(self)\n return defer.succeed(dns.Message())\n\n resolver._connectedProtocol = FakeProtocol\n resolver.query(dns.Query('foo.example.com'))\n resolver.query(dns.Query('bar.example.com'))\n self.assertEqual(len(set(protocols)), 2)",
"def gotProtocol(self,p): \n p.send_hello()",
"def test_connectedProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n firstProto = resolver._connectedProtocol()\n secondProto = resolver._connectedProtocol()\n\n self.assertNotIdentical(firstProto.transport, None)\n self.assertNotIdentical(secondProto.transport, None)\n self.assertNotEqual(\n firstProto.transport.getHost().port,\n secondProto.transport.getHost().port)\n\n return defer.gatherResults([\n defer.maybeDeferred(firstProto.transport.stopListening),\n defer.maybeDeferred(secondProto.transport.stopListening)])",
"def notest_send_recv_network(self) :\n\n # Demarrage du serveur\n symbol = 'S' \n oProtocol_server = Protocol(symbol,mode=\"server\",debug=self.debug)\n # tcpHandlerMethods est definie dans le module test.util.test_util\n tcpHandlerMethods[\"process\"] = test_Protocol_process\n oProtocol_server.handlerRegister(tcpHandlerMethods)\n oProtocol_server.start()\n \n # Attente de l'etat actif du serveur.\n while oProtocol_server.isActivated is not True :\n time.sleep(1)\n\n # Toutes les commandes du protocole sont testees\n symbol = 'X'\n oProtocol_client = Protocol(symbol,mode=\"client\", debug=self.debug)\n \n status = True\n # Les commandes entrees par le joueur sont simulees \n for index, command in enumerate(self.commandList) :\n command = self.commandList[index]\n message = oProtocol_client.send(command)\n # print(\"\\n*** Received message= {}\".format(message))\n status = status and message['status']\n if message['status'] is False :\n print(\"\\n*** test_send_recv_network() : {}\\n\".format(message['notify']))\n\n # Le serveur est arrete\n oProtocol_server.shutdown()\n\n # Attend la terminaison des threads\n oProtocol_server.join()\n \n self.assertTrue( status )",
"async def dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=DSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n if args[1] == \"5L\":\n protocol.telegram = {\n LUXEMBOURG_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject(\n [{\"value\": \"123456789\", \"unit\": \"\"}]\n ),\n }\n if args[1] == \"5S\":\n protocol.telegram = {\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n if args[1] == \"Q3D\":\n protocol.telegram = {\n Q3D_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n }\n\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.config_flow.create_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)",
"def test_large_msg(self):\n self.proto.makeConnection(self.transport)\n outstr = \"test\" * AMP_MAXLEN\n self.proto.data_to_server(MsgServer2Portal, 1, test=outstr)\n\n if pickle.HIGHEST_PROTOCOL == 5:\n # Python 3.8+\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#=5Z\\x0b\\xb8\\x80\\x13\\xe85h\\x80\\x8e\\xbam`Dc\\xf4><\\xf8g\"\n b\"\\x1a[\\xf8\\xda\\x97\\xa3_\\xb1\\x95\\xdaz\\xbe\\xe7\\x1a\\xde\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x1f\\x1eP\\x1d\\x02\\r\\x00\\rpacked_data.2\"\n b\"\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\"\n b\"\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03m\\xe0\\x06\"\n b\"\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\"\n b\"\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03n\\x1c\"\n b\"\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\\x03\\xa0\\xb4O\\xb0\\xf5gA\"\n b\"\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\"\n b\"\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )\n elif pickle.HIGHEST_PROTOCOL == 4:\n # Python 3.7\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#o\\x8e\\xd6\\x02-\\xe0\\x04z\\r\\x1a\\xa0\\xa3m+$\\xd2\"\n b\"\\x18\\xbe\\x0f\\x0f\\xfe\\x1d\\xdf\\x14\\xfe\\x8e\\xedjO\\xac\\xb9\\xd4v\\xf6o\\x0f\\xf3\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00X\\xc3\\x00P\\x10\\x02\\x0c\\x00\\rpacked_data.2\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03m\\xe0\\x06\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03n\\x1c\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\"\n b\"\\x03\\xa0\\xb4O\\xb0\\xf5gA\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n 
b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\"\n b\"\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )",
"def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}",
"def test_protocols_updated(self):\n assert self.skill_config.protocols == {self.new_protocol_id}",
"def test_new_connection(self):\n with InverterFinder() as finder:\n sock1 = create_connection(('127.0.0.1', 1200))\n sock2, addr = finder.find_inverter()\n # Test if the 2 sockets are paired\n sock2.send(b\"\\x12\")\n self.assertEqual(b\"\\x12\", sock1.recv(1))\n sock1.close()\n sock2.close()",
"async def test_invalid_messages(self):\n async with Node() as n:\n reader, writer = await asyncio.open_connection(\n 'localhost', n._port\n )\n writer.write('hello\\n'.encode())\n await writer.drain()\n writer.close()\n self.assertTrue(n.check_alive())\n\n async with Node() as n1:\n async with Node() as n2:\n await n2.join_network(n1.nid())\n peer = next(iter(n2._act_set))\n await peer.send_message(f'{constants.JOIN_FOR} hello 42')\n await peer.send_message(f'{constants.SHU_MES} hello 42 world')\n self.assertEqual(n1.num_active(), 1)\n self.assertEqual(n1.num_passive(), 0)\n\n await peer.send_message('hello world')\n await asyncio.sleep(2)\n self.assertEqual(n1.num_active(), 0)\n self.assertEqual(n1.num_passive(), 0)",
"def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message",
"def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message",
"def test_star_routing_connectivity(self):\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n for source in range(len(self.multiplexers)):\n for destination in range(len(self.multiplexers)):\n if destination == source:\n continue\n envelope = Envelope(\n to=self.addresses[destination],\n sender=self.addresses[source],\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexers[source].put(envelope)\n delivered_envelope = self.multiplexers[destination].get(\n block=True, timeout=10\n )\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message",
"def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)",
"def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None",
"def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))"
] | [
"0.6693443",
"0.65527207",
"0.64990115",
"0.64490074",
"0.64464223",
"0.6435972",
"0.641256",
"0.63902915",
"0.6338546",
"0.6269686",
"0.6266628",
"0.6260895",
"0.6249174",
"0.61818177",
"0.61632335",
"0.61631536",
"0.61530423",
"0.61333424",
"0.6118203",
"0.60873365",
"0.6057119",
"0.60551274",
"0.60195684",
"0.6013828",
"0.60061854",
"0.60061854",
"0.6005192",
"0.5999445",
"0.5917562",
"0.5914799"
] | 0.6860271 | 0 |
Test _specification_type_to_python_type method unsupported type. | def test__specification_type_to_python_type_unsupported_type(self):
with self.assertRaises(TypeError):
_specification_type_to_python_type("unsupported_type") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_type_error(self):\n self._error_test(TypeError)",
"def test_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_protobuf_to_proto_plus, wrong_type\n )",
"def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})",
"def _check_type(self, new_value):\n raise NotImplementedError",
"def checkType(self, value):\n pass",
"def check_type(value: Any, type_spec: computation_types.Type):\n py_typecheck.check_type(type_spec, computation_types.Type)\n value_type = type_conversions.infer_type(value)\n if not type_spec.is_assignable_from(value_type):\n raise TypeError(\n computation_types.type_mismatch_error_message(\n value_type,\n type_spec,\n computation_types.TypeRelation.ASSIGNABLE,\n second_is_expected=True,\n )\n )",
"def test_coerce() -> None:\n assert _coerce(\"1.0\") == Version(\"1.0\")\n assert _coerce(1.0) == Version(\"1.0\")\n expected = \"Unable to coerce object type\"\n with pytest.raises(NotImplementedError, match=expected):\n _coerce(type(Version))",
"def test_unexpectedType(self):\n self.assertRaises(TypeError, nativeString, 1)",
"def test_should_raise_error_if_type_is_invalid(self):\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement({'type': 'sugar'})",
"def CheckType(self, *args, **kwargs):\n pass",
"def check_type(self):\n return True",
"def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def _validate_type(self) -> None:\n # TODO: add transformation logic so that we don't have to transform inputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)",
"def test_proto_plus_to_protobuf_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_proto_plus_to_protobuf, wrong_type\n )",
"def _validate_type(self):\n # TODO: add transformation logic so that we don't have to transform outputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)",
"def test_check_type_1():\r\n hl = hotlist.HotList()\r\n hl._validate_value(1)\r\n hl._validate_value(1L)\r\n hl._validate_value(1.5)\r\n hl._validate_value(\"abc\")\r\n hl._validate_value(u\"abc\")\r\n hl._validate_value((1, 2, 3,))\r\n hl._validate_value((1, \"AAA\", 3,))\r\n hl._validate_value((1, (\"AAA\", 2, 3,) , 3,))\r\n hl._validate_value((1, frozenset([\"AAA\", 2, 3,]) , 3,))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value([ 1, 2, 3,])\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(( 1, 2, [ 3, 4, 5,],))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value({})\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(hotlist.HotList())",
"def _assert_type(type):\n if isinstance(type, str):\n o, v, p, t = type.split('.')\n if not ontologies.is_supported(o, v, p, t):\n rt.throw(\"Type {0}.v{1}.{2} is unsupported.\".format(o, v, p, t))\n elif type not in ontologies.get_types():\n rt.throw(\"Type {0} is unsupported.\".format(type))",
"def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with pytest.raises(InputError):\n _check_input_config({key: test_value})",
"def _check_type_compatibility(self, type_name1, type_name2,\n operation):\n if type_name1 != type_name2:\n raise TypeCompatibilityError(type_name1, type_name2, operation)",
"def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)",
"def test_types(self):\n assert types.typeClass(\"str\") == str\n\n assert types.isBuiltinType(\"str\")\n\n assert types.isCollectionType(\"map\")\n assert types.isCollectionType(\"seq\")\n assert not types.isCollectionType(\"str\")\n\n assert types.isScalarType(\"str\")\n assert not types.isScalarType(\"seq\")\n assert not types.isScalarType(\"map\")\n\n assert types.isCollection([])\n assert types.isCollection({})\n assert not types.isCollection(\"foo\")\n\n assert types.isScalar(\"\")\n assert types.isScalar(True)\n assert not types.isScalar([])\n\n assert types.isCorrectType(\"\", str)\n assert types.isCorrectType({}, dict)\n\n assert types.isString(\"foo\")\n assert not types.isString([])\n\n assert types.isInt(1)\n assert not types.isInt(\"foo\")\n\n assert types.isBool(True)\n assert not types.isBool(1)\n assert not types.isBool(\"true\")\n\n assert types.isFloat(1.0)\n assert not types.isFloat(\"foo\")\n\n assert types.isNumber(1)\n assert types.isNumber(1.0)\n assert not types.isNumber(\"foo\")\n\n assert types.isText(\"foo\")\n assert types.isText(1)\n assert types.isText(1.0)\n assert not types.isText([])\n assert not types.isText(True)\n\n assert types.isAny(\"foo\")\n assert types.isAny(True)\n assert types.isAny(1)\n assert types.isAny(1.0)\n assert types.isAny({})\n assert types.isAny([])\n\n assert types.isEnum(\"foo\")\n assert not types.isEnum(1)\n\n assert types.isNone(None)\n assert not types.isNone(\"foo\")",
"def test_should_return_appropriate_type(self):\r\n assert isinstance(self.spec_parser.parse_statement(self.edge_spec), Edge)\r\n assert isinstance(self.spec_parser.parse_statement(self.property_spec), Property)",
"def testTheType(self, theTestType):\n \n pass",
"def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)",
"def test_should_return_error_if_stmt_contains_no_type(self):\r\n with self.assertRaises(TypeError):\r\n self.spec_parser.parse_statement({'name': 'todd'})",
"def TYPE(value):\n raise NotImplementedError()",
"def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")",
"def test_expected_type(val, exp_type):\n\n if not isinstance(val, exp_type):\n return False",
"def test_get_types(self):\n pass",
"def test_types(self):\n \n self.assertIsInstance(self.detector_type, str)\n self.assertIsInstance(self.psd, dict)\n self.assertIsInstance(self.intensity, dict)\n self.assertIsInstance(self.database, str)\n self.assertIsInstance(self.position, list)\n self.assertIsInstance(self.angle, list)\n self.assertIsInstance(self.linearity_curve, dict)\n self.assertIsInstance(self.FOV, float)\n \n pass"
] | [
"0.6947089",
"0.6929546",
"0.6844311",
"0.6795259",
"0.66925305",
"0.6652292",
"0.6652253",
"0.6572005",
"0.65506715",
"0.6511283",
"0.64829165",
"0.6474207",
"0.64679945",
"0.64503706",
"0.64311326",
"0.6370294",
"0.6370191",
"0.6354397",
"0.6286515",
"0.6261668",
"0.62531507",
"0.6238267",
"0.6237771",
"0.62207377",
"0.6207208",
"0.6176347",
"0.6165806",
"0.6164325",
"0.6159431",
"0.61389667"
] | 0.91027087 | 0 |
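
The implementation of _specification_type_to_python_type is not quoted in this row, so the following is only a minimal sketch of the assumed shape of such a helper, illustrating why an unsupported specification type would raise the TypeError the test expects. The mapping contents and the name _SPEC_TO_PYTHON are assumptions for illustration, not the library's actual code.

# Illustrative sketch only (assumed mapping and names, not the real implementation).
_SPEC_TO_PYTHON = {
    "pt:bytes": "bytes",
    "pt:int": "int",
    "pt:float": "float",
    "pt:bool": "bool",
    "pt:str": "str",
}

def _specification_type_to_python_type_sketch(specification_type: str) -> str:
    # Unknown specification types are rejected loudly, matching the test above.
    if specification_type not in _SPEC_TO_PYTHON:
        raise TypeError("Unsupported type: '{}'".format(specification_type))
    return _SPEC_TO_PYTHON[specification_type]
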
Test _union_sub_type_to_protobuf_variable_name method tuple. | def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock):
_union_sub_type_to_protobuf_variable_name("content_name", "Tuple")
mock.assert_called_once() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _union_sub_type_to_protobuf_variable_name(\n content_name: str, content_type: str\n) -> str:\n if content_type.startswith(\"FrozenSet\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"set_of_{}\".format(sub_type)\n elif content_type.startswith(\"Tuple\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"list_of_{}\".format(sub_type)\n elif content_type.startswith(\"Dict\"):\n sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]\n sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]\n expanded_type_str = \"dict_of_{}_{}\".format(sub_type_1, sub_type_2)\n else:\n expanded_type_str = content_type\n\n protobuf_variable_name = \"{}_type_{}\".format(content_name, expanded_type_str)\n\n return protobuf_variable_name",
"def _decode_union_old(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Union member has no associated value\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n # Union member has value\n if len(obj) != 1:\n raise bv.ValidationError('expected 1 key, got %s' % len(obj))\n tag = list(obj)[0]\n raw_val = obj[tag]\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if isinstance(val_data_type, bv.Nullable) and raw_val is None:\n val = None\n elif isinstance(val_data_type, bv.Void):\n if raw_val is None or not strict:\n # If raw_val is None, then this is the more verbose\n # representation of a void union member. If raw_val isn't\n # None, then maybe the spec has changed, so check if we're\n # in strict mode.\n val = None\n else:\n raise bv.ValidationError('expected null, got %s' %\n bv.generic_type_name(raw_val))\n else:\n try:\n val = _json_compat_obj_decode_helper(\n val_data_type, raw_val, alias_validators, strict, True,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(tag)\n raise\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)",
"def union_parts(union: UnionKind, value: dict):\n selector, sub_value = list(value.items())[0]\n final_kind = union.kind_for(selector)\n value = sub_value\n return final_kind, value",
"def _decode_union(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Handles the shorthand format where the union is serialized as only\n # the string of the tag.\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n if tag == data_type.definition._catch_all:\n raise bv.ValidationError(\n \"unexpected use of the catch-all tag '%s'\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n tag, val = _decode_union_dict(\n data_type, obj, alias_validators, strict, for_msgpack)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)",
"def typeToName(type: int) -> unicode:\n ...",
"def method_union_name(self) -> str:",
"def get_Union_params(un):\n try:\n return un.__union_params__\n except AttributeError:\n # Python 3.6\n return un.__args__",
"def test_frame_variable(self):\n self.build()\n self.common_setup()\n\n # This should display correctly.\n self.expect(\n \"frame variable --show-types -- *my_foo_ptr\",\n VARIABLES_DISPLAYED_CORRECTLY,\n substrs=[\n \"(foo)\",\n \"(sub_foo)\",\n \"other_element = 3\"])",
"def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)",
"def _check_typevar(self, name: str, node: nodes.AssignName) -> None:\n if isinstance(node.parent, nodes.Assign):\n keywords = node.assign_type().value.keywords\n args = node.assign_type().value.args\n elif isinstance(node.parent, nodes.Tuple):\n keywords = (\n node.assign_type().value.elts[node.parent.elts.index(node)].keywords\n )\n args = node.assign_type().value.elts[node.parent.elts.index(node)].args\n\n variance = TypeVarVariance.invariant\n name_arg = None\n for kw in keywords:\n if variance == TypeVarVariance.double_variant:\n pass\n elif kw.arg == \"covariant\" and kw.value.value:\n variance = (\n TypeVarVariance.covariant\n if variance != TypeVarVariance.contravariant\n else TypeVarVariance.double_variant\n )\n elif kw.arg == \"contravariant\" and kw.value.value:\n variance = (\n TypeVarVariance.contravariant\n if variance != TypeVarVariance.covariant\n else TypeVarVariance.double_variant\n )\n\n if kw.arg == \"name\" and isinstance(kw.value, nodes.Const):\n name_arg = kw.value.value\n\n if name_arg is None and args and isinstance(args[0], nodes.Const):\n name_arg = args[0].value\n\n if variance == TypeVarVariance.double_variant:\n self.add_message(\n \"typevar-double-variance\",\n node=node,\n confidence=interfaces.INFERENCE,\n )\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(\"\",),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.covariant and not name.endswith(\"_co\"):\n suggest_name = f\"{re.sub('_contra$', '', name)}_co\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is covariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.contravariant and not name.endswith(\"_contra\"):\n suggest_name = f\"{re.sub('_co$', '', name)}_contra\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is contravariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.invariant and (\n name.endswith(\"_co\") or name.endswith(\"_contra\")\n ):\n suggest_name = re.sub(\"_contra$|_co$\", \"\", name)\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is invariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n\n if name_arg is not None and name_arg != name:\n self.add_message(\n \"typevar-name-mismatch\",\n node=node,\n args=(name_arg, name),\n confidence=interfaces.INFERENCE,\n )",
"def test_get_output_var_names(initialized_bmi):\n names = initialized_bmi.get_output_var_names()\n assert isinstance(names, tuple)\n\n if hasattr(initialized_bmi, \"get_output_var_name_count\"):\n n_names = initialized_bmi.get_output_var_name_count()\n assert len(names) == n_names\n else:\n warnings.warn(\"get_output_var_name_count not implemented\")",
"def property_to_py_name(cpp_struct_name):\r\n first_underscore = cpp_struct_name.find('_')\r\n assert first_underscore != -1\r\n return cpp_struct_name[first_underscore + 1:]",
"def test_output_named_tuple_vs_dictionary_5():\n assert largest_blood_type == largest_blood_type_d, \"Max Blood Type cannot be different for same group\"",
"def gen_type_assertion(var_name: str, ty: type) -> str:\n\n tys = type_str(ty)\n vars = [c for c in 'abcdefghijklmnop' if c != var_name][::-1]\n\n def helper(var_name, tys):\n tys = tys.strip()\n pre_bracket = tys.split(\"[\")[0].lower() # part before [ (or the entire string if no bracket\n ans = f\"type({var_name}) is {pre_bracket}\"\n if \"[\" in tys:\n inside = tys[tys.index(\"[\") + 1:-1]\n new_var = vars.pop()\n if pre_bracket == \"list\" or pre_bracket == \"set\":\n inside_check = helper(new_var, inside)\n # if \" and \" in inside_check:\n # inside_check = \"(\" + inside_check + \")\"\n ans += f\" and all({inside_check} for {new_var} in {var_name})\"\n elif pre_bracket == \"dict\":\n depth = 0\n for i, c in enumerate(inside):\n if c == \"[\":\n depth += 1\n elif c == \"]\":\n depth -= 1\n elif c == \",\" and depth == 0:\n break\n assert depth == 0 and c == \",\", \"Dict[(expecting comma inside)]\"\n key_var = vars.pop()\n key_check = helper(key_var, tys[:i])\n val_check = helper(new_var, tys[i + 1:])\n ans += f\" and all({key_check} and {val_check} for {key_var}, {new_var} in {var_name}.items())\"\n else:\n assert False, f\"Unknown type `{tys}`\"\n return ans\n\n return f\"assert {helper(var_name, tys)}, '{var_name} must be of type {tys}'\"",
"def gen_type_tuple_string(self, name, node):\n return \"('{}', {})\".format(name, self.gen_type_string(node))",
"def test_proto_export_inverse(tmp_path, x, name):\n config = Config()\n typedef, message = x\n with tempfile.NamedTemporaryFile(\n mode=\"r+\", dir=str(tmp_path), suffix=\".proto\", delete=True\n ) as outfile:\n\n typedef_map = {name: typedef}\n\n protofile.export_proto(typedef_map, output_file=outfile)\n outfile.flush()\n\n outfile.seek(0)\n new_typedef_map = protofile.import_proto(config, input_file=outfile)\n\n config.known_types.update(new_typedef_map)\n # validate\n for name, typedef in new_typedef_map.items():\n blackboxprotobuf.validate_typedef(typedef, config=config)\n\n def _check_field_types(typedef1, typedef2):\n for field_num in typedef1.keys():\n # make sure we don't drop keys\n assert field_num in typedef2\n assert typedef1[field_num][\"type\"] == typedef2[field_num][\"type\"]\n if typedef1[field_num][\"type\"] == \"message\":\n message_typedef1 = None\n message_typedef2 = None\n if \"message_typedef\" in typedef1[field_num]:\n message_typedef1 = typedef1[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef1[field_num]:\n assert typedef1[field_num][\"message_type_name\"] in typedef_map\n message_typedef1 = typedef_map[\n typedef1[field_num][\"message_type_name\"]\n ]\n if \"message_typedef\" in typedef2[field_num]:\n message_typedef2 = typedef2[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef2[field_num]:\n assert (\n typedef2[field_num][\"message_type_name\"] in new_typedef_map\n )\n message_typedef2 = new_typedef_map[\n typedef2[field_num][\"message_type_name\"]\n ]\n\n _check_field_types(message_typedef1, message_typedef2)\n\n note(typedef_map)\n note(new_typedef_map)\n for name, typedef in typedef_map.items():\n _check_field_types(typedef, new_typedef_map[name])\n\n note(new_typedef_map[name])\n # try to actually encode a message with the typedef\n encode_forward = length_delim.encode_message(message, config, typedef_map[name])\n\n config.known_types = new_typedef_map\n encode_backward = length_delim.encode_message(\n message, config, new_typedef_map[name]\n )\n\n decode_forward, _, _, _ = length_delim.decode_message(\n encode_forward, config, new_typedef_map[name]\n )\n decode_backward, _, _, _ = length_delim.decode_message(\n encode_backward, config, typedef_map[name]\n )",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def repackage_var(x):\n if type(x) == Variable:\n return Variable(x.data)\n else:\n return tuple(repackage_var(v) for v in x)",
"def nameof_both(var, *more_vars):\n result = nameof(var, *more_vars, frame=2)\n\n if not more_vars:\n assert result == bytecode_nameof(frame=2)\n return result",
"def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()",
"def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! (' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'",
"def test_extracting_one_value(self):\n\t\tself.assertEqual([\"b\"], au.extract_variables(bf.Var(\"b\")), \"Invalid variables extracted, expected [b].\")",
"def test_get_input_var_names(initialized_bmi):\n names = initialized_bmi.get_input_var_names()\n assert isinstance(names, tuple)\n\n if hasattr(initialized_bmi, \"get_input_var_name_count\"):\n n_names = initialized_bmi.get_input_var_name_count()\n assert len(names) == n_names\n else:\n warnings.warn(\"get_input_var_name_count not implemented\")",
"def getunittouu(self, param):\n if type(param) is tuple:\n return tuple([self.getunittouu(val) for val in param])\n try:\n return inkex.unittouu(param)\n except AttributeError:\n return self.unittouu(param)",
"def gen_proto_recv(signame, argname, typename, size, is_enum, is_struct, is_varlen):\n add_code = None\n wordoff = word_offset(signame, argname)\n if is_varlen:\n # Array. Logic is identical to send direction; copying\n # is done elsewhere, we just return an offset.\n # The offset's the same for send, so we don't need\n # to generate any code.\n proto_code = None\n copy_code = None\n signature = None\n else:\n signature = mangle_type(typename)\n if is_struct:\n proto_code = \"%s *%s\" % (typename, argname)\n copy_code = \" CCP_%s_%s_GET(pdu, %s);\" % (\n signame.upper(), argname.upper(), argname)\n else:\n proto_code = \"%s *%s\" % (typename, argname)\n cast = \"(%s)\" % (typename)\n copy_code = \" *%s = %sCCP_%s_%s_GET(pdu);\" % (\n argname, cast, signame.upper(), argname.upper())\n return (proto_code, copy_code, add_code, signature)",
"def unpack_type_spec_from(\n buffer: bytes, offset: int = 0\n) -> tuple[computation_types.Type, int]:\n length, length_size = _unpack_length_from(buffer, offset=offset)\n offset += length_size\n type_spec_bytes, *_ = struct.unpack_from(f'!{length}s', buffer, offset=offset)\n proto = computation_pb2.Type.FromString(type_spec_bytes)\n type_spec = type_serialization.deserialize_type(proto)\n return type_spec, length_size + length # pytype: disable=bad-return-type",
"def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result",
"def __compile_subroutine_parameters(self):\r\n while self.__tokenizer.keyword() == TYPES_DIC[\"VAR\"]:\r\n self.compile_var_dec()",
"def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples",
"def test_parse_substitution_variable():\n assert parse_substitution_variable(\"${SOME_VAR}\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"$SOME_VAR\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"SOME_STRING\") is None\n assert parse_substitution_variable(\"SOME_$TRING\") is None\n assert parse_substitution_variable(\"${some_var}\") == \"some_var\"\n assert parse_substitution_variable(\"$some_var\") == \"some_var\"\n assert parse_substitution_variable(\"some_string\") is None\n assert parse_substitution_variable(\"some_$tring\") is None\n assert parse_substitution_variable(\"${SOME_$TRING}\") is None\n assert parse_substitution_variable(\"$SOME_$TRING\") == \"SOME_\""
] | [
"0.7497985",
"0.54891527",
"0.5454183",
"0.5442474",
"0.5129124",
"0.5115415",
"0.51112336",
"0.5045163",
"0.5033024",
"0.5018397",
"0.49975044",
"0.49971396",
"0.49895564",
"0.4978763",
"0.4951379",
"0.49307013",
"0.49243486",
"0.48797363",
"0.48714188",
"0.485954",
"0.48464656",
"0.4840963",
"0.48244455",
"0.4813817",
"0.48101464",
"0.47849223",
"0.46974272",
"0.46804968",
"0.46798742",
"0.46779147"
] | 0.8391995 | 0 |
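
The first entry in the negatives list above quotes the actual body of _union_sub_type_to_protobuf_variable_name, so a short usage sketch can be grounded in it. It assumes the helper is in scope and that _get_sub_types_of_compositional_types splits composite type strings as their names suggest (e.g. "Tuple[str]" yields ["str"]); the concrete type strings below are illustrative only.

# Usage sketch based on the implementation quoted in the negatives above.
# Assumes _get_sub_types_of_compositional_types("Tuple[str]") returns ["str"]
# and _get_sub_types_of_compositional_types("Dict[str, int]") returns ["str", "int"].
assert _union_sub_type_to_protobuf_variable_name("content_name", "Tuple[str]") == "content_name_type_list_of_str"
assert _union_sub_type_to_protobuf_variable_name("content_name", "FrozenSet[int]") == "content_name_type_set_of_int"
assert _union_sub_type_to_protobuf_variable_name("content_name", "Dict[str, int]") == "content_name_type_dict_of_str_int"
assert _union_sub_type_to_protobuf_variable_name("content_name", "str") == "content_name_type_str"
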
Test _includes_custom_type method positive result. | def test__includes_custom_type_positive(self, *mocks):
content_type = "Union[str]"
result = self.protocol_generator._includes_custom_type(content_type)
self.assertTrue(result)
content_type = "Optional[str]"
result = self.protocol_generator._includes_custom_type(content_type)
self.assertTrue(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _includes_custom_type(content_type: str) -> bool:\n\n if content_type.startswith(\"Optional\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n result = _includes_custom_type(sub_type)\n elif content_type.startswith(\"Union\"):\n sub_types = _get_sub_types_of_compositional_types(content_type)\n result = False\n for sub_type in sub_types:\n if _includes_custom_type(sub_type):\n result = True\n break\n elif (\n content_type.startswith(\"FrozenSet\")\n or content_type.startswith(\"Tuple\")\n or content_type.startswith(\"Dict\")\n or content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys()\n ):\n result = False\n else:\n result = True\n return result",
"def testTheType(self, theTestType):\n \n pass",
"def test_get_types(self):\n pass",
"def has_type(self, item_type):\n raise NotImplementedError()",
"def is_custom(self):\n return self._is_custom",
"def check_type(self):\n return True",
"def test(types, _):\n return 'Date' in types and 'Postal Code' in types",
"def should_be_included(self):\n return True",
"def is_required_data(self, typename):\n return typename in self.required_data_products",
"def can_contain(self):\n return False",
"def validatePredefinedType(self, type: int) -> bool:\n ...",
"def has_custom(self, phrase_string: str, custom_property: str) -> bool:\n print('CUSTOM:', self.custom)\n return phrase_string in self.custom and custom_property in self.custom[phrase_string]",
"def test_expected_custom_types(self):\n handler = self.create_handler(\n r'json_list=[\"a\", \"b\"]&'\n r'json_dict={\"a\": 1}&'\n r'datetime=2007-03-04T21:08:12Z&'\n r'date=2007-03-04')\n param_types = {\n 'json_list': 'json',\n 'json_dict': 'json',\n 'datetime': 'datetime',\n 'date': 'date',\n }\n expected_params = {\n u'json_list': [u'a', u'b'],\n u'json_dict': {u'a': 1},\n u'datetime': datetime.datetime(2007, 3, 4, 21, 8, 12),\n u'date': datetime.date(2007, 3, 4),\n }\n self.assertEqual(handler.get_params(param_types), expected_params)",
"def is_required_data(self, typename):\r\n return typename in self.required_data_products",
"def test_sample_type(self):\r\n \r\n self.assertEqual(self.test_sample.sampleType, 'TUMOUR')",
"def tests_ti_document_get_includes(self, request: FixtureRequest):\n super().group_get_includes(request)",
"def hasCustomEffect(self, type_):\n for effect in getHandle().effects:\n if CraftPotionUtil == effect.getMobEffect(, type_):\n return True\n return False",
"def testContentTypes_Extended(self):\n self.mox.ReplayAll()\n\n mapper = service_handlers.RPCMapper(['GET', 'POST'],\n 'my-content-type',\n self.protocol,\n content_types=['a', 'b'])\n\n self.assertEquals(frozenset(['GET', 'POST']), mapper.http_methods)\n self.assertEquals('my-content-type', mapper.default_content_type)\n self.assertEquals(frozenset(['my-content-type', 'a', 'b']),\n mapper.content_types)\n\n self.mox.VerifyAll()",
"def test_type_code(self):\n inv_search = \"collection:review\"\n spi_search = \"find tc review\"\n self._compare_searches(inv_search, spi_search)\n inv_search = \"collection:review\"\n spi_search = \"find ps review\"\n self._compare_searches(inv_search, spi_search)\n inv_search = \"collection:review\"\n spi_search = \"find scl review\"\n self._compare_searches(inv_search, spi_search)",
"def test_allow(self, incl, value):\n i = include(*incl)\n assert i(fields(C).a, value) is True",
"def test_linked_list_includes_exists():\n assert LinkedList.includes",
"def test_publish_with_custom_fields(self):\n class RichField(BaseTextAreaField):\n field_id = 'rich_field'\n\n class SpecialRichField(BaseTextAreaField):\n # Exercise special case field name 'text'\n field_id = 'text'\n\n class BasicField(BaseEditableField):\n field_id = 'basic_field'\n\n fieldset = get_review_request_fieldset('main')\n fieldset.add_field(RichField)\n fieldset.add_field(SpecialRichField)\n fieldset.add_field(BasicField)\n\n try:\n draft = self._get_draft()\n review_request = draft.review_request\n\n draft.description = 'New description'\n draft.extra_data['rich_field'] = '**Rich custom text**'\n draft.extra_data['rich_field_text_type'] = 'markdown'\n draft.extra_data['text'] = 'Nothing special'\n draft.extra_data['text_type'] = 'plain'\n draft.extra_data['basic_field'] = 'Basic text'\n draft.target_people.add(review_request.submitter)\n\n draft.publish()\n\n self.assertNotIn('description_text_type',\n review_request.extra_data)\n self.assertIn('rich_field', review_request.extra_data)\n self.assertIn('rich_field_text_type', review_request.extra_data)\n self.assertIn('text', review_request.extra_data)\n self.assertIn('text_type', review_request.extra_data)\n self.assertIn('basic_field', review_request.extra_data)\n self.assertNotIn('basic_field_text_type',\n review_request.extra_data)\n\n self.assertEqual(review_request.description, draft.description)\n self.assertEqual(review_request.extra_data['rich_field'],\n draft.extra_data['rich_field'])\n self.assertEqual(review_request.extra_data['rich_field_text_type'],\n draft.extra_data['rich_field_text_type'])\n self.assertEqual(review_request.extra_data['text'],\n draft.extra_data['text'])\n self.assertEqual(review_request.extra_data['text_type'],\n draft.extra_data['text_type'])\n self.assertEqual(review_request.extra_data['basic_field'],\n draft.extra_data['basic_field'])\n finally:\n fieldset.remove_field(RichField)\n fieldset.remove_field(SpecialRichField)\n fieldset.remove_field(BasicField)",
"def custom_added(self, key: _K) -> bool:\n return key in self._customs",
"def test_single_named_link_with_custom_type():\n pass",
"def use_types( self ) :\n return self._use_types",
"def __contains__(self, *args, **kwargs): # real signature unknown\n pass",
"def __contains__(self, *args, **kwargs): # real signature unknown\n pass",
"def __contains__(self, *args, **kwargs): # real signature unknown\n pass",
"def __contains__(self, *args, **kwargs): # real signature unknown\n pass",
"def __contains__(self, *args, **kwargs): # real signature unknown\n pass"
] | [
"0.7456461",
"0.6151709",
"0.57726073",
"0.5564573",
"0.5552539",
"0.5481914",
"0.5474497",
"0.54267174",
"0.5376972",
"0.534768",
"0.5330371",
"0.5328861",
"0.5311295",
"0.5270746",
"0.52539307",
"0.5240947",
"0.5239048",
"0.5234515",
"0.5231774",
"0.5193669",
"0.5185735",
"0.5179525",
"0.5166746",
"0.51475644",
"0.51340014",
"0.5124719",
"0.5124719",
"0.5124719",
"0.5124719",
"0.5124719"
] | 0.7806248 | 0 |
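
The first entry in the negatives list above quotes _includes_custom_type itself, so its branches can be walked directly. The sketch assumes the helper is in scope, that DataModel stands for a protocol-specific custom type (i.e. not a key of PYTHON_TYPE_TO_PROTO_TYPE), and that _get_sub_types_of_compositional_types splits the composite strings as written; the positive test above presumably relies on its *mocks patching that sub-type extraction, since a bare "Union[str]" contains only a primitive.

# Branch walk-through for the implementation quoted in the negatives above.
# "DataModel" is a hypothetical custom type used only for illustration.
assert _includes_custom_type("DataModel") is True              # not composite, not a primitive
assert _includes_custom_type("Optional[DataModel]") is True    # recurses into the sub-type
assert _includes_custom_type("Union[int, DataModel]") is True  # any custom member is enough
assert _includes_custom_type("Dict[str, int]") is False        # composite of primitives only
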
Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. | def convert_text_to_rouge_format(text, title="dummy title"):
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1) if sent != '']
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert(text):\n return NewDocument.from_rst(text).format()",
"def preprocess(self, text):\r\n return text",
"def preprocess(self, text):\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text",
"def nltk_text(self, text):\n text = nltk.Text(word_tokenize(text))\n return text",
"def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text",
"def convert_txt_to_data():\n pass",
"def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)",
"def normalize(self, text: str) -> str:",
"def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()",
"def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]",
"def process_text(self, text, language):",
"def process_text(text):\n text = text.strip()\n textList = text.split('\\n')\n newText = ''\n addNewline = True\n for line in textList:\n # Remove duplicate white space\n temp = ' '.join(line.split())\n # Trim any beginning non-alphabet letters\n temp = trim(temp)\n # Remove overly short lines, but keep ends of sentences\n # Add a newline if gap detected\n if len(temp) < 40 and not '.' in temp:\n if addNewline:\n newText += '\\n'\n addNewline = False\n continue\n # Add line to growing string\n newText += temp + ' '\n addNewline = True\n return newText",
"def format_text(text):\n\n\ttext = ' '.join(text).lower()\n\ttext = re.sub(r\"[^a-zA-Z.?!]\", \" \", text)\n\ttext = re.sub(r' +', ' ', text)\n\ttext = word_tokenize(text)\n\ttext = pos_tag(text)\n\n\treturn text",
"def normalize_text(text):\n\n text = text.lower().strip().replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n\n text = replace_money_token(text)\n text = replace_urls_token(text)\n text = fix_unicode_quotes(text)\n text = format_large_numbers(text)\n text = pad_punctuation(text)\n return text.strip()",
"def convert_all(text):\r\n\tpig_tokens = ''\r\n\r\n\t#tokenizes the text\r\n\ttokens = word_tokenize(text)\r\n\r\n\t#regex for non-alphabetical characters\r\n\tpattern = re.compile(r'[^a-zA-Z]')\r\n\r\n\t#converts the words to pig latin and appends them to the sentence.\r\n\tfor token in tokens:\r\n\t\tif not re.findall(pattern, token):\r\n\t\t\tword = word_to_pig_latin(token)\r\n\r\n\t\t\tif re.findall(r'[A-Z]', word):\r\n\t\t\t\tword = word.lower()\r\n\t\t\t\tword = word.capitalize()\r\n\t\t\tpig_tokens += ' ' + word\r\n\t\telse:\r\n\t\t\tpig_tokens += token\r\n\r\n\tpig_text = ''.join(pig_tokens)\r\n\r\n\treturn pig_text",
"def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)",
"def norm_text(self, text):\n\n # encode to apply utf-8 and decode to remove initial 'b'\n text = str(text.encode('utf-8').decode('utf-8'))\n text = text.lower()\n\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text",
"def rich(text):\n return full(text, False)",
"def process_text(text):\n no_split_dict = {'u . s': 'u.s', 'u . n': 'u.n', 'u . k': 'u.k', 'l . a': 'l.a', 'j . k': 'j.k', 'a . m': 'a.m',\n 'p . m': 'p.m', 'd . j': 'd.j', 'd . a': 'd.a'}\n\n text = re.sub(\".*--\", \"\", text, count=1) # Removing cnn from start of text\n if text.startswith('(CNN)'): # Remove cnn from articles that starts with only cnn\n text = re.sub('\\(CNN\\)', '', text, count=1)\n text = re.sub(r'(?<=[^?!.0-9])(?=[.,!?])', ' ', text) # 4\n text = re.sub(r'(?![0-9])(?<=[.,])(?=[^\\s])', r' ', text) # 4\n text = text.lower() # 2\n text = re.sub('[^A-Za-z0-9 .!?,øæå]+', '', text) # 3\n text = re.sub(r'((?<=[a-z])(?=[.]))|((?=[a-z])(?<=[.]))(?=[^\\s])', r' ', text) # space a-z.a-z\n text = re.sub(r'((?=[0-9])(?<=[a-z]))|((?=[a-z])(?<=[0-9]))(?=[^\\s])', r' ', text) # space 0-9a-z\n for key in no_split_dict:\n text = text.replace(key, no_split_dict[key]) # Fixing word splits\n text = re.sub('[0-9]', '#', text) # 8\n text = \" \".join(text.split()) # 5, 6, 7 - i think\n return text",
"def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:\n text = sentence\n return text",
"def convert_pattern_format(text):\n parsed_text = []\n # parse text via Pattern's parser\n pattern_parsed_text = Text(parse(text, relations=True, lemmata=True))\n for sentence in pattern_parsed_text:\n s = Sentence()\n s.string = remove_blanks(sentence.string)\n for word in sentence:\n # Patterns tags for each word in the sentence are stored in a new Word-object\n w = Word()\n w.string = word.string\n w.lemma = word.lemma\n w.index = word.index\n w.tag = word.type\n w.entity = \"\"\n # each word is appended to a Sentence-object\n s.words.append(w)\n # each Sentence-object is appended to an array\n parsed_text.append(s)\n return parsed_text",
"def serialize_text(text):\n return serialize_plaintext(text)",
"def convert_srt_to_txt(text, join=False):\n lines = text.split('\\n')\n result = []\n for line in lines:\n if not line.strip(): # Skipping empty lines\n continue\n elif line.strip().isdigit(): # Skip lines containing only numbers\n continue\n elif (line.startswith(\"WEBVTT\") or\n line.startswith(\"Kind: captions\") or\n line.startswith(\"Language: en\")): # Skipping lines containing service information\n continue\n # We skip lines with the format \"00:00:00,000 --> 00:00:03,090\"\n elif re.match(r\"\\d{2}:\\d{2}:\\d{2}.\\d{3} --> \\d{2}:\\d{2}:\\d{2}.\\d{3}\", line.strip()):\n continue\n else:\n result.append(line.strip())\n if join:\n out = join_lines(result) # Combining strings into sentences\n else:\n out = \"\\n\".join(result) # Combining strings without parsing into sentences\n return out",
"def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()",
"def preprocess_text(text: str) -> Tuple[List[str], Dict]:\n raise NotImplementedError",
"def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text",
"def text_prepare(txt):\n print(txt)\n txt = re.sub(r\"[^\\w\\s]\", \" \", str(txt).lower().strip())\n txt = txt.split()\n nltk.corpus.stopwords.words(\"english\")\n txt = [word for word in txt if word not in nltk.corpus.stopwords.words(\"english\")]\n lem = nltk.stem.wordnet.WordNetLemmatizer()\n txt = [lem.lemmatize(word) for word in txt]\n txt = \" \".join(txt)\n return txt",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text",
"def parse_text(self, text):\r\n MAXLEN = 100\r\n sentences = []\r\n punct = [\",\",\":\",\";\",\".\",\"–\",\"?\",\"!\",\"(\",\")\"] # Interpunctuation marks\r\n text = text.replace(\"\\r\", \" \").replace(\"\\t\", \" \") # Remove CR and tabs\r\n words = text.split(\" \") if len(text) > MAXLEN else []\r\n sentence = \"\" if len(text) > MAXLEN else text\r\n\r\n # Preprocess list for silence markers\r\n if conf.SilenceMarker in text:\r\n words_new = []\r\n if not words and sentence: # Was too short to be cut initially\r\n words = text.split(\" \")\r\n sentence = \"\"\r\n for w in filter(None, words):\r\n if conf.SilenceMarker not in w.lower():\r\n words_new.append(w)\r\n else:\r\n text_chunks = w.lower().split(conf.SilenceMarker)\r\n for i, part in enumerate(text_chunks):\r\n if part:\r\n words_new.append(part)\r\n if i < len(text_chunks) - 1:\r\n words_new.append(conf.SilenceMarker)\r\n else:\r\n if words_new and conf.SilenceMarker in words_new[-1]:\r\n words_new[-1] += conf.SilenceMarker\r\n else:\r\n words_new.append(conf.SilenceMarker)\r\n words = words_new\r\n\r\n for w in words:\r\n if conf.SilenceMarker in w:\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n sentences.append(w)\r\n sentence = \"\"\r\n elif w[-1] in punct or w[0] in punct: # Encountered punctuation\r\n if w[-1] in punct and (len(sentence) + len(w) + 1 < MAXLEN):\r\n # Word ends with punct and sentence can still be added to\r\n sentences.append(sentence.strip() + \" \" + w.strip())\r\n sentence = \"\" # Save sentence and word, start new sentence\r\n elif w[0] in punct and w[-1] not in punct:\r\n # Word starts with punctuation, like '('\r\n sentences.append(sentence.strip()) # Save current sentence\r\n sentence = w # Start a new sentence with punct and word\r\n else: # word ends with punct and sentence already long enough\r\n sentences.extend([sentence.strip(), w.strip()])\r\n sentence = \"\" \r\n else:\r\n if (len(sentence) + len(w) + 1 < MAXLEN): # Sentence still\r\n sentence += \" \" + w # short enough\r\n else: # Sentence too long\r\n sentences.append(sentence.strip())\r\n sentence = w # Start a new sentence with the word\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n return sentences",
"def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))"
] | [
"0.66363716",
"0.61474895",
"0.6118832",
"0.6097702",
"0.609217",
"0.60894656",
"0.6057062",
"0.60051125",
"0.6003964",
"0.59820795",
"0.59751576",
"0.5925597",
"0.5902075",
"0.5848546",
"0.5844513",
"0.5837306",
"0.58300614",
"0.58283144",
"0.5815858",
"0.5792594",
"0.5778242",
"0.57480145",
"0.57458895",
"0.5734289",
"0.569558",
"0.56917804",
"0.5674859",
"0.5668577",
"0.5655869",
"0.56327575"
] | 0.6489249 | 1 |
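The negatives collected in the row above are all variations on one text-normalization pattern: lowercase the input, strip or pad characters with regular expressions, and collapse repeated whitespace. A minimal sketch of that shared pattern, using only the Python standard library (the function name basic_normalize and the exact regexes are illustrative assumptions, not taken from any of the snippets above):

import re

def basic_normalize(text: str) -> str:
    # Lowercase and drop everything except letters, digits and basic punctuation.
    text = text.lower()
    text = re.sub(r"[^a-z0-9.,!? ]+", " ", text)
    # Pad punctuation with spaces so a plain split() yields clean tokens.
    text = re.sub(r"([.,!?])", r" \1 ", text)
    # Collapse runs of whitespace into single spaces.
    return " ".join(text.split())

print(basic_normalize("Hello,   WORLD!!  Visit https://example.com"))
# -> "hello , world ! ! visit https example . com"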
Conditional Entropy Calculates the conditional Shannon Entropy for two discrete distributions. This metric gives the entropy of the distribution of x when the distribution of y is known. | def conditional_entropy(x, y, bins, normalize=False):
# get the bins
bins = get_2D_bins(x, y, bins)
# calculate H(x,y) and H(y)
hjoint = joint_entropy(x,y,bins)
hy = entropy(y, bins[1])
if normalize:
normalizer = entropy(x, bins[0])
conditional_entropy = hjoint - hy
# check if conditional entropy and normalizer are very small
if conditional_entropy < 1e-4 and normalizer < 1e-4:
# return zero to prevent very high values of normalized conditional entropy
# e.g. conditional entropy = -1.3e-12, normalizer = -1.6e-12
# -> normalized conditional entropy = 812.5
return 0
else:
return conditional_entropy / normalizer
else:
return hjoint - hy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce",
"def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )",
"def conditional_entropy(self) -> float:\n pass",
"def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))",
"def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')",
"def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def conditional_entropy_hyper(self) -> float:\n pass",
"def cross_entropy(x, y, bins, xy_probabilities=False):\n # calculate probabilities if probabilities == False\n if xy_probabilities:\n # same bins for x and y -> same length of x and y if xy_probabilities == True\n assert len(x) == len(y)\n\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y),1,atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n\n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n return - px.dot(np.log2(py))",
"def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)",
"def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )",
"def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)",
"def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))",
"def _conditional_entropy_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n total_occurrences = confmat.sum()\n p_xy_m = confmat / total_occurrences\n p_y = confmat.sum(1) / total_occurrences\n p_y_m = p_y.unsqueeze(1).repeat(1, p_xy_m.shape[1])\n return torch.nansum(p_xy_m * torch.log(p_y_m / p_xy_m))",
"def js_divergence(dist1, dist2):\n mean_dist = (dist1 + dist2) / 2.0\n js = (\n scipy.stats.entropy(dist1, mean_dist) + scipy.stats.entropy(dist2, mean_dist)\n ) / 2.0\n return js",
"def _entropy_filter(self, prob1, prob2):\n\n\n # calculate merged prob.\n prob_merged = (prob1 + prob2)/2\n # Compute entropy for each prob.\n H1 = -prob1 * math.log(prob1) - (1-prob1) * math.log(1-prob1)\n H2 = -prob2 * math.log(prob2) - (1-prob2) * math.log(1-prob2)\n Hm = -prob_merged * math.log(prob_merged) - (1-prob_merged) * math.log(1-prob_merged)\n\n H_min = min(H1, H2, Hm)\n\n if H_min == H1:\n return prob1\n elif H_min == H2:\n return prob2\n else:\n return prob_merged",
"def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()",
"def calc_conditional_entropy(map,data_stat,attribute):\n #acquire the data info of the attribute stored in data_stat\n data_info = data_stat[attribute]\n #acquire the label info\n # label_col = len(data_stat)-1\n label_col = data_stat.keys()[-1]\n # print(data_stat.keys())\n label_info = data_stat[label_col]\n #acquire the data \n data = map[attribute]\n labels = map[label_col]\n conditional_entropy =0\n for data_type in data_info:\n specific_entropy = 0\n for label_type in label_info: \n #attribute data indices where all data entries are equal to a speicifc value\n data_with_spec_val_idx = data_info[data_type]\n #label indices where all labels are of same value\n spec_label_idx = label_info[label_type]\n #the intersection of the two indices above\n intersect_idx = np.intersect1d(data_with_spec_val_idx,spec_label_idx)\n #conditional probability of label being of specific value given speicific data value\n temp_prob = len(intersect_idx)/float(len(data_with_spec_val_idx))\n if temp_prob!=0:\n specific_entropy += temp_prob*math.log(temp_prob,2)\n specific_entropy = -specific_entropy\n prob = len(data_with_spec_val_idx)/float(len(data))\n conditional_entropy += prob * specific_entropy\n return conditional_entropy",
"def joint_entropy(x, y, bins):\n # assert array length\n assert len(x) == len(y)\n\n # get the bins, x and y get their own bins in case of joint entropy\n bins = get_2D_bins(x, y, bins)\n\n # get the joint histogram\n joint_hist = np.histogram2d(x, y, bins)[0]\n\n # calculate the joint probability and add a small number\n joint_p = (joint_hist / np.sum(joint_hist)) + 1e-15\n\n # calculate and return the joint entropy\n return - np.sum(joint_p * np.log2(joint_p))",
"def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation",
"def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy",
"def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy",
"def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))",
"def entropy_coefficient(filter1, filter2, base=2):\n\n if (type(filter1) is NullField) or (type(filter2) is NullField):\n return 0\n\n total_count = int(filter1.bit_size)\n\n f1_element_count = filter1.filter.count(True)\n f2_element_count = filter2.filter.count(True)\n\n prob_f1 = f1_element_count / total_count\n prob_f2 = f1_element_count / total_count\n\n e_f1 = -1.0 * total_count * prob_f1 * math.log(prob_f1) / math.log(base)\n e_f2 = -1.0 * total_count * prob_f2 * math.log(prob_f2) / math.log(base)\n\n entropy = abs(e_f1 - e_f2)\n\n # for element_count in Counter(data).values():\n # p = element_count / total_count\n # entropy -= p * math.log(p, self.base)\n\n assert entropy >= 0\n\n return 1 - entropy",
"def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en",
"def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr",
"def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)",
"def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )"
] | [
"0.7481201",
"0.6875717",
"0.6782211",
"0.6750487",
"0.6720426",
"0.6655979",
"0.6648373",
"0.6570259",
"0.6566688",
"0.64887154",
"0.64597607",
"0.64407265",
"0.63953054",
"0.6383221",
"0.63512045",
"0.63497704",
"0.63459283",
"0.63403004",
"0.6330119",
"0.6322664",
"0.6289737",
"0.6262488",
"0.62549144",
"0.6251724",
"0.6230731",
"0.6217289",
"0.61961704",
"0.6191671",
"0.61783874",
"0.61604345"
] | 0.71850336 | 1 |
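The row above pairs the conditional-entropy docstring with an implementation that leans on joint_entropy, entropy and get_2D_bins helpers that are not shown in this excerpt. A minimal self-contained sketch of the same identity H(X|Y) = H(X,Y) - H(Y), built directly on NumPy histograms (a hedged illustration, not the package's actual code; the fixed bins=10 and the sample data are assumptions):

import numpy as np

def conditional_entropy_sketch(x, y, bins=10):
    # Joint histogram -> joint probabilities; the small offset avoids log2(0).
    joint_hist, _, _ = np.histogram2d(x, y, bins=bins)
    joint_p = joint_hist / joint_hist.sum() + 1e-15
    h_joint = -np.sum(joint_p * np.log2(joint_p))

    # Marginal histogram of y -> H(Y).
    hist_y, _ = np.histogram(y, bins=bins)
    p_y = hist_y / hist_y.sum() + 1e-15
    h_y = -np.sum(p_y * np.log2(p_y))

    # H(X|Y) = H(X,Y) - H(Y)
    return h_joint - h_y

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
print(conditional_entropy_sketch(x, x + 0.1 * rng.normal(size=1000)))  # small: y nearly determines x
print(conditional_entropy_sketch(x, rng.normal(size=1000)))            # larger: x and y are independent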
Cross Entropy Calculates the cross entropy of two discrete distributions x and y. | def cross_entropy(x, y, bins, xy_probabilities=False):
    # x and y are already probabilities if xy_probabilities is True; otherwise estimate them from histograms
if xy_probabilities:
# same bins for x and y -> same length of x and y if xy_probabilities == True
assert len(x) == len(y)
# if x does not sum up to 1, raise an error
if not np.isclose(sum(x),1,atol=0.0001):
raise ValueError('Probabilities in vector x do not sum up to 1.')
# if y does not sum up to 1, raise an error
if not np.isclose(sum(y),1,atol=0.0001):
raise ValueError('Probabilities in vector y do not sum up to 1.')
# add a small number to all probabilities if zero occurs
if x.any(0):
px = x + 1e-15
py = y + 1e-15
else:
px = x
py = y
else:
# get the bins, joint bins for x and y (same_bins=True)
bins = get_2D_bins(x, y, bins, same_bins=True)
# calculate unconditioned histograms
hist_x = np.histogram(x, bins=bins[0])[0]
hist_y = np.histogram(y, bins=bins[1])[0]
px = (hist_x / np.sum(hist_x)) + 1e-15
py = (hist_y / np.sum(hist_y)) + 1e-15
return - px.dot(np.log2(py)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )",
"def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)",
"def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)",
"def cross_entropy_error(self, x, y):\n return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])",
"def crossEntropy(p_m1):\n p_m2 = 1 - p_m1\n D = - p_m1*math.log(p_m1) - p_m2*math.log(p_m2)\n return D",
"def cross_entropy(y_prob,y):\n from numpy import log, sum\n m = y.shape[0]\n p = y_prob\n log_likelihood = -log(p[range(m),y])\n loss = sum(log_likelihood) / m\n return loss",
"def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )",
"def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')",
"def cross_entropy(self, yhat):\n n = len(self._y)\n c = 0.0\n for i in range(0, n):\n c += self._y[i] * log(\n yhat[i]) + (1 - self._y[i]) * log(1 - yhat[i])\n\n return c",
"def alt_cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt((np.std(x_arr, ddof=1) ** 2 +\n np.std(y_arr, ddof=1) ** 2) / 2.0)\n return delta / pooled_std",
"def cross_entropy(y_observed, p):\n\n pass",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt(\n (\n (len(x_arr) - 1) * np.std(x_arr, ddof=1) ** 2 +\n (len(y_arr) - 1) * np.std(y_arr, ddof=1) ** 2\n ) / (len(x_arr) + len(y_arr))\n )\n return delta / pooled_std",
"def cohens_d(x, y):\n nx, ny = len(x), len(y)\n pooled_variance = ((nx - 1) * np.std(x, ddof=1) ** 2 +\n (ny - 1) * np.std(y, ddof=1) ** 2) / \\\n ((nx - 1) + (ny - 1))\n return (np.mean(x) - np.mean(y)) / np.sqrt(pooled_variance)",
"def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))",
"def cross_entropy(t,y):\r\n #print(-1*t*np.log(y))\r\n #print(np.shape(np.log(y)))\r\n #print(np.shape(t))\r\n return t*np.log(y)*(-1)",
"def cross_entropy(y_pred,y):\n \n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n return sum(-y*np.log(y_pred+epsilon))",
"def crossEntropy(obs, actual, offset=1e-7):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n # bound by clipping to avoid nan\n obs_ = tf.clip_by_value(obs, offset, 1 - offset)\n return -tf.reduce_sum(actual * tf.log(obs_) +\n (1 - actual) * tf.log(1 - obs_), 1)",
"def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce",
"def compute_cross_entropy(probs, target):\n avg_probs_per_sample = probs.mean(\n -1)\n xe = torch.nn.CrossEntropyLoss(reduction='none')\n return xe(avg_probs_per_sample, target).detach().cpu().numpy()",
"def cross_entropy(self):\n return self._cross_entropy_func",
"def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)",
"def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation",
"def calculate_entropy(y):\n\tlog2 = lambda x: math.log(x) / math.log(2)\n\tunique_labels = np.unique(y)\n\tentropy = 0\n\tfor label in unique_labels:\n\t\tcount = len(y[y == label])\n\t\tp = count / len(y)\n\t\tentropy += -p * log2(p)\n\treturn entropy",
"def cross_entropy(predictions, targets):\n likelihood = targets * np.log(predictions)\n return -np.sum(likelihood) / predictions.shape[0]",
"def cross_entropy(X, y, using_onehot=True):\n\tM = y.shape[0]\n\tif using_onehot :\n\t\tlog_likelihood = -np.log(np.max(X * y, -1))\n\telse:\n\t\tlog_likelihood = -np.log(X[range(M), y]) # 找到y对应的那个类别所对应的logit\n\tloss = np.sum(log_likelihood) / M\n\treturn loss",
"def entropyCategorical(attr, X, y):\n uniques = X[attr].unique().tolist()\n idxLists = []\n entropies = []\n weights = []\n for u in uniques:\n idxLists.append(X.index[X[attr] == u].tolist())\n entropies.append(entropy(y, idxLists[-1]))\n weights.append(len(idxLists[-1]))\n\n entropies = np.array(entropies).reshape(1, -1)\n weights = np.array(weights).reshape(-1, 1).astype(np.float32)\n weights /= np.sum(weights)\n\n return (uniques, idxLists, (entropies @ weights)[0, 0])",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def crossEntropyPredict(YPredict):\n YPredict = np.atleast_2d(YPredict)\n return np.argmax(YPredict, axis=1)",
"def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)"
] | [
"0.7399793",
"0.7246357",
"0.71943384",
"0.70277774",
"0.6669306",
"0.6637702",
"0.6602384",
"0.65894943",
"0.65266645",
"0.6473503",
"0.6437216",
"0.642684",
"0.63894004",
"0.6365542",
"0.6353329",
"0.63220906",
"0.628778",
"0.6279644",
"0.62760943",
"0.6269529",
"0.62406224",
"0.62195385",
"0.61654633",
"0.6152873",
"0.6143035",
"0.61429644",
"0.6135727",
"0.6133718",
"0.60981774",
"0.60756135"
] | 0.7348612 | 1 |
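The cross-entropy row above implements H(p, q) = -sum(p * log2(q)) with the same binning helpers. A stripped-down sketch on two explicit probability vectors, illustrating Gibbs' inequality H(p, q) >= H(p) (the example vectors are arbitrary assumptions):

import numpy as np

def cross_entropy_sketch(px, py):
    # px and py are probability vectors over the same bins; both must sum to 1.
    px = np.asarray(px, dtype=float) + 1e-15
    py = np.asarray(py, dtype=float) + 1e-15
    return -np.sum(px * np.log2(py))

p = np.array([0.5, 0.25, 0.25])
q = np.array([1 / 3, 1 / 3, 1 / 3])
print(cross_entropy_sketch(p, p))  # ~1.5 bits, which is just the entropy of p
print(cross_entropy_sketch(p, q))  # ~1.585 bits, never smaller than the entropy of p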
r"""Joint Entropy Calculates the joint entropy of two discrete distributions x and y. This is the combined Entropy of X added to the conditional Entropy of x given y. | def joint_entropy(x, y, bins):
# assert array length
assert len(x) == len(y)
# get the bins, x and y get their own bins in case of joint entropy
bins = get_2D_bins(x, y, bins)
# get the joint histogram
joint_hist = np.histogram2d(x, y, bins)[0]
# calculate the joint probability and add a small number
joint_p = (joint_hist / np.sum(joint_hist)) + 1e-15
# calculate and return the joint entropy
return - np.sum(joint_p * np.log2(joint_p)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')",
"def joint_entropy(column_i, column_j):\n\tfreq_ij = dict()\n\ttotal = len(column_i)\n\tentropy = 0\n\tfor index in range(total):\n\t\ti = column_i[index]\n\t\tj = column_j[index]\n\t\tif i+j in freq_ij:\n\t\t\tfreq_ij[i+j] +=1\n\t\telse:\n\t\t\tfreq_ij[i+j] = 1\n\n\tfor key in freq_ij:\n\t\tfreq_ij[key] /= total\n\t\tentropy += freq_ij[key]*math.log(freq_ij[key], 2)\n\treturn -entropy",
"def joint_entropy(column_i, column_j):\n\tif len(column_i) != len(column_j):\n\t\traise IndexError(\"The two MSA should have the same number of related sequences (same species)\")\n\tfreq_ij = dict()\n\ttotal = len(column_i)\n\tentropy = 0\n\tfor index in range(total):\n\t\ti = column_i[index]\n\t\tj = column_j[index]\n\t\tif i+j in freq_ij:\n\t\t\tfreq_ij[i+j] +=1\n\t\telse:\n\t\t\tfreq_ij[i+j] = 1\n\n\tfor key in freq_ij:\n\t\tfreq_ij[key] /= total\n\t\tentropy += freq_ij[key]*math.log(freq_ij[key], 2)\n\tif entropy != 0.0:\n\t\treturn -entropy\n\telse:\n\t\treturn entropy",
"def joint_pdf(self, x1, x2 = None):\n return np.exp(self.joint_logpdf(x1, x2))",
"def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))",
"def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)",
"def cross_entropy(x, y, bins, xy_probabilities=False):\n # calculate probabilities if probabilities == False\n if xy_probabilities:\n # same bins for x and y -> same length of x and y if xy_probabilities == True\n assert len(x) == len(y)\n\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y),1,atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n\n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n return - px.dot(np.log2(py))",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy",
"def dist_calc(self, x, y):\n p_xy = self.d2_bin(x, y)\n p_x = np.sum(p_xy, axis=1)\n p_y = np.sum(p_xy, axis=0)\n\n p_x_times_p_y = np.tensordot(p_x, p_y, axes = 0)\n info = np.sum(p_xy * np.ma.log(np.ma.divide(p_xy, p_x_times_p_y)))\n entropy = np.sum(-1 * p_xy * np.ma.log(p_xy))\n\n output = max(0.0, (1 - (info / entropy)))\n return output",
"def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))",
"def joint_feature(self, x, y):\n self._check_size_x(x)\n features, edges = self._get_features(x), self._get_edges(x)\n n_nodes = features.shape[0]\n\n if isinstance(y, tuple):\n # y is result of relaxation, tuple of unary and pairwise marginals\n unary_marginals, pw = y\n unary_marginals = unary_marginals.reshape(n_nodes, self.n_states)\n # accumulate pairwise\n pw = pw.reshape(-1, self.n_states, self.n_states).sum(axis=0)\n else:\n y = y.reshape(n_nodes)\n gx = np.ogrid[:n_nodes]\n\n #make one hot encoding\n unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.int)\n gx = np.ogrid[:n_nodes]\n unary_marginals[gx, y] = 1\n\n ##accumulated pairwise\n pw = np.dot(unary_marginals[edges[:, 0]].T,\n unary_marginals[edges[:, 1]])\n unaries_acc = np.dot(unary_marginals.T, features)\n if self.directed:\n pw = pw.ravel()\n else:\n pw = compress_sym(pw)\n joint_feature_vector = np.hstack([unaries_acc.ravel(), pw])\n return joint_feature_vector",
"def joint_entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))",
"def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint",
"def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )",
"def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))",
"def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)",
"def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint",
"def cond_entropy(joint_prob, cond_prob):\n # Computing log2(P cond)\n log2_p = (np.ma.log2(cond_prob)).filled(0)\n # Multipling element wise the arrays\n prod_entropy = np.multiply(joint_prob, log2_p)\n # Getting the - sum of the resulting array.\n H = -( np.sum(prod_entropy))\n return H",
"def joint_dataset(l1, l2):\n N = np.max(l1) + 1\n return l2 * N + l1",
"def _calculate_probs_and_entropy_y(self):\n #calculate y probabilities and H(Y)\n #H(Y) = Sum(y € Y)(-P(Y=y) * log(P(Y=y)))\n self.lab_entropy = 0\n s = sum(self.lab_counts.values())\n for label, count in self.lab_counts.items():\n self.lab_probs[label] = count / s\n self.lab_entropy -= self.lab_probs[label] * self.log(self.lab_probs[label])",
"def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation",
"def logistic_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n y_prime = (y + 1)/2\n h = 1 /(1 + np.exp(-x))\n loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N\n dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N\n return loss, dx",
"def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )",
"def log_prob(self, x, y):\n p = self.tag_log_prob(y)\n for i in range(len(y)):\n if self.out_prob(x[i], y[i]) == 0:\n return -math.inf\n\n p += math.log2(self.out_prob(x[i], y[i]))\n\n return p",
"def logq_joint(self, x, h, return_mu=False):\n logph = distributions.Normal(0, 1).log_prob(h).sum(1)\n gmu = self.g(h)\n px_given_h = distributions.Normal(gmu, self.logsigma.exp())\n logpx_given_h = px_given_h.log_prob(x).flatten(start_dim=1).sum(1)\n if return_mu:\n return logpx_given_h + logph, gmu\n else:\n return logpx_given_h + logph",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)",
"def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en",
"def prob(self, x, y):\n p = self.tag_prob(y)\n for i in range(len(y)):\n p *= self.out_prob(x[i], y[i])\n\n return p"
] | [
"0.6370943",
"0.63690835",
"0.63631195",
"0.6334478",
"0.6209416",
"0.6186507",
"0.60127246",
"0.5941165",
"0.58904195",
"0.5855695",
"0.5840326",
"0.5787013",
"0.5770392",
"0.57627195",
"0.5750125",
"0.5739848",
"0.5735412",
"0.56943065",
"0.56919396",
"0.5672802",
"0.5654701",
"0.5654116",
"0.5653314",
"0.55887705",
"0.5579684",
"0.55605185",
"0.55579716",
"0.5546054",
"0.55091",
"0.55074996"
] | 0.7735844 | 0 |
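The joint-entropy row above estimates H(X,Y) from a 2D histogram. A short sketch of how that estimate behaves under the chain rule H(X,Y) = H(X) + H(Y|X): for strongly dependent variables it stays close to the marginal entropy of x, while for independent variables it approaches H(X) + H(Y). Bin count and sample data below are assumptions for illustration:

import numpy as np

def entropy_1d_sketch(v, bins=10):
    hist, _ = np.histogram(v, bins=bins)
    p = hist / hist.sum() + 1e-15
    return -np.sum(p * np.log2(p))

def joint_entropy_sketch(x, y, bins=10):
    joint_hist, _, _ = np.histogram2d(x, y, bins=bins)
    p = joint_hist / joint_hist.sum() + 1e-15
    return -np.sum(p * np.log2(p))

rng = np.random.default_rng(1)
x = rng.normal(size=5000)
y_dep = x + 0.05 * rng.normal(size=5000)   # y is almost a copy of x
y_ind = rng.normal(size=5000)              # y is unrelated to x

print(joint_entropy_sketch(x, y_dep), entropy_1d_sketch(x))                              # roughly equal
print(joint_entropy_sketch(x, y_ind), entropy_1d_sketch(x) + entropy_1d_sketch(y_ind))   # roughly equal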
r"""KullbackLeibler Divergence Calculates the KullbackLeibler Divergence between two discrete distributions x and y. X is considered to be an empirical discrete distribution while y is considered to be the real discrete distribution of the underlying population. | def kullback_leibler(x, y, bins, xy_probabilities=False):
if xy_probabilities:
# if x does not sum up to 1, raise an error
if not np.isclose(sum(x),1,atol=0.0001):
raise ValueError('Probabilities in vector x do not sum up to 1.')
# if y does not sum up to 1, raise an error
if not np.isclose(sum(y),1,atol=0.0001):
raise ValueError('Probabilities in vector y do not sum up to 1.')
# add a small number to all probabilities if zero occurs
if x.any(0):
px = x + 1e-15
py = y + 1e-15
else:
px = x
py = y
else:
# get the bins, joint bins for x and y (same_bins=True)
bins = get_2D_bins(x, y, bins, same_bins=True)
# calculate unconditioned histograms
hist_x = np.histogram(x, bins=bins[0])[0]
hist_y = np.histogram(y, bins=bins[1])[0]
#calculate probabilities
px = (hist_x / np.sum(hist_x))
py = (hist_y / np.sum(hist_y))
# calculate the cross entropy and unconditioned entropy of y
hcross = cross_entropy(px, py, bins, xy_probabilities=True)
hx = entropy(px, bins, xy_probabilities=True)
return hcross - hx | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kl_bern(x, y):\n x = min(max(x, eps), 1-eps)\n y = min(max(y, eps), 1-eps)\n return x*log(x/y) + (1-x)*log((1-x)/(1-y))",
"def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance",
"def kl_divergence(x, y, thresholded=True, symmetrized=True, normalize=True):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n # assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n if thresholded:\n normalize = True\n if normalize:\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n if thresholded:\n eps = np.finfo(x.dtype).eps\n x = x + eps\n y = y + eps\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n res = __kl_divergence(x, y)\n\n if symmetrized:\n res = 0.5 * res + 0.5 * __kl_divergence(y, x).transpose()\n\n return np.float64(res).reshape(res.shape)",
"def kullback_leibler_divergence_loss(self, y_true=None, y_pred=None, decimal=5, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data2(y_true, y_pred, decimal)\n y_pred = np.clip(y_pred, self.EPSILON, 1 - self.EPSILON) # Clip predicted probabilities\n if binary:\n y_true = np.clip(y_true, self.EPSILON, 1 - self.EPSILON) # Clip true labels\n res = y_true * np.log(y_true / y_pred) + (1 - y_true) * np.log((1 - y_true) / (1 - y_pred))\n res = np.mean(res)\n else:\n # Convert y_true to one-hot encoded array\n num_classes = len(np.unique(y_true))\n y_true = np.eye(num_classes)[y_true]\n y_true = np.clip(y_true, self.EPSILON, 1 - self.EPSILON) # Clip true labels\n res = np.sum(y_true * np.log(y_true / y_pred), axis=1)\n res = np.mean(res)\n return np.round(res, decimal)",
"def kl_divergence(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = backend.clip(y_true, backend.epsilon(), 1)\n y_pred = backend.clip(y_pred, backend.epsilon(), 1)\n return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)",
"def kl_divergence(p_dist, q_dist, n_samples_per_axis=30, n_axis=2):\r\n global COUNTER\r\n if n_axis == 2:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y)\r\n elif n_axis == 3:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n z = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y, z)\r\n elif n_axis == 1:\r\n grids = np.linspace(-1.1, 1.1, 120)\r\n print(\"Grid complete!\")\r\n if n_axis != 1:\r\n grid = np.vstack(grids).reshape((n_axis, n_samples_per_axis**n_axis)).T\r\n else:\r\n grid = grids\r\n grid = np.reshape(grid, (grid.shape[0], 1))\r\n probs_p = np.exp(p_dist.score_samples(grid))\r\n probs_q = np.exp(q_dist.score_samples(grid))\r\n print(\"prob_calc_complete\")\r\n kl = entropy(probs_p, probs_q)\r\n return kl",
"def kl_divergence_from_logits(self, logits_a, logits_b):\n distribution1 = tf.contrib.distributions.Categorical(logits=logits_a)\n distribution2 = tf.contrib.distributions.Categorical(logits=logits_b)\n return tf.contrib.distributions.kl_divergence(distribution1, distribution2)",
"def KL_divergence(model_1, model_2, samples):\n posterior_1 = create_posterior_object(model_1, samples)\n posterior_2 = create_posterior_object(model_2, samples)\n return posterior_1.KL(posterior_2)",
"def KL_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = prob_density_func(ys,norm=True,data_range=data_range)\n keys = set(pdf_x.keys()+pdf_y.keys())\n PQ = []\n for k in keys:\n if k in pdf_x and k in pdf_y:\n PQ.append((pdf_x[k],pdf_y[k]))\n return np.sum([p*np.log(float(p)/float(q)) for (p,q) in PQ if q>0 and p>0])",
"def _graph_fn_kl_divergence(distribution_a, distribution_b):\n if get_backend() == \"tf\":\n return tf.no_op()\n # TODO: never tested. tf throws error: NotImplementedError: No KL(distribution_a || distribution_b) registered for distribution_a type Bernoulli and distribution_b type ndarray\n #return tf.distributions.kl_divergence(\n # distribution_a=distribution_a,\n # distribution_b=distribution_b,\n # allow_nan_stats=True,\n # name=None\n #)",
"def __init__(self, name='backward_kl_divergence', **kwargs):\n\n super(BackwardKLDivergence, self).__init__(name=name, **kwargs)\n\n return",
"def test_divergences_to_kl2(dists, divergence):\n for dist1, dist2 in combinations(dists, 2):\n assert divergence(dist1, dist2, alpha=1) == pytest.approx(kullback_leibler_divergence(dist1, dist2))",
"def get_KL_divergence(self):\n KL_loss_W = Vil.get_KL_divergence_Samples(self.mu_weight, Vil.softplus(self.rho_weight), self.weight, self.prior)\n KL_loss_b = 0\n if self.bias is not None:\n KL_loss_b = Vil.get_KL_divergence_Samples(self.mu_bias, Vil.softplus(self.rho_bias), self.bias, self.prior)\n \n KL_loss = KL_loss_W + KL_loss_b\n \n return KL_loss",
"def elbo(self, x, y):\n self.forward(x, y)\n\n # prior-posterior divergence\n kl_loss = kl.kl_divergence(\n self.prior_latent_distribution, self.posterior_latent_distribution).mean()\n\n # reconstruction loss\n if not self.training:\n # resample output based on prior, not posterior\n self.forward(x)\n reconstruction_loss = self.recon_loss_fun(self.y_hat_raw, y[:, 0])\n\n # training loss\n loss = reconstruction_loss + self.beta * kl_loss\n\n # statictics about prior and posterior\n mu_prior = self.prior_latent_distribution.mean\n mu_posterior = self.posterior_latent_distribution.mean\n mu_dist = torch.norm(mu_prior - mu_posterior, dim=-1).mean()\n std_prior = self.prior_latent_distribution.stddev\n std_prior = torch.norm(std_prior - mu_posterior, dim=-1).mean()\n std_posterior = self.posterior_latent_distribution.stddev\n std_posterior = torch.norm(std_posterior - mu_posterior, dim=-1).mean()\n\n return loss, reconstruction_loss, kl_loss, mu_dist, std_prior, std_posterior",
"def kl_divergence(a, b, normalize=True):\n a, b = np.array(a), np.array(b)\n\n x = np.linspace(\n min(a.min(), b.min()) - 1,\n max(a.max(), b.max()) + 1,\n 100\n )\n\n p = gaussian_kde(a)(x)\n q = gaussian_kde(b)(x)\n\n if normalize:\n p = p/np.sum(p)\n q = q/np.sum(q)\n\n return np.sum(np.where(p != 0, (p) * np.log(p / q), 0))",
"def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs):\n\n # create grid of sample locations (default: 100x100)\n xx, yy = np.mgrid[x.min():x.max():xbins, \n y.min():y.max():ybins]\n\n xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n xy_train = np.vstack([y, x]).T\n\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(xy_train)\n\n # score_samples() returns the log-likelihood of the samples\n z = np.exp(kde_skl.score_samples(xy_sample))\n return xx, yy, np.reshape(z, xx.shape)",
"def zkl_divergence(x, y, gamma):\n return np.sum([p_i*np.log(p_i/q_i) if q_i > 0 and p_i > 0 else p_i*gamma for (p_i, q_i) in zip(x, y)])",
"def find_knee(x,y):\n\n # find ranges\n if len(x) != len(y):\n raise Exception(\"bad data\")\n tot_len = len(x)\n \n \n \n # fit strait lines to both\n\n # find intercept\n knee_r = (f_top.beta[1] - f_bottom.beta[1])/(-f_top.beta[0] + f_bottom.beta[0])",
"def kl_gauss(x, y, sig2=1.):\n return (x - y) ** 2 / (2 * sig2)",
"def kl_div_prior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq11 - gradient of prior\n #gradient of the KLD between posterior and prior wrt to prior\n #parameters theta, i.e. generative model parameters.\n #logits to probabilities\n posterior_probs=torch.sigmoid(posterior_logits)\n positive_probs=posterior_probs.detach()\n \n #samples from posterior are labelled positive\n positive_samples=posterior_binary_samples.detach()\n\n n_split=positive_samples.size()[1]//2\n positive_samples_left,positive_samples_right=torch.split(positive_samples,split_size_or_sections=int(n_split),dim=1)\n \n #-z_left^t J z_right\n pos_first_term=torch.matmul(positive_samples_left,self.prior.get_weights())*positive_samples_right\n \n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n \n #this gives [42,400] size\n #- z^t h\n #TODO this uses positive probs. Should it not use positive samples?\n # FIXME an indication are the negative ones where samples are used! On\n #other hand this is the only place this this used\n pos_sec_term=positive_probs*rbm_bias\n # pos_sec_term=positive_samples*rbm_bias\n\n # Energy = -z_left^t J z_right - z^t h\n pos_kld_per_sample=-(torch.sum(pos_first_term,axis=1)+torch.sum(pos_sec_term,axis=1))\n #samples from rbm are labelled negative\n\n #rbm_samples Tensor(\"zeros:0\", shape=(200, 200), dtype=float32)\n #this returns the full RBM set: left and right nodes concatenated\n\n #TODO What are these samples here?\n #TODO what's the impact of doing gibbs sampling here? does this make\n #sense?\n rbm_samples=self.prior.get_samples_kld(approx_post_samples=positive_samples_left,n_gibbs_sampling_steps=1)\n negative_samples=rbm_samples.detach()\n\n # print(self.prior.get_weights())\n n_split=negative_samples.size()[1]//2\n negative_samples_left,negative_samples_right=torch.split(negative_samples,split_size_or_sections=int(n_split),dim=1)\n neg_first_term=torch.matmul(negative_samples_left,self.prior.get_weights())*negative_samples_right\n \n #FIXME see above, the positive case looks different. Why?\n neg_sec_term=negative_samples*rbm_bias\n neg_kld_per_sample=(torch.sum(neg_first_term,axis=1)+torch.sum(neg_sec_term,axis=1))\n \n kld_per_sample=pos_kld_per_sample+neg_kld_per_sample\n\n return kld_per_sample",
"def kl_divergence(self, samples):\n # Check size of input\n if not len(samples.shape) == 2:\n raise ValueError('Given samples list must be n x 2.')\n if samples.shape[1] != self._n_parameters:\n raise ValueError(\n 'Given samples must have length ' + str(self._n_parameters))\n\n best_mode = np.zeros(samples.shape[0])\n for i in range(samples.shape[0]):\n a_sample = samples[i, :]\n a_log_pdf = -np.inf\n a_max_index = -1\n for j, var in enumerate(self._vars):\n a_test_log_pdf = var.logpdf(a_sample)\n if a_test_log_pdf > a_log_pdf:\n a_log_pdf = a_test_log_pdf\n a_max_index = j\n best_mode[i] = a_max_index\n\n kl = np.zeros(len(self._vars))\n for i in range(len(self._vars)):\n y = np.array(samples[best_mode == i, :], copy=True)\n # when a mode has no points use all samples\n if y.shape[0] == 0:\n y = np.array(samples, copy=True)\n m0 = np.mean(y, axis=0)\n s0 = np.cov(y.T)\n s1 = self._covs[i]\n m1 = self._modes[i]\n s1_inv = np.linalg.inv(s1)\n if len(np.atleast_1d(s0)) > 1:\n kl[i] = 0.5 * (\n np.trace(np.matmul(s1_inv, s0)) +\n np.matmul(np.matmul(m1 - m0, s1_inv), m1 - m0) -\n np.log(np.linalg.det(s0)) +\n np.log(np.linalg.det(s1)) -\n self._n_parameters)\n else:\n kl[i] = 0.5 * (\n np.sum(s1_inv * s0) +\n (m1 - m0) * s1_inv * (m1 - m0) -\n np.log(s0) +\n np.log(s1) -\n 1)\n return kl",
"def kl_divergence(dist1, dist2, symmetrized=True):\n if symmetrized == True:\n kl = (\n scipy.stats.entropy(dist1, dist2) + scipy.stats.entropy(dist2, dist1)\n ) / 2.0\n return kl\n else:\n kl = scipy.stats.entropy(dist1, dist2)\n return kl",
"def MyKLD(X,Y): \n mu1,mu2 = tuple(np.mean(X,axis=0))\n sigma1,sigma2 = tuple(np.std(X,axis=0))\n m1,m2 = tuple(np.mean(X,axis=0))\n s1,s2 = tuple(np.std(X,axis=0))\n rho = np.corrcoef(X,rowvar=False)[0,1]\n r = np.corrcoef(Y,rowvar=False)[0,1]\n \n return (\n ((mu1-m1)**2/s1**2 - 2*r*(mu1-m1)*(mu2-m2)/(s1*s2) + (mu2-m2)**2/s2**2) /\n (2 * (1 - r**2)) +\n ((sigma1**2-s1**2)/s1**2 - 2*r*(rho*sigma1*sigma2-r*s1*s2)/(s1*s2) + \n (sigma2**2-s2**2)/s2**2) /\n (2 * (1 - r**2)) +\n np.log((s1**2 * s2**2 * (1-r**2)) / (sigma1**2 * sigma2**2 * (1-rho**2))) / 2\n )",
"def kl_divergence(self) -> Tensor:\n return torch.tensor(0.0)",
"def maxkl_strategy(self):\n # TODO: rewrite to update only distribution from sampled bucket\n # Instead of computing everything again every iteration\n\n # Label model distributions\n lm_posteriors = self.bucket_probs.clip(1e-5, 1-1e-5)\n\n # Sample distributions\n # D_KL(LM distribution||Sample distribution)\n rel_entropy = np.zeros(len(lm_posteriors))\n sample_posteriors = np.zeros(lm_posteriors.shape)\n\n # Iterate over buckets\n for i in range(len(lm_posteriors)):\n # Collect points in bucket\n bucket_items = self.ground_truth_labels[np.where(self.unique_inverse == i)[0]]\n # Collect labeled points in bucket\n bucket_gt = list(bucket_items[bucket_items != -1])\n # Add initial labeled point\n if not bucket_gt:\n bucket_gt.append(\n int(np.round(\n self.probs[\"bucket_labels_train\"][0][i].clip(0, 1)\n )))\n bucket_gt = np.array(bucket_gt)\n\n # Bucket distribution, clip to avoid D_KL undefined\n eps = 1e-2 / (len(bucket_gt))\n sample_posteriors[i, 1] = bucket_gt.mean().clip(eps, 1 - eps)\n sample_posteriors[i, 0] = 1 - sample_posteriors[i, 1]\n\n # KL divergence\n rel_entropy[i] = entropy(lm_posteriors[i, :], sample_posteriors[i, :])\n self.bucket_values = rel_entropy\n\n # Select buckets with highest KL divergence\n return np.where(\n np.logical_and(\n rel_entropy == np.max(rel_entropy[self.is_valid_bucket]), self.is_valid_bucket\n )\n )[0]",
"def test_divergences_to_kl(dists):\n for dist1, dist2 in combinations(dists, 2):\n assert alpha_divergence(dist1, dist2, alpha=-1) == pytest.approx(kullback_leibler_divergence(dist2, dist1))\n\n assert alpha_divergence(dist1, dist2, alpha=0) != pytest.approx(kullback_leibler_divergence(dist2, dist1))\n assert alpha_divergence(dist1, dist2, alpha=2) != pytest.approx(kullback_leibler_divergence(dist2, dist1))",
"def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result",
"def kl_poisson(x, y):\n x = max(x, eps)\n y = max(y, eps)\n return y-x+x*log(x/y)",
"def test_kl_divergence(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n kl = kl_divergence(dist_a, dist_b)\n if i == j:\n assert pytest.approx(kl, 0.0001) == 0.0\n else:\n assert kl > 0",
"def kl_divergence(self) -> Tensor:\n return self.variational_strategy.kl_divergence().sum(dim=1).mean()"
] | [
"0.70139825",
"0.6948134",
"0.6635383",
"0.6421419",
"0.63527554",
"0.6301051",
"0.62800944",
"0.62187314",
"0.6194453",
"0.6156323",
"0.6144021",
"0.61299276",
"0.61106426",
"0.6105154",
"0.6101856",
"0.6100986",
"0.6093369",
"0.6083329",
"0.60363257",
"0.6034415",
"0.6016642",
"0.6007839",
"0.6007176",
"0.59686935",
"0.5943885",
"0.59380877",
"0.5933738",
"0.59239966",
"0.58543617",
"0.58409435"
] | 0.7132836 | 0 |
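The kl_divergence snippets collected as negatives above all reduce, per mode, to the closed-form KL divergence between two multivariate Gaussians (the trace/log-determinant expression in the first snippet). A minimal NumPy sketch of just that formula, standing apart from any of the quoted classes, is:

import numpy as np

def gaussian_kl(m0, s0, m1, s1):
    # KL( N(m0, s0) || N(m1, s1) ) for k-dimensional Gaussians.
    m0, m1 = np.asarray(m0, dtype=float), np.asarray(m1, dtype=float)
    s0, s1 = np.atleast_2d(s0).astype(float), np.atleast_2d(s1).astype(float)
    k = m0.size
    s1_inv = np.linalg.inv(s1)
    diff = m1 - m0
    return 0.5 * (np.trace(s1_inv @ s0)
                  + diff @ s1_inv @ diff
                  - k
                  + np.log(np.linalg.det(s1) / np.linalg.det(s0)))

For instance, gaussian_kl([0.0, 0.0], np.eye(2), [1.0, 0.0], 2.0 * np.eye(2)) evaluates the same expression that the first negative applies to each mode's sample mean and covariance.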
Main method to get dependent review IDs of a specific review request on the ReviewBoard. | def main():
parameters = parse_parameters()
review_request_url = "%s/api/review-requests/%s/" % (REVIEWBOARD_URL,
parameters.review_id)
handler = ReviewBoardHandler()
review_request = handler.api(review_request_url)["review_request"]
review_ids = handler.get_dependent_review_ids(review_request)
if parameters.out_file:
with open(parameters.out_file, 'w') as f:
for r_id in review_ids:
f.write("%s\n" % (str(r_id)))
else:
for r_id in review_ids:
print("%s\n" % (str(r_id))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_review_request(self, rid):\r\n rsp = self.api_call('api/review-requests/%s/' % rid)\r\n return rsp['review_request']",
"def get_review_request(self, request_id, api_root):\n try:\n request = api_root.get_review_request(review_request_id=request_id)\n except APIError, e:\n raise CommandError(\"Error getting review request: %s\" % e)\n\n return request",
"def get_reviews(business_id):\n\n reviews_path = BUSINESS_PATH + business_id + '/reviews'\n\n return request(reviews_path)",
"def fetch_reviews(self, rb_id, start=0, max_results=25):\r\n return self.api_call('/api/review-requests/%s/reviews/?start=%s&max-results=%s'\r\n % (rb_id, start, max_results))['reviews']",
"def get_reviews(bearer_token, business_id):\n reviews_path = BUSINESS_PATH + business_id + '/reviews'\n\n return request(API_HOST, reviews_path, bearer_token)",
"def get_reviews(review_url):\n print review_url\n html = urllib.urlopen(review_url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n rating_scores = soup.findAll(\"span\", \"ratingScore\")\n num_ratings = len(rating_scores) - 1\n\n current_reviews = soup.findAll(\"div\", \"currentVintageProfessinalReviews\")\n num_cur_reviews = str(current_reviews).count('ratingProvider')\n past_reviews = soup.findAll(\"ul\", \"pastVintagesProfessionalReviews\")\n num_past_reviews = str(past_reviews).count('ratingProvider')\n\n print 'There are {0} reviews for prior vintages of this wine.'.format(num_past_reviews)\n print 'There are {0} current reviews for this vintage.\\n'.format(num_cur_reviews)\n\n rating_provider = soup.findAll(\"span\", \"ratingProvider\")\n rating_score = soup.findAll(\"span\", \"ratingScore\")\n reviewers = re.findall('(?<![A-Z])[>]([A-Z]+(?![A-Z]))', str(rating_provider))\n ratings = re.findall('(?<![A-Z])[0-9]{2}(?![A-Z])', str(rating_score))\n\n print \"Ratings List:\", ratings\n print \"Current Reviews: \", num_cur_reviews\n\n currentreviews = []\n for j in range(num_cur_reviews):\n print \"Current Review #\"+str(j+1)+\":\", reviewers[j], ratings[j]\n currentreviews.append((reviewers[j], ratings[j]))\n print currentreviews\n\n print \"\\nPast Reviews: \", num_past_reviews\n past_review_ratings = []\n for k in range(num_cur_reviews, num_past_reviews+num_cur_reviews):\n #print \"Past Review #\"+str(k-num_cur_reviews+1)+\":\", reviewers[k], int(ratings[k])\n past_review_ratings.append(float(ratings[k]))\n if k > 30:\n break\n if num_past_reviews != 0:\n avg_past_reviews = sum(past_review_ratings)/len(past_review_ratings)\n round(avg_past_reviews, 2)\n else:\n avg_past_reviews = 0\n\n print \"Average of Past Reviews: \", avg_past_reviews\n\n return currentreviews, avg_past_reviews",
"def test_get_url_on_review_request(self) -> None:\n review_request = self.create_review_request()\n\n self.assertEqual(\n self.action.get_url(context=self._create_request_context(\n review_request=review_request,\n url_name='review-request-detail')),\n '/r/%s/diff/raw/' % review_request.display_id)",
"def get_completed_chart_reviews(self, request):\n reqParams = request.GET\n project = reqParams.get('project', None)\n cohort = reqParams.get('cohort', None)\n patient_id = reqParams.get('patient_id', None)\n queryset = self.search_chart_review_data(project, cohort, patient_id)\n page = self.paginate_queryset(queryset)\n data = []\n\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n data = serializer.data\n\n return self.get_paginated_response(data)",
"def get_review(self, id):\n endpoint = '/v3/educator/reviews/%s' % id\n result = self.request(endpoint)",
"def build_indices(review_ids):\n\n review_indices = {}\n\n # Load qrel_abs_train txt file\n clef_data = pd.read_csv(config.TRAIN_QREL_LOCATION, sep=\"\\s+\", names=['review_id', 'q0', 'pmid', 'included'])\n\n # Get index of documents for each review\n for review_id in review_ids:\n index = clef_data.index[clef_data['review_id'] == review_id].tolist()\n\n # Get the range of index for all documents within each review\n review_indices[review_id] = (min(index), max(index) + 1)\n\n return review_indices",
"def get_review_status(pr_id):\n reviews = get_status_json(pr_id, 'reviews')\n requests = get_status_json(pr_id, 'reviewRequests')\n\n requested_authors = [r[\"login\"] for r in requests]\n\n review_status = {}\n for r in reviews:\n author = r['author']['login']\n date = datetime.fromisoformat(r['submittedAt'].strip('Z'))\n state = r['state']\n if author not in review_status:\n review_status[author] = ReviewComment(state, date, author)\n elif state != 'COMMENTED' and review_status[author].date < date:\n review_status[author] = ReviewComment(state, date, author)\n for a in review_status:\n if a in requested_authors:\n review_status[a] = ReviewComment('REVIEW_REQUESTED', review_status[a].date, a)\n for a in requested_authors:\n if a not in review_status:\n review_status[a] = ReviewComment('UNRESPONSIVE', None, a)\n return review_status, requested_authors",
"def get_review(review_id):\n return get(cls, review_id)",
"def _request_reviews(self, token, owner, repo, number, reviewers):\n post_data = {'reviewers': reviewers.split(',')}\n headers = {'Authorization': 'Basic ' + token}\n response = requests.post(\n flask.current_app.config['GITHUB_API_CREATE_REVIEW_REQUEST'].format(owner=owner, repo=repo, number=number),\n data=json.dumps(post_data), headers=headers)\n\n return response",
"def get_parent_rr(review_request_details, commit_data=None):\n commit_data = fetch_commit_data(review_request_details, commit_data)\n\n if not is_pushed(review_request_details, commit_data):\n return None\n\n if is_parent(review_request_details, commit_data):\n return review_request_details\n\n identifier = commit_data.get_for(review_request_details, IDENTIFIER_KEY)\n\n return ReviewRequest.objects.get(\n commit_id=identifier,\n repository=review_request_details.repository)",
"def parse_parameters():\n parser = argparse.ArgumentParser(\n description=\"Get all dependent review IDs\")\n parser.add_argument(\"-r\", \"--review-id\", type=str, required=True,\n help=\"Review ID\")\n parser.add_argument(\"-o\", \"--out-file\", type=str, required=False,\n help=\"The out file with the reviews IDs\")\n return parser.parse_args()",
"def find_reviews():\n print(\"***** Find Reviews of a Business *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n id = business_object['business_id']\n review_object = review_col.find({\"business_id\": id})\n print(f'{business_object[\"name\"]} has'\n f' {business_object[\"review_count\"]} '\n f'reviews:')\n for review in review_object:\n userid = review['user_id']\n print(f'- ({review[\"stars\"]}):'\n f' {review[\"text\"]}.'\n f' {review[\"date\"]}')",
"def get_request_ids(request_id=None, workload_id=None, session=None):\n if request_id:\n return [request_id]\n return get_request_ids_by_workload_id(workload_id)",
"def get_reviews(recipe_id=None):\n\n recipe = storage.get(Recipe, recipe_id)\n print(recipe)\n if not recipe:\n abort(404)\n reviews = []\n for review in recipe.reviews:\n reviews.append(review.to_dict())\n return jsonify(reviews)",
"def one_review(review_id=None):\n if review_id:\n for item in storage.all(Review).values():\n if review_id == item.id:\n return (jsonify(item.to_dict()))\n abort(404)",
"def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)",
"def get_reviews(review_id):\n if review_id:\n review = storage.get(Review, review_id) # retrieves obj\n if review is None:\n return jsonify({'error': 'Not found'}), 404\n if request.method == 'DELETE':\n storage.delete(review) # deletes\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n js.pop('id', None)\n js.pop('user_id', None)\n js.pop('place_id', None)\n js.pop('created_at', None)\n js.pop('updated_at', None)\n for key, value in js.items():\n setattr(review, key, value) # updates\n review.save()\n return jsonify(review.to_dict()), 200\n else:\n return jsonify(review.to_dict()), 200\n\n if request.method == 'POST':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n if js.get('user_id', None) is None:\n return jsonify({'error': 'Missing user_id'}), 400\n if js.get('text', None) is None:\n return jsonify({'error': 'Missing text'}), 400\n obj = Review(**js) # creates\n obj.save()\n return jsonify(obj.to_dict()), 201\n\n reviews = []\n reviews_obj = storage.all('Review') # retrieves list obj\n for obj in reviews_obj:\n reviews.append(reviews_obj[obj].to_dict())\n return jsonify(reviews)",
"def reviews(self):\n reviewList = []\n for review in storage.all(Review).values():\n if review.getattr('place_id') == self.id:\n reviewList.append(review)\n return(reviewList)",
"def dependent_prs(self):\n comments = self.data['body'].replace('\\r\\n', ' ')\n for comment in self.comments():\n comments += comment['body'].replace('\\r\\n', ' ')\n\n dependent_prs = []\n dependent_keywords = ['depends on']\n for keyword in dependent_keywords:\n pattern = r'%s %s/(\\S+)/(\\S+)/pull/(\\d+)' % (keyword, GITHUB)\n LOGGER.info(\"Finding dependent PRs by '%s' in the comments\")\n dependent_prs += re.findall(pattern, comments)\n return set(dependent_prs)",
"def task_reviews_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n reviews = Review.objects.filter(task=task).all()\n serializer = ReviewSerializer(reviews, many=True)\n return Response(serializer.data)",
"def get_context_data(\n self,\n **kwargs,\n ) -> Dict[str, Any]:\n review_request = self.review_request\n draft = review_request.get_draft(self.request.user)\n\n # We only want to show one label. If there's a draft, then that's\n # the most important information, so we'll only show that. Otherwise,\n # we'll show the submitted/discarded state.\n label = None\n\n if draft:\n label = ('review-request-infobox-label-draft', _('Draft'))\n elif review_request.status == ReviewRequest.SUBMITTED:\n label = ('review-request-infobox-label-submitted', _('Submitted'))\n elif review_request.status == ReviewRequest.DISCARDED:\n label = ('review-request-infobox-label-discarded', _('Discarded'))\n\n if label:\n label = format_html('<label class=\"{0}\">{1}</label>', *label)\n\n # Fetch information on the reviews for this review request.\n review_count = (\n review_request.reviews\n .filter(public=True, base_reply_to__isnull=True)\n .count()\n )\n\n # Fetch information on the draft for this review request.\n diffset = None\n\n if draft and draft.diffset_id:\n diffset = draft.diffset\n\n if not diffset and review_request.diffset_history_id:\n try:\n diffset = (\n DiffSet.objects\n .filter(history__pk=review_request.diffset_history_id)\n .latest()\n )\n except DiffSet.DoesNotExist:\n pass\n\n if diffset:\n diff_url = '%s#index_header' % local_site_reverse(\n 'view-diff-revision',\n args=[review_request.display_id, diffset.revision],\n local_site=review_request.local_site)\n else:\n diff_url = None\n\n return {\n 'review_request': review_request,\n 'review_request_label': label or '',\n 'review_request_details': draft or review_request,\n 'issue_total_count': (review_request.issue_open_count +\n review_request.issue_resolved_count +\n review_request.issue_dropped_count +\n review_request.issue_verifying_count),\n 'review_count': review_count,\n 'diffset': diffset,\n 'diff_url': diff_url,\n }",
"def test_get_dealer_reviews(self):\n pass",
"def getReviewNumbers(singleStoryFooter):\n\twords = singleStoryFooter.get_text()\n\treview = re.compile(r\"Reviews: \\d+\").search(words)\n\tif review:\n\t\treviewNum = review.group()[9:]\n\t\treturn int(reviewNum)\n\telse: return 0",
"def _dependency_id(self):\n if self._dependency_ids:\n return self._dependency_ids[0]",
"def save_draft(self, review_request):\r\n self.api_call('api/review-requests/%s/draft/save/' %\r\n review_request['id'])\r\n self.debug('Review request draft saved')",
"def get_open_reviews(args):\n args['status'] = 'pending'\n if 'max_results' not in args:\n args['max_results'] = 100\n\n client = RBClient(REVIEWBOARD_URL)\n\n # If we have a username and password, login\n if REVIEWBOARD_USERNAME and REVIEWBOARD_PASSWORD:\n client.login(REVIEWBOARD_USERNAME, REVIEWBOARD_PASSWORD)\n\n root = client.get_root()\n\n if not root:\n logger.error(u'Could not get RBClient root')\n return None\n\n try:\n req = root.get_review_requests(**args)\n except APIError:\n logger.exception(u'Error querying API')\n return None\n\n ret = {'total': req.total_results, 'reviews': []}\n review_fmt = u\"[{user}] {summary} ({url}/r/{id})\"\n\n for review in req:\n ret['reviews'].append(review_fmt.format(user=review.get_submitter().username,\n summary=review.summary,\n url=REVIEWBOARD_URL,\n id=review.id))\n\n return ret"
] | [
"0.57654417",
"0.5477173",
"0.5378847",
"0.5101541",
"0.5080031",
"0.4982286",
"0.49632528",
"0.49589026",
"0.49061635",
"0.48880798",
"0.488128",
"0.4835317",
"0.48325068",
"0.4829637",
"0.48200688",
"0.48102397",
"0.47959515",
"0.4773191",
"0.4731234",
"0.4722772",
"0.4721991",
"0.4713366",
"0.46949184",
"0.46884874",
"0.46866146",
"0.46725786",
"0.46725452",
"0.46543872",
"0.46502155",
"0.4646397"
] | 0.6459412 | 0 |
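The main() in this row hands the actual work to handler.get_dependent_review_ids, which is defined elsewhere in that module and not shown here. Below is a minimal sketch of how such a traversal could be written against the Review Board Web API: the api callable mirrors handler.api(...) from the row, while the "depends_on" link list and its "href"/"id" keys are assumptions for illustration, not the project's verified code.

from collections import deque

def get_dependent_review_ids(api, review_request):
    # Breadth-first walk over every review request reachable through the
    # (assumed) "depends_on" links of the starting review request.
    seen = set()
    queue = deque([review_request])
    while queue:
        current = queue.popleft()
        for link in current.get("depends_on", []):
            dependent = api(link["href"])["review_request"]
            if dependent["id"] not in seen:
                seen.add(dependent["id"])
                queue.append(dependent)
    return sorted(seen)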
Initialize with a user-supplied list of segments. | def __init__(self, segments, lemma = None, case = None):
self.segments = segments
if isinstance(self.segments, str):
self.segments = [Segment.new_segment(s) for s in self.segments]
self.lemma = lemma
self.case = case | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()",
"def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])",
"def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = Segment()\n new_segment.save()\n name = \"%s_seg_%s\" % (self.PREFIX, new_segment.id)\n node = Node.objects(id=node_oid)[0]\n list_id = DripCampaign.objects(id=node[\"drip_campaign_id\"])[0][\"list_id\"]\n node.update(set__segment_oid=new_segment.id, set__updated_at=datetime.utcnow())\n\n # gather all users that apply for this node after triggers on previous nodes\n all_euids = set()\n if node[\"initial\"]:\n all_euids = set(List.objects(list_id=list_id)[0][\"members_euid\"])\n else:\n for trg in Trigger.objects(node_to=node_oid):\n for euids, to_node_oid in self.segment_by_triggers(trg[\"node_from\"]):\n if to_node_oid == node_oid:\n all_euids.update(set(euids))\n\n # # intersect euids with current state of the list\n # # it might be the case that some people are removed from the list since previous email\n self.fetch_members_for_list(list_id)\n all_euids = all_euids & set(List.objects(list_id=list_id)[0][\"members_euid\"])\n\n all_euids = list(all_euids)\n\n # apply the user list to segment n stuff\n # if user list is empty, save only meta info and don't actually work with mailchimp\n if all_euids:\n segment_id = self.mw.create_segment(list_id, name)\n self.mw.update_segment_members(list_id, segment_id, all_euids)\n else:\n segment_id = None\n new_segment.update(set__segment_id=segment_id, set__name=name, members_euid=all_euids,\n set__updated_at=datetime.utcnow())",
"def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))",
"def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)",
"def __init__(self, sets: List[ColdStartUserSet]):\n self.sets = sets",
"def __init__(self, segments, display_res=\"1920x1080\", stream_id=None):\n self.segments = segments\n self.display_res = display_res\n self.stream_id = stream_id\n self.o22 = []\n self.mode = None",
"def create_network_segments(self, tenant_id, network_id,\n network_name, segments):",
"def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)",
"def __init__(__self__, *,\n segment_name: Optional[pulumi.Input[str]] = None):\n if segment_name is not None:\n pulumi.set(__self__, \"segment_name\", segment_name)",
"def _trainBySegments(self, divisions, trainingSet):\n # train the subdomain ROMs\n counter, remainder = divisions\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # if there were leftover domain segments that didn't go with the rest, train those now\n if remainder:\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n roms = np.hstack([roms, unclusteredROMs])\n self._roms = roms",
"def __init__(self, segments='CCCVV', root_length=3):\n # residue should be at least 1 segment long\n assert(root_length < len(segments))\n\n self.segments = segments\n self.root_length = root_length\n self.residue_length = len(segments) - root_length",
"def __init__(self):\n super().__init__()\n self._points = 0\n self._segments = []\n self.fill_list()\n # i = random.randint(0, len(self._segments) - 1)\n # self.set_text(self._segments[i])\n self.reset()",
"def initialize_vasp_runs(self):\n\n\t\treference_polarization_path = self.get_extended_path('reference_polarization')\n\t\tdistorted_polarization_path = self.get_extended_path('distorted_polarization')\n\n\t\t#if not Path.exists(reference_polarization_path):\n\t\tself.create_new_vasp_run(reference_polarization_path, self.reference_structure)\n\n\t\t# if not Path.exists(distorted_polarization_path):\n\t\tself.create_new_vasp_run(distorted_polarization_path, self.distorted_structure)",
"def add_segments(self, *segments):\n for s in segments:\n self._add_one(s)",
"def prepareParrallelize(self,segs):\n\n angles = numpy.array([s.angle for s in segs ])\n angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n for cl in clList:\n meanA = angles[list(cl)].mean()\n for i in cl:\n seg = segs[i]\n seg.newAngle = meanA if seg.angle>=0. else meanA-_pi",
"def __init__(self, path_list):\n self.path_list = path_list",
"def __init__(\n self,\n segments: Tuple[\"BaseSegment\", ...],\n # These are tuples of segments but we're expecting them to\n # be tuples of length 1. This is because we'll almost always\n # be doing tuple arithmetic with the results and constructing\n # 1-tuples on the fly is very easy to misread.\n start_bracket: Tuple[BaseSegment],\n end_bracket: Tuple[BaseSegment],\n pos_marker: Optional[PositionMarker] = None,\n uuid: Optional[UUID] = None,\n ):\n if not start_bracket or not end_bracket: # pragma: no cover\n raise ValueError(\n \"Attempted to construct Bracketed segment without specifying brackets.\"\n )\n self.start_bracket = start_bracket\n self.end_bracket = end_bracket\n super().__init__(segments=segments, pos_marker=pos_marker, uuid=uuid)",
"def __init__(self, focalPoint, focalDist, angles, segments):\n self.focalPoint = focalPoint\n self.focalDist = focalDist\n self.angles = angles\n self.segments = segments",
"def set_calculated_segments(self, total_lights, segments):\n self.set_segments(segments)\n self.set_lights_per_segment(int(total_lights / segments))",
"def __init__(self, tag: str, *elements: Union[str, List[str]]):\n if type(tag) != str:\n raise TypeError(\"'tag' argument must be a str\")\n if tag == \"\":\n raise ValueError(\"The tag of a segment must not be empty.\")\n self.tag = tag\n\n # The data elements for this segment.\n # this is converted to a list (due to the fact that python creates a tuple\n # when passing a variable arguments list to a method)\n self.elements = list(elements)",
"def __init__(self):\n self.s_sect = []",
"def __init__(self, name, ssn, address, courses=None):\n super().__init__(name, ssn, address)\n if courses is None:\n courses = []\n if courses == isinstance(courses, list):\n self.courses = courses\n else:\n self.courses = list(courses)",
"def initialize(self):\n self.path = []\n self.sectorClean = False",
"def __init__(self,numSegments,startX,startY):\n \n self.numSegments = numSegments\n\n # Construct the body\n self.body = []\n\n for i in range(numSegments):\n self.body.append([startX-i, startY])",
"def set_market_segments(self, segments):\r\n \"\"\"\r\n Q1-2. Implement this method, which takes an iterable of MarketSegments\r\n to which this Account will be attached. This method REPLACES all\r\n MarketSegment associations, so be sure to update each\r\n MarketSegment's internal representation of associated Accounts\r\n appropriately.\r\n \"\"\"\r\n for existing_segment in self._market_segments:\r\n # only need to remove the ones that aren't in the new list\r\n if existing_segment not in segments:\r\n existing_segment.remove_account(self)\r\n for segment in segments:\r\n # add segments, catch ValueErrors which means the segment was\r\n # already part of this account, therefor no followup action is\r\n # needed\r\n try:\r\n self._market_segments.append(segment)\r\n # add_ms_to_account needs to be False because we've already\r\n # added the segment to this account\r\n segment.add_account(self, add_ms_to_account=False)\r\n except ValueError:\r\n # this account was already associated to that segment,\r\n # continue on\r\n continue",
"def __init__(self, word_string, feature_table):\n self.word_string = word_string\n self.feature_table = feature_table\n self.segments = [Segment(char, self.feature_table) for char in self.word_string]",
"def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents",
"def test_getting_segments(self):\n pass",
"def populate_vertices(self, vertices_list):\n vertices = []\n for vertex in vertices_list:\n vertex_id = vertex[0]\n vertices.append(Vertex(vertex_id))\n self.vertices = vertices"
] | [
"0.6232747",
"0.596336",
"0.58711517",
"0.5702215",
"0.55154556",
"0.5394213",
"0.53759134",
"0.53305984",
"0.5308964",
"0.529063",
"0.5197425",
"0.51839024",
"0.5133967",
"0.5053822",
"0.50320536",
"0.5010542",
"0.50038457",
"0.4999929",
"0.49838173",
"0.4962347",
"0.49609384",
"0.49214706",
"0.48969644",
"0.48933196",
"0.48866424",
"0.48850414",
"0.48827687",
"0.48644063",
"0.4854759",
"0.48456323"
] | 0.6051361 | 1 |
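The WordForm constructor in this row calls Segment.new_segment, and the next row's classmethod calls Segment(seg_type=...), but the Segment class itself is not part of either row. The toy stand-in below is purely an assumption added for illustration so the two WordForm methods can be run in isolation; the real Segment in the source project is almost certainly richer.

import random

VOWELS = set("aeiou")
CONSONANTS = set("ptkbdgmnszrl")

class Segment:
    # Toy stand-in, not the project's Segment: "C" samples a consonant, "V" a vowel.
    def __init__(self, seg_type=None):
        self.seg_type = seg_type
        pool = VOWELS if seg_type == "V" else CONSONANTS
        self.value = random.choice(sorted(pool))

    @classmethod
    def new_segment(cls, char):
        # Wrap an existing character instead of sampling a new one.
        seg = cls(seg_type="V" if char in VOWELS else "C")
        seg.value = char
        return seg

    def __repr__(self):
        return self.value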
Create a WordForm of the given CV shape with random segments. | def random_segs(cls, shape, lemma = None, case = None):
# For each C or V segment in `shape`, initialize a random Segment of the
# appropriate type. Initialize a new WordForm with all these Segments.
return cls([Segment(seg_type = seg) for seg in shape], lemma, case) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\n while not any(letter in self.vowels for letter in word):\r\n length = len(word)\r\n if length == 1:\r\n index = 0\r\n elif length == 2:\r\n index = random.randrange(0, 2)\r\n else:\r\n a = len(word) / 2\r\n index = a + random.randrange(-a / 2, a / 2)\r\n word = word[:index] + self.get_letter(100) + word[index + 1:]\r\n\r\n if random.random() > self.capital_chance:\r\n word = word.capitalize()\r\n self.words.append(word)\r\n self.word_count += 1\r\n return word",
"def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()",
"def create(seed, model, tokenizer, temp=0.5):\n\n dictionary = [\"\"] + list(tokenizer.index_word.values())\n start = np.array(tokenizer.texts_to_sequences(seed)).reshape(1, -1)\n if seed[0] == '<start>':\n output = [seed[-1]]\n else:\n output = seed[:]\n\n for _ in range(45):\n weights = reweight_distribution(model.predict(start), temperature=temp)\n word = np.random.choice(dictionary, size=1, p=weights[0, :])[0]\n if word == '<end>': \n if len(output) > 10:\n break\n else:\n continue\n output.append(word)\n start = np.append(start[0, 1:], tokenizer.texts_to_sequences([word])).reshape(1, -1)\n return \" \".join(output)",
"def create_word(self):\n return self.random.choice(CONSONANTS) + self.random.choice(VOWELS)",
"def build_vocabulary(image_paths, vocab_size):\n n_image = len(image_paths)\n\n # Since want to sample tens of thousands of SIFT descriptors from different images, we\n # calculate the number of SIFT descriptors we need to sample from each image.\n n_each = int(np.ceil(40000 / n_image)) # You can adjust 10000 if more is desired\n\n # Initialize an array of features, which will store the sampled descriptors\n features = np.zeros((n_image * n_each, 128))\n j=0\n for i, path in enumerate(image_paths):\n # Load SIFT features from path\n descriptors = np.loadtxt(path, delimiter=',',dtype=float)\n\n # TODO: Randomly sample n_each features from descriptors, and store them in features\n #use the randomizer in numpy library to make n_each random index\n idx= np.array(np.random.randint(0,len(descriptors),n_each))\n\n # choose randomly n_each number of discriptor to train K-mean classifier\n for k in idx:\n\n features[j] = descriptors[k,:]\n j = j+1\n # TODO: pefrom k-means clustering to cluster sampled SIFT features into vocab_size regions.\n # You can use KMeans from sci-kit learn.\n # Reference: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n\n #use K_mean classifier to make Bag of visual words represantation for SIFT features\n kmeans = KMeans(n_clusters=250).fit(features)\n #kmeans= clustering = AgglomerativeClustering().fit(features)\n\n\n return kmeans",
"def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2",
"def create_wordcloud(self, text):\n text = ' '.join(f\"{word}\" for word in text)\n mask = np.array(Image.open(os.path.join(CURRDIR, \"cloud.png\")))\n wc = WordCloud(background_color=\"white\",\n max_words=200,\n mask=mask)\n wc.generate(text)\n wc.to_file(PATH_TO_SAVE_IMG, \"wordle.png\")",
"def word(length, upper=False):\n letters = \"abcdefghijklmnopqrstuvwxyz\"\n if upper:\n letters = letters.upper()\n\n def gen(shape):\n lengths = _ints(length, shape)\n field_length = lengths.max()\n dtype = \"U{}\".format(field_length)\n\n result = np.empty(shape, dtype=dtype)\n flat = result.ravel()\n for i, l in enumerate(lengths):\n flat[i] = \"\".join( random.choice(letters) for _ in range(l) )\n return result\n\n return gen",
"def create_random_text(word_count=10):\n sample_text_lst = TEXT_BASE_RUS.replace('\\n', '').split(' ')\n generate_text_lst = []\n for i in range(word_count):\n generate_text_lst.append(random.choice(sample_text_lst))\n generate_text = ' '.join(generate_text_lst)\n return generate_text",
"def generate(model, voc, maxlen=20, diversity=0.5, numchars=100):\n\n text, char_indices, indices_char = voc\n chars = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n #print(\"Insert text to start from [min 20 chars]:\")\n #sentence = str(raw_input())\n #sentence = sentence[:maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(numchars):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()",
"def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()",
"def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))",
"def word_cloud_generator(text: str, mask_image: Path, save_to_file=False) -> WordCloud:\n mask = imageio.imread(mask_image)\n word_cloud = WordCloud(colormap='prism', mask=mask, background_color='white')\n word_cloud = word_cloud.generate(text)\n if save_to_file:\n word_cloud.to_file('word_cloud.png')\n return word_cloud",
"def generate_wordcloud(topic_description, use_mask='rectangle', store_to_file=False):\n\n # transform the topic description in frequencies\n topic_frequencies = get_word_frequencies(topic_description)\n\n if use_mask == 'oval':\n mask = numpy.array(Image.open(os.path.join(config.__resources_folder_path, \"oval.jpg\")))\n else:\n mask = numpy.array(Image.open(os.path.join(config.__resources_folder_path, \"rect.png\")))\n\n wc = WordCloud(background_color=\"white\", max_words=2000, mask=mask)\n # generate word cloud\n wc.generate_from_frequencies(topic_frequencies)\n\n if store_to_file:\n # store to file\n wc.to_file(os.path.join(config.__inputs_outputs_folder_path, \"wordcloud_{0}_{1}.png\".format(\n hash(str(topic_description)), use_mask)))\n\n # show\n plt.imshow(wc, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show()",
"def generate(self, count=15):\n\n sentence = []\n print(\"self.word_dict\", self.word_dict)\n for i in range(count):\n first_tuple = random.choice(list(self.word_dict.keys())) # first word for our sentence\n first_word = random.choice(first_tuple)\n sentence.append(first_word)\n second_word = self.word_dict[first_tuple]\n # print(\"second_word\", second_word)\n next_word = second_word.sample()\n # print(\"next_word\", next_word)\n # first_tuple = second_word\n sentence.append(next_word)\n # end_tuple =\n sentence = ' '.join(sentence)\n return sentence + \".\"\n # for i in range(len(self.token)):\n # val = list(self.word_dict.values())[i]\n # print(len(val))\n # # print(\"val\", val)\n # next_word = val.sample()\n # sentence.append(next_word)\n # sentence = ' '.join(sentence)\n # return sentence + \".\"",
"def build_from_words(self, words):\n if isinstance(words, unicode):\n self.build(words)\n elif isinstance(words, list):\n flag = \"seg\"\n assert len(words) > 0\n\n word = words[0]\n if isinstance(word, unicode):\n flag = \"seg\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 2 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"pos\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 4 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"dp\"\n else:\n flag = \"unknown\"\n\n self._xml4nlp = Element('xml4nlp')\n self._note = SubElement(self._xml4nlp, 'note')\n self._doc = SubElement(self._xml4nlp, 'doc')\n\n para = SubElement(self._doc, 'para')\n sent = SubElement(para, 'sent')\n\n para.set(\"id\", \"0\")\n sent.set(\"id\", \"0\")\n\n self._clean_note()\n\n if flag == \"seg\":\n for i, word in enumerate(words):\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word\n }))\n sent.set('cont', (\"\".join(words)))\n self._set_word_on_note()\n elif flag == \"pos\":\n for i, word_pos in enumerate(words):\n word, pos = word_pos\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_pos_on_note()\n elif flag == \"dp\":\n for i, rep in enumerate(words):\n word, pos, head, dep_rel = rep\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos,\n 'parent': str(int(head) - 1),\n 'relation': dep_rel\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_parser_on_note()\n\n self.dom = self._xml4nlp",
"def generate_words(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))",
"def surface_labelled_data_preparation_pipeline(word_list: [str]):\n X = []\n\n for word in word_list:\n segments = word.split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n\n X.append(segment_features)\n\n return X",
"def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec",
"def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)",
"def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text",
"def word2vec_model(sentences, size=100, min_count=5, window=5,\n negative=5, cbow=True, iterations=5, seed=0,\n workers=1):\n if cbow is True:\n sg = 0\n else:\n sg = 1\n model = Word2Vec(size=size, window=window,\n min_count=min_count, workers=workers,\n sg=sg, negative=negative, seed=seed)\n\n model.build_vocab(sentences)\n\n model.train(sentences, total_examples=model.corpus_count,\n epochs=iterations)\n return model",
"def create_random_tags(count=100):\n all_words = words.words('en')\n selected_words = []\n picker = ColorPicker(reset=True)\n colors = picker._get_colors()\n while count > 0:\n word = random.choice(all_words)\n selected_words.insert(0, word)\n all_words.remove(word)\n count += -1\n del all_words\n for word in selected_words:\n color = colors.next()\n tag = Tag(slug=slugify(word), tag=word, color=color)\n tag.save()",
"def generate_sample(sentences, vocab, window):\n for sentence in sentences:\n word_vocabs = [vocab[w] for w in sentence if w in vocab and\n vocab[w]['prob'] > np.random.rand()]\n\n for index, word in enumerate(word_vocabs):\n center = word['index']\n reduced_window = np.random.randint(1, window + 1)\n\n # words before the center word\n for context in word_vocabs[max(0, index - reduced_window):index]:\n target = context['index']\n yield center, target\n\n # words after the center word\n for context in word_vocabs[(index + 1):(index + 1 + reduced_window)]:\n target = context['index']\n yield center, target",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def generate_words(num_words, word_len, grid, reject_func=is_overlapping):\n height, width = len(grid), len(grid[0])\n restrictions = position_restrictions(word_len, height, width)\n word_hashes = set()\n words_positions = []\n words = []\n while len(word_hashes) < num_words:\n cardinal = random.choice(list(restrictions.keys()))\n (min_h, max_h), (min_w, max_w) = restrictions[cardinal]\n x0, y0 = random.randint(min_h, max_h), random.randint(min_w, max_w)\n x, y = DIRECTIONS[cardinal]\n positions = [(x0 + x * i, y0 + y * i) for i in range(word_len)]\n if (word_hash := (cardinal, (x0, y0))) not in word_hashes \\\n and not reject_func(p_set := set(positions), words_positions):\n words.append(\"\".join(str(grid[x][y]) for x, y in positions))\n word_hashes.add(word_hash)\n words_positions.append(p_set)\n return words",
"def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary",
"def generate_words(self, count=100):\n\n with self.open_text_data() as f:\n result = self.read_words(f, count=count)\n return result",
"def create_word(char_list):",
"def fill_list(self):\n for i in range(0, constants.STARTING_WORDS):\n random_word = constants.LIBRARY[random.randint(0, len(constants.LIBRARY) - 1)]\n x = random.randint(1, constants.MAX_X - len(self.get_text()))\n y = random.randint(1, constants.MAX_Y - len(self.get_text()))\n position = Point(x, y)\n self.set_position(position)\n velocity = Point(0, 1)\n self._add_segment(random_word, position, velocity)\n print()"
] | [
"0.61004114",
"0.5693294",
"0.55114466",
"0.5438077",
"0.53612614",
"0.5311946",
"0.52376354",
"0.51894677",
"0.5161035",
"0.5152379",
"0.5143327",
"0.5127287",
"0.51046485",
"0.50831926",
"0.50809175",
"0.5080614",
"0.5072338",
"0.5021304",
"0.50183684",
"0.4984602",
"0.4969572",
"0.49440196",
"0.49193308",
"0.4914714",
"0.49089342",
"0.48999873",
"0.48745552",
"0.4864325",
"0.48638293",
"0.48628196"
] | 0.72299457 | 0 |
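Combining the __init__ and random_segs methods from the last two rows with the toy Segment sketched above gives a small, runnable usage example (an illustration only; the original project's module layout is not shown here):

class WordForm:
    # Assembled from the two methods quoted in the rows above.
    def __init__(self, segments, lemma=None, case=None):
        self.segments = segments
        if isinstance(self.segments, str):
            self.segments = [Segment.new_segment(s) for s in self.segments]
        self.lemma = lemma
        self.case = case

    @classmethod
    def random_segs(cls, shape, lemma=None, case=None):
        # One random Segment per C/V slot in the template shape.
        return cls([Segment(seg_type=seg) for seg in shape], lemma, case)

form = WordForm.random_segs("CVCV", lemma="toy")
print(form.segments)              # e.g. [p, a, t, i]
print(WordForm("kato").segments)  # [k, a, t, o], each character wrapped as a Segment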
Add the suffix vowel. | def add_suffix(self, suffix):
# Append the suffix vowel to this WordForm.
self.segments.append(Segment.new_segment(suffix)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest",
"def get_vowel_names():",
"def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement",
"def translate(self):\n\t\tvowels = \"aeiou\"\n\n\t\tif (self.word[0] not in vowels) and (self.word[1] in vowels):\n\t\t\tnew_word = self.word[1:] + self.word[0] + \"ay\"\n\t\telif self.word[0] in vowels:\n\t\t\tnew_word = self.word + \"way\"\n\t\telse:\n\t\t\tnew_word = self.word[2:] + self.word[:2] + \"ay\"\n\n\t\tprint(new_word)",
"def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word",
"def stem(self, word):\n word = word.lower()\n\n if word in self.stopwords:\n return word\n\n step1_success = False\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if not (word.endswith(suffix) and rv.endswith(suffix)):\n continue\n\n if (\n rv[: -len(suffix)].endswith(\n (\n \"ando\",\n \"ar\",\n \"er\",\n \"iendo\",\n \"ir\",\n )\n )\n ) or (\n rv[: -len(suffix)].endswith(\"yendo\")\n and word[: -len(suffix)].endswith(\"uyendo\")\n ):\n\n word = self.__replace_accented(word[: -len(suffix)])\n r1 = self.__replace_accented(r1[: -len(suffix)])\n r2 = self.__replace_accented(r2[: -len(suffix)])\n rv = self.__replace_accented(rv[: -len(suffix)])\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if not word.endswith(suffix):\n continue\n\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\", \"ad\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\n \"adora\",\n \"ador\",\n \"acion\",\n \"adoras\",\n \"adores\",\n \"aciones\",\n \"ante\",\n \"antes\",\n \"ancia\",\n \"ancias\",\n ):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logias\"):\n word = suffix_replace(word, suffix, \"log\")\n rv = suffix_replace(rv, suffix, \"log\")\n\n elif suffix in (\"ucion\", \"uciones\"):\n word = suffix_replace(word, suffix, \"u\")\n rv = suffix_replace(rv, suffix, \"u\")\n\n elif suffix in (\"encia\", \"encias\"):\n word = suffix_replace(word, suffix, \"ente\")\n rv = suffix_replace(rv, suffix, \"ente\")\n\n elif suffix == \"mente\":\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith((\"ante\", \"able\", \"ible\")):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"idad\", \"idades\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n for pre_suff in (\"abil\", \"ic\", \"iv\"):\n if r2.endswith(pre_suff):\n word = word[: -len(pre_suff)]\n rv = rv[: -len(pre_suff)]\n\n elif suffix in (\"ivo\", \"iva\", \"ivos\", \"ivas\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2a: Verb suffixes beginning 'y'\n if not step1_success:\n for suffix in self.__step2a_suffixes:\n if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == \"u\":\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2b: Other verb suffixes\n for suffix in self.__step2b_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if suffix in (\"en\", \"es\", \"eis\", \"emos\"):\n if word.endswith(\"gu\"):\n word = word[:-1]\n\n if rv.endswith(\"gu\"):\n rv = rv[:-1]\n break\n\n # STEP 3: Residual suffix\n for suffix in self.__step3_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n if suffix in (\"e\", \"\\xE9\"):\n rv = rv[: -len(suffix)]\n\n if word[-2:] == \"gu\" and rv.endswith(\"u\"):\n word = word[:-1]\n 
break\n\n word = self.__replace_accented(word)\n\n return word",
"def create_extended_name(y: str, p: str) -> str:\n final_letter = y[-1]\n if final_letter == \"e\":\n extended_name = y + \"x\" + p\n elif final_letter in [\"a\", \"i\", \"o\", \"u\"]:\n extended_name = y[:-1] + \"ex\" + p\n elif final_letter == \"x\":\n if y[-2] == \"e\":\n extended_name = y + p\n else:\n extended_name = y + \"ex\" + p\n return extended_name",
"def _ends_with_vowel(self, letter_group: str) -> bool:\n if len(letter_group) == 0:\n return False\n return self._contains_vowels(letter_group[-1])",
"def find_vowels(s):\n \"*** YOUR CODE HERE ***\"",
"def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'",
"def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist",
"def upper_vowel(s):\n for k, v in REPLACED_MAP.iteritems():\n s = s.replace(k, v)\n return s",
"def last_char_to_vowel(word):\n assert isinstance(word, str)\n # We iterate over characters of the word, because the last might be a\n # punctuation, perhaps.\n for last in reversed(word):\n last = last.lower()\n for ch, prev in ((\"a\", \"a/+£\"),\n (\"e\", \"eébcçdgptvwz&*:.\"),\n (\"o\", \"ohk€å\"),\n (\"ä\", \"äflmnrsx§\"),\n (\"ö\", \"ö\"),\n (\"i\", \"ij%$\"),\n (\"u\", \"uq,\"),\n (\"y\", \"yü\")):\n if last in prev:\n return ch\n return \"e\"",
"def suffix():\r\n\r\n return _random.choice(\r\n [\r\n 'Sr.', 'Jr.', 'II', 'III', 'IV', 'V'\r\n ]\r\n )",
"def reverse_vowels(s):\n\n phrase = \"\"\n vowels = []\n for letter in s:\n if letter.lower() in \"aeiou\":\n phrase += \"~\"\n vowels.append(letter)\n else: \n phrase += letter\n \n index = 0\n new_phrase = \"\"\n vowels = vowels[-1:-len(vowels)-1:-1]\n \n for letter in phrase:\n\n if letter == \"~\":\n new_phrase += vowels[index]\n index += 1\n else:\n new_phrase += letter\n\n return new_phrase",
"def find_vowel(text: str) -> str:\r\n\r\n vowel = text.count('a') + text.count('o') + text.count('u') +\\\r\n text.count('i') + text.count('e') + text.count(\"y\") +\\\r\n text.count('A') + text.count('O') + text.count('U') +\\\r\n text.count('I') + text.count('E') + text.count('Y')\r\n\r\n return(vowel)",
"def _suffix(self) -> str:\n return \"\"",
"def suffix_replace(original, old, new):\n ...",
"def add_suffix(in_image,\n suffix_str):\n bandnames = in_image.bandNames().map(lambda elem: ee.String(elem).toLowerCase().cat('_').cat(suffix_str))\n nb = bandnames.length()\n return in_image.select(ee.List.sequence(0, ee.Number(nb).subtract(1)), bandnames)",
"def FindSuffix(self):\n self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1",
"def generate_vowel():\n return random.sample(['a', 'e', 'i', 'o', 'u', 'y'], 1)",
"def gerundify(verb):\n if verb.endswith(\"e\"):\n verb = verb[:-1]\n\n if random() < 0.4:\n if (\n not verb.startswith(\"a\")\n and not verb.startswith(\"e\")\n and not verb.startswith(\"i\")\n and not verb.startswith(\"o\")\n and not verb.startswith(\"u\")\n ):\n verb = \"a-\" + verb\n\n return verb + \"ing\"",
"def apply_sinalefa(self):\n syllables_sinalefa = []\n index = 0\n while index < len(self.word_syllables):\n try:\n # checking if there is sinalefa\n if self.are_vowels(syllables_sinalefa[-1][-1], self.word_syllables[index][0]):\n merged_syllables = ''.join([syllables_sinalefa[-1], self.word_syllables[index]])\n # replacing the last syllable with the merged syllable\n syllables_sinalefa.pop(-1)\n syllables_sinalefa.append(merged_syllables)\n else:\n syllables_sinalefa.append(self.word_syllables[index])\n except IndexError:\n # we reached the last word\n syllables_sinalefa.append(self.word_syllables[index])\n finally:\n index += 1\n\n return '-'.join(syllables_sinalefa)",
"def is_suffix(v,s):\n c = len(v)-1\n n = len(s)\n return c + v[c] == 2*n",
"def removesuffix(self, x) -> String:\n pass",
"def vowel_with_for(character):\r\n\tif character in vowels:\r\n\t\tprint(\"Entered character is vowel..!\")\r\n\telse:\r\n\t\tprint(\"Not a Vowel\")",
"def step1c(self, word):\r\n\r\n if word.endswith('y'):\r\n result = word.rfind('y')\r\n base = word[:result]\r\n if self.containsVowel(base):\r\n word = base\r\n word += 'i'\r\n return word",
"def is_suffix(suffix: str, word: str):\n return word.endswith(suffix)",
"def stem(self, word):\n word = word.lower()\n\n if word in self.__special_words:\n return self.__special_words[word]\n\n # Map the different apostrophe characters to a single consistent one\n word = (word.replace(u(\"\\u2019\"), u(\"\\x27\"))\n .replace(u(\"\\u2018\"), u(\"\\x27\"))\n .replace(u(\"\\u201B\"), u(\"\\x27\")))\n\n if word.startswith(u(\"\\x27\")):\n word = word[1:]\n\n if word.startswith(\"y\"):\n word = \"\".join((\"Y\", word[1:]))\n\n for i in range(1, len(word)):\n if word[i - 1] in self.__vowels and word[i] == \"y\":\n word = \"\".join((word[:i], \"Y\", word[i + 1:]))\n\n step1a_vowel_found = False\n step1b_vowel_found = False\n\n r1 = \"\"\n r2 = \"\"\n\n if word.startswith((\"gener\", \"commun\", \"arsen\")):\n if word.startswith((\"gener\", \"arsen\")):\n r1 = word[5:]\n else:\n r1 = word[6:]\n\n for i in range(1, len(r1)):\n if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels:\n r2 = r1[i + 1:]\n break\n else:\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n\n # STEP 0\n for suffix in self.__step0_suffixes:\n if word.endswith(suffix):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 1a\n for suffix in self.__step1a_suffixes:\n if word.endswith(suffix):\n\n if suffix == \"sses\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"ied\", \"ies\"):\n if len(word[:-len(suffix)]) > 1:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n else:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix == \"s\":\n for letter in word[:-2]:\n if letter in self.__vowels:\n step1a_vowel_found = True\n break\n\n if step1a_vowel_found:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n break\n\n # STEP 1b\n for suffix in self.__step1b_suffixes:\n if word.endswith(suffix):\n if suffix in (\"eed\", \"eedly\"):\n\n if r1.endswith(suffix):\n word = \"\".join((word[:-len(suffix)], \"ee\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ee\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ee\"))\n else:\n r2 = \"\"\n else:\n for letter in word[:-len(suffix)]:\n if letter in self.__vowels:\n step1b_vowel_found = True\n break\n\n if step1b_vowel_found:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n if word.endswith((\"at\", \"bl\", \"iz\")):\n word = \"\".join((word, \"e\"))\n r1 = \"\".join((r1, \"e\"))\n\n if len(word) > 5 or len(r1) >= 3:\n r2 = \"\".join((r2, \"e\"))\n\n elif word.endswith(self.__double_consonants):\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif ((r1 == \"\" and len(word) >= 3 and\n word[-1] not in self.__vowels and\n word[-1] not in \"wxY\" and\n word[-2] in self.__vowels and\n word[-3] not in self.__vowels)\n or\n (r1 == \"\" and len(word) == 2 and\n word[0] in self.__vowels and\n word[1] not in self.__vowels)):\n\n word = \"\".join((word, \"e\"))\n\n if len(r1) > 0:\n r1 = \"\".join((r1, \"e\"))\n\n if len(r2) > 0:\n r2 = \"\".join((r2, \"e\"))\n break\n\n # STEP 1c\n if (len(word) > 2\n and word[-1] in \"yY\"\n and word[-2] not in self.__vowels):\n word = \"\".join((word[:-1], \"i\"))\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"i\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"i\"))\n else:\n r2 = \"\"\n\n # STEP 2\n for suffix in self.__step2_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"enci\", \"anci\", \"abli\"):\n 
word = \"\".join((word[:-1], \"e\"))\n\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"e\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"e\"))\n else:\n r2 = \"\"\n\n elif suffix == \"entli\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"izer\", \"ization\"):\n word = \"\".join((word[:-len(suffix)], \"ize\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ize\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ize\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ational\", \"ation\", \"ator\"):\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"alism\", \"aliti\", \"alli\"):\n word = \"\".join((word[:-len(suffix)], \"al\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"al\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"al\"))\n else:\n r2 = \"\"\n\n elif suffix == \"fulness\":\n word = word[:-4]\n r1 = r1[:-4]\n r2 = r2[:-4]\n\n elif suffix in (\"ousli\", \"ousness\"):\n word = \"\".join((word[:-len(suffix)], \"ous\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ous\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ous\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"iveness\", \"iviti\"):\n word = \"\".join((word[:-len(suffix)], \"ive\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ive\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ive\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"biliti\", \"bli\"):\n word = \"\".join((word[:-len(suffix)], \"ble\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ble\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ble\"))\n else:\n r2 = \"\"\n\n elif suffix == \"ogi\" and word[-4] == \"l\":\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix in (\"fulli\", \"lessli\"):\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"li\" and word[-3] in self.__li_ending:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n break\n\n # STEP 3\n for suffix in self.__step3_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"ational\":\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"\"\n\n elif suffix == \"alize\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n\n elif suffix in (\"icate\", \"iciti\", \"ical\"):\n word = \"\".join((word[:-len(suffix)], \"ic\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ic\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ic\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ful\", \"ness\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n elif suffix == \"ative\" and r2.endswith(suffix):\n word = word[:-5]\n r1 = r1[:-5]\n r2 = r2[:-5]\n break\n\n # STEP 4\n for suffix in self.__step4_suffixes:\n if 
word.endswith(suffix):\n if r2.endswith(suffix):\n if suffix == \"ion\":\n if word[-4] in \"st\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n else:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 5\n if r2.endswith(\"l\") and word[-2] == \"l\":\n word = word[:-1]\n elif r2.endswith(\"e\"):\n word = word[:-1]\n elif r1.endswith(\"e\"):\n if len(word) >= 4 and (word[-2] in self.__vowels or\n word[-2] in \"wxY\" or\n word[-3] not in self.__vowels or\n word[-4] in self.__vowels):\n word = word[:-1]\n\n word = word.replace(\"Y\", \"y\")\n return word",
"def is_vowel(self, letter):\n\n if letter in (\"a\", \"e\", \"i\", \"o\", \"u\", \"A\", \"E\", \"I\", \"O\", \"U\"):\n return True\n return False"
] | [
"0.5983021",
"0.5954164",
"0.59408945",
"0.5778313",
"0.5686185",
"0.5563964",
"0.5542913",
"0.55272454",
"0.54683185",
"0.5462567",
"0.54531056",
"0.5446963",
"0.54377186",
"0.5427365",
"0.53498006",
"0.53402376",
"0.53384125",
"0.5302798",
"0.52940315",
"0.5291557",
"0.52781844",
"0.52564484",
"0.52411693",
"0.52356154",
"0.5230086",
"0.5162771",
"0.51485455",
"0.5136728",
"0.5126367",
"0.5126245"
] | 0.7781676 | 0 |
Entrench at the level of the WordForm. | def entrench_word(self, cloud, paradigms, informativity, categorization,
unique_base):
# Entrench within the WordForm's own cloud. Iterate over positions in
# the WordForm (up to three Segments).
for pos, seg in enumerate(self.segments):
if pos < 3:
# Iterate over features.
for feat in seg.features:
if uniform(0, 1) < probability_of_analogy:
# Collect other values of the feature across the cloud.
# Since this is the WordForm's own cloud, set all the
# weights to 1.
wv = [(e.segments[pos].features[feat], 1)
for e in cloud
if e.lemma == self.lemma
and e.case == self.case]
# Entrench the segment based on these values.
seg.entrench_feature(feat, wv,
top_value = self_top_value,
max_movement = self_max_movement)
# Entrench within other clouds of the same paradigm.
if paradigms:
# Iterate over positions in the WordForm (up to three Segments).
for pos, seg in enumerate(self.segments):
if pos < 3:
# Iterate over features.
for feat in seg.features:
if uniform(0, 1) < (probability_of_analogy *
paradigm_weight):
# Get the weight for each case.
weights = dict()
# If informativity is measured via the entropy
# method, the weight of a case is proportional to
# the entropy of the feature across all lemmas of
# that case.
if informativity == 'entropy':
weights = {c: entropy(feat, [e.segments[pos].\
features[feat]
for e in cloud
if e.case == c])
for c in cases}
# If informativity is measured via a classification
# algorithm, the weight of a case is proportional to
# the performance of the classifier on lemmas within
# that case using just the current feature.
elif informativity == 'classification':
weights = {c: performance([e
for e in cloud
if e.case == c],
positions = [pos],
features = [feat],
method = categorization)
for c in cases}
# If informativity is not measured, set the weights
# of all cases to 1.
elif informativity == 'none':
weights = {c: 1
for c in cases}
# If paradigms are required to have a unique base,
# the winner takes all the weight.
if unique_base:
max_weight = max(weights.values())
for c in weights:
if weights[c] < max_weight:
weights[c] = 0
# Collect other values of the feature across the
# cloud, and pair them with their weights.
wv = [(e.segments[pos].features[feat],
weights[e.case])
for e in cloud
if e.lemma == self.lemma
and e.case != self.case]
# Entrench the segment based on these values.
seg.entrench_feature(feat, wv,
top_value = paradigm_top_value,
max_movement = paradigm_max_movement) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _commit_level(self):\n assert self.current_level is not None, \"Cannot write a level with an empty name\"\n # Create a new level descriptor in the lump directory\n self.wad.add_lump(self.current_level, None)\n # Add the lumps to WAD file\n self.wad.add_lump('THINGS', self.lumps['THINGS'])\n self.wad.add_lump('LINEDEFS', self.lumps['LINEDEFS'])\n self.wad.add_lump('SIDEDEFS', self.lumps['SIDEDEFS'])\n self.wad.add_lump('VERTEXES', self.lumps['VERTEXES'])\n self.wad.add_lump('SECTORS', self.lumps['SECTORS'])\n self.lumps = {'THINGS':Things(), 'LINEDEFS':Linedefs(), 'VERTEXES':Vertexes(),'SIDEDEFS': Sidedefs(), 'SECTORS':Sectors()}",
"def Wraith_Form(self):\t\t\n\t\tprint(self.name.Title() + \"Wraith\")",
"def __change_level(self, level):\n self.level = level",
"def clean_level_(self):\n try:\n # Get the verb categories of the taxonomy\n verb_cats = VerbCategory.objects.filter(taxonomy=self.taxonomy)\n except Taxonomy.DoesNotExist:\n raise Http404('The taxonomy does not exist!')\n else:\n\n # Check categories for the entered level value\n submitted_level = self.cleaned_data.get('level', None)\n\n # if updating, need to allow the original level value to be re-entered\n old_level = None if not self.old_category else self.old_category.level\n\n if submitted_level in [cat.level for cat in verb_cats.all()\\\n if cat.level != old_level]:\n culprit = verb_cats.get(level=submitted_level)\n raise forms.ValidationError(f'The verb category \"{culprit.title}\" \\\n already has this value!')\n\n return submitted_level",
"def resetWordLevel(self, ID):\n\t\tcommand = \"UPDATE words SET level=0 WHERE ID=?\"\n\t\tparams = (ID,)\n\n\t\tself._run_command(command, params)",
"def addLevel(self):\n pass",
"def setLevel(self, level):\n self.lvl = level",
"def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)",
"def setLevel( self, lvl ):\n if isinstance( lvl, str ):\n return super().setLevel( lvl.upper() )\n else:\n return super().setLevel( lvl )",
"def __editUnindent(self):\n self.activeWindow().unindentLineOrSelection()",
"def addOtherForm(documentName, word, unique):\r\n formRef = \":form_\" + replace_form(word.word)\r\n if word.transliteration and word.transliteration.word != \"\" and word.transliteration.word != \" \":\r\n formRef += \"_\" + word.transliteration.word\r\n formRef += \"_\" + unique\r\n\r\n formRef += \" a ontolex:Form;\\n\"\r\n\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += word.word + \"\\\"\" + word.writingLanguage\r\n\r\n if word.transliteration and word.transliteration.word != \"\":\r\n writtenRepRef += \", \\\"\" + word.transliteration.word + \"\\\"\" + word.transliteration.writingLanguage\r\n writtenRepRef += \" .\"\r\n\r\n frequencyRef = \"\"\r\n if word.frequencyDict:\r\n frequencyRef = \"\\n\"\r\n for corpus,frequency in word.frequencyDict.items():\r\n if frequency != 0:\r\n frequencyRef +=' frac:frequency [a e2model:' + corpus +'; rdf:value \"' + str(frequency) + '\" ] ;\\n'\r\n frequencyRef = frequencyRef[:len(frequencyRef) -2]\r\n frequencyRef += \".\"\r\n formEntry = formRef + writtenRepRef\r\n if frequencyRef != \".\":\r\n formEntry = formEntry[:len(formEntry) -1]\r\n formEntry += \";\"\r\n formEntry += frequencyRef\r\n\r\n with open(documentName, 'a') as f:\r\n f.write(formEntry)\r\n f.write(\"\\n\\n\")\r\n return",
"def setWL(self, dn, w, l):\r\n # productive #frequent #onDrag\r\n if frequent: profprint();\r\n dn.SetWindow(w)\r\n dn.SetLevel(l)",
"def print_level():\n print(\"\")\n\n def show_hide_word(word):\n \"\"\"show/hide finished/unfinished words\"\"\"\n if word not in current_level.finished_words:\n return \"*\" * len(word)\n return word\n\n current_level.layout.print_layout(\n show_hide_word,\n # Print unfinished words first with '*'\n set(current_level.words) - set(current_level.finished_words),\n )\n\n # level state\n print(\"\")\n print(\"Level: %d/%d\" % (current_level_index + 1, len(all_levels)))\n if current_level.bonus_words:\n bonus_words_status = \"Bonus words: %d/%d\" % (\n len(current_level.finished_bonus_words),\n len(current_level.bonus_words)\n )\n bonus_words_status += \" %s\" % \" \".join(\n change_case(word)\n if word in current_level.finished_bonus_words\n else \"*\" * len(word)\n for word in current_level.bonus_words\n )\n print(bonus_words_status)\n\n # characters\n print(\"\")\n print(\"Chars: %s\" % \" \".join(change_case(char) for char in current_level.chars))\n print(\"\")",
"def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L",
"def setLevel(self, level):\n self.level = level",
"def reset_level(self, format_level):\n assert(format_level in pos_levels)\n self.level = format_level\n self.reset_format()",
"def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)",
"def edit(self):\n self.toplevel = tk.Toplevel()\n # ============================= Frame Setup\n # Get Frames for each side of the editor\n self.leftSide = tk.LabelFrame(self.toplevel, text=\"Leftside\")\n self.rightSide = tk.LabelFrame(self.toplevel, text=\"Rightside\")\n self.leftSide.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n self.rightSide.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n #### Build the leftside\n # Frame for controlling the title of node\n self.titleFrame = tk.LabelFrame(self.leftSide, text=\"Title\")\n self.titleFrame.pack(side=tk.TOP, fill=tk.X, expand=False)\n self.titleEntry = tk.Entry(self.titleFrame)\n self.titleEntry.pack(side=tk.LEFT, fill=tk.X, expand=True)\n self.titleUpdateButton = tk.Button(self.titleFrame, text=\"Update\", command=self.update_title_from_entry)\n self.titleUpdateButton.pack(side=tk.LEFT)\n # ============================= EditorFrame\n self.editorFrame = tk.Frame(self.leftSide)\n self.editorFrame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.textWidget = tk.Text(self.editorFrame)\n self.textWidget.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n # ============================= Status Bar\n self.statusFrame = tk.LabelFrame(self.leftSide, text=\"Status\", relief=tk.SUNKEN)\n self.statusFrame.pack(side=tk.TOP, fill=tk.X, expand=False)\n self.wordWrapStatus = tk.Menubutton(self.statusFrame)\n self.wordWrapStatus.pack()\n # ============================== Buttons on the right side of the editor\n self.buttonFrame = tk.Frame(self.rightSide)\n self.buttonFrame.pack(side=tk.TOP)\n self.saveButton = tk.Button(self.buttonFrame, text=\"save\", command=self.on_editor_save, bg=\"green\")\n self.exitButton = tk.Button(self.buttonFrame, text=\"exit\", command=self.on_editor_exit, bg=\"red\")\n self.saveButton.pack(side=tk.LEFT, fill=tk.X, expand=True)\n self.exitButton.pack(side=tk.LEFT, fill=tk.X, expand=True)\n # insert title of node into title entry\n self.titleEntry.insert(tk.END, self.title)\n # insert contents of node into textwidget\n self.textWidget.insert(tk.END, self.text)",
"def __editProjectPEL(self):\n pel = e5App().getObject(\"Project\").getProjectDictionaries()[1]\n self.__editSpellingDictionary(pel)",
"def incrementWordLevel(self, ID):\n\t\tcommand = \"UPDATE words SET level=level+1 WHERE ID=?\"\n\t\tparams = (ID,)\n\n\t\tself._run_command(command, params)",
"def level_up(self):\n pass",
"def level_down(self):\n if self.level > 1:\n self.level = self.level - 1\n self.update_level_buttons()",
"def level_up(self):\n if self.level < self.max_level:\n self.level = self.level + 1\n self.update_level_buttons()",
"def setWL(self,dn,w,l):\n #productive #frequent #onDrag\n if frequent: profprint();\n dn.SetWindow(w)\n dn.SetLevel(l)",
"def level_upgrade(self, lvl):\n\t\tpass",
"def update(self, event, level):\n\t\tDialog.update(self, event, level)\n\t\tif(self.index/SCROLL_CONSTANT >= len(self.text)):\n\t\t\tself.choosing = True",
"def clean(self):\n return super(CharacterSkillForm, self).clean()",
"def normal_form(self, w):\n return self.element_class(self, self._normalize_word(w))",
"def level(self, level):\n\n self._level = level",
"def level(self, level):\n\n self._level = level"
] | [
"0.51781356",
"0.5135534",
"0.51090235",
"0.50966406",
"0.5095478",
"0.5094889",
"0.5074766",
"0.5069701",
"0.49998125",
"0.49493456",
"0.4920468",
"0.49131694",
"0.4900337",
"0.48817414",
"0.48688623",
"0.48541382",
"0.48305783",
"0.48229364",
"0.4820413",
"0.47837245",
"0.47664204",
"0.47626385",
"0.47601262",
"0.47592494",
"0.47491702",
"0.47484893",
"0.47252023",
"0.47114256",
"0.4710655",
"0.4710655"
] | 0.5143527 | 1 |
Add noise to the nonsuffix segments in the WordForm. | def add_noise(self):
self.segments = deepcopy(self.segments)
# Iterate through each of the first three Segments in the WordForm.
for i in range(3):
# Add noise to each Segment.
self.segments[i].add_noise() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_noise(self, words, lengths):\n words, lengths = self.word_shuffle(words, lengths)\n words, lengths = self.word_dropout(words, lengths)\n # words, lengths = self.word_blank(words, lengths)\n return words, lengths",
"def add_noise(self, data):",
"def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertain',\n 'estimate',\n 'link',\n 'and',\n 'more',\n 'fetch',\n 'be',\n 'there',\n 'do',\n 'you',\n 'have',\n 'any',\n 'is',\n 'my',\n 'on',\n 'can',\n 'i',\n 'get',\n 'some',\n 'am',\n 'look',\n 'for',\n 'the',\n 'to',\n 'share',\n 'me',\n 'of',\n 'please',\n 'a',\n 'very',\n 'at',\n 'with',\n 'relate',\n 'sorry'\n ]]\n return ' '.join(word)",
"def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()",
"def noiseAtten(atten) :\n s.noiseAtten(atten)",
"def noise(self, freq: int, /) -> None:",
"def add_suffix(self, suffix):\n # Append the suffix vowel to this WordForm.\n self.segments.append(Segment.new_segment(suffix))",
"def noise(self, stddev):\n #add noise to weights\n pass",
"def addNoise(self, sigma=1.0):\n noise = numpy.random.normal(loc=0, scale=sigma, size=(self.ny, self.nx))\n self.image += noise\n return",
"def add_noise(self, words, lengths, lang_id):\n words, lengths = self.word_shuffle(words, lengths, lang_id)\n words, lengths = self.word_dropout(words, lengths, lang_id)\n words, lengths = self.word_blank(words, lengths, lang_id)\n return words, lengths",
"def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise",
"def make_noise(self, num):\n return np.random.randn(num, self.seq_length + 2 * self.seq_pad,\n self.noise_dim)",
"def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)",
"def _make_noisy(x, the_noise):\n noise_sample = the_noise[np.random.choice(the_noise.shape[0],\n x.shape[0],\n replace=False)]\n return x + noise_sample",
"def build_unigram_noise(freq):\n total = freq.sum()\n noise = freq / total\n assert abs(noise.sum() - 1) < 0.001\n return noise",
"def transform_audio(self, segment: Union[AudioSegment, SpeechSegment]) -> None:\n noise_data = self._rng.sample(self._noise_data, 1)[0]\n if noise_data[\"duration\"] < segment.duration:\n raise RuntimeError(\"The duration of sampled noise audio is smaller than the audio segment.\")\n diff_duration = noise_data[\"duration\"] - segment.duration\n start = self._rng.uniform(0, diff_duration)\n end = start + segment.duration\n noise_seg = AudioSegment.from_slice_file(noise_data[\"src\"], start=start, end=end)\n snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)\n segment.add_noise(noise_seg, snr_dB=snr_dB, allow_downsampling=True, rng=self._rng)",
"def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image",
"def remove_noise(text):\n text1 = re.sub(\"[\\t\\r\\s]\", \" \",text)\n text1 = \" \" + text1\n text2 = re.sub(r\"([ \" + string.punctuation + \"]+)[^a-zA-Z ]+\", \"\\g<1> \", text1)\n return text2",
"def add_noise(self, snr, unit=None):\n return self.from_time(self.fs, noisify(self.in_time, snr, unit=unit))",
"def make_noise(self, num):\n return np.random.randn(num, self.seq_length, self.noise_dim)",
"def add_noise(self):\n self.noise = torch.normal(0.5, .2, self.state.shape).double()\n self.noise *= torch.sqrt(2 *\n self.vars['T']*torch.tensor(self.vars['dt']))",
"def noise(self, noise):\n\n self._noise = noise",
"def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.image.shape)\n self.image += self.noise\n return",
"def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return",
"def add_weight_noise(self, std):\n with torch.no_grad():\n param_vector = parameters_to_vector(self.parameters())\n normal_dist = torch.distributions.Normal(loc=torch.tensor([0.0]), scale=torch.tensor([std]))\n noise = normal_dist.sample(param_vector.size())\n if self.device_id >= 0:\n noise = noise\n param_vector.add_(noise[0])\n vector_to_parameters(param_vector, self.parameters())",
"def noiseReduction(self):\n pass",
"def addNormalizing(self, name, seq):\n\n for i in xrange(len(seq) - self.kmer_size + 1):\n s = strandless(seq[i:i + self.kmer_size].upper())\n if \"N\" in s:\n continue\n self.normalizingKmers.add(s)",
"def add_noise(emg):\n MAX_AMPLITUDE = 32767\n\n # Sampling\n # 1 second of data requires 600 frames. And 600 fps is 600 Hz, sampling rate of EMG.\n Ts = 1/EMG_F_SAMPLE\n\n # Time vector\n t = np.arange(0, len(emg)/EMG_F_SAMPLE, Ts) # each unit of t is a second\n\n # Noise\n randAmplitudeScale = np.random.random()*0.1\n randOffset = np.random.random() * 2*np.pi\n \n fNoise = 50; # Frequency [Hz]\n aNoise = randAmplitudeScale*MAX_AMPLITUDE # Amplitude\n noise = aNoise * np.sin(2 * np.pi * t * fNoise + randOffset)\n\n # Add noise to signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] += noise\n return emg",
"def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)\n self.im += self.noise\n return",
"def generate_noise_vector(self, ):\n self.noise.resize_(\n self.batch_size, int(self.opt.nz), 1, 1).normal_(0, 1)\n self.noisev = Variable(self.noise) # TODO: Add volatile=True???"
] | [
"0.61349237",
"0.6130484",
"0.60680324",
"0.604623",
"0.5941968",
"0.58597803",
"0.58269954",
"0.58108455",
"0.56916386",
"0.56686974",
"0.56611174",
"0.55890733",
"0.55856615",
"0.55645496",
"0.556166",
"0.55571675",
"0.5511801",
"0.55111974",
"0.5508282",
"0.5461142",
"0.54018235",
"0.5400243",
"0.5397725",
"0.5388273",
"0.5380952",
"0.5362307",
"0.53568375",
"0.5338852",
"0.5334128",
"0.5333122"
] | 0.8073021 | 0 |
Return the similarity between this WordForm and the one provided. | def similarity(self, wf, positions = None, features = None):
# The similarity is the inverse square of the distance between the two
# WordForms. Impose a minimum on distances (to deal with zero).
dist = self.distance(wf, positions = positions, features = features)
if dist < .1:
dist = .1
sim = 1 / (dist ** 2)
return sim | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wordSimilarityRatio(sent_1,sent_2):",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity",
"def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim",
"def text_similarity(self, text_1: str, text_2: str):\n txt1 = self._pre_process(text_1)\n txt2 = self._pre_process(text_2)\n\n sim = self.model.wmdistance(txt1, txt2)\n\n if sim == inf:\n sim = INF_SIMILIARITY\n return sim",
"def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim",
"def similarity(self, word1: str, word2: str, metric='cosine') -> float:\n if 0 == self.word2idx.get(word1, 0) or 0 == self.word2idx.get(word2, 0):\n return 0.\n\n return self.similarity_vec(self[word1], self[word2], metric=metric)\n # vec1 = self.__getitem__(word1).reshape((1, -1))\n # vec2 = self.__getitem__(word2).reshape((1, -1))\n # return 1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1)",
"def similarity(self, new_sentence):\n cleaned = self.clean_string(new_sentence)\n stemmed = self.stem(cleaned, train=False)\n\n if not set(stemmed).intersection(set(self.vocabulary.keys())):\n return None\n\n else:\n difference = set(stemmed) - set(self.vocabulary.keys())\n to_append = np.zeros((self.matrix.shape[0], len(difference)))\n matrix = np.append(self.matrix, to_append, axis=1)\n\n new_voc = copy.deepcopy(self.vocabulary)\n for word in difference:\n if word not in new_voc:\n new_voc[word] = len(new_voc)\n\n question_vector = self.stem2vec(stemmed, new_voc)\n result = np.matmul(matrix, question_vector)\n return np.argmax(result)",
"def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)",
"def word_order_similarity(self,sentence_1, sentence_2):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = list(set(words_1).union(set(words_2)))\n\t windex = {x[1]: x[0] for x in enumerate(joint_words)}\n\t r1 = self.word_order_vector(words_1, joint_words, windex)\n\t r2 = self.word_order_vector(words_2, joint_words, windex)\n\t return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))",
"def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)",
"def similarity(self, wSet1, wSet2, idf): \n if len(wSet1) == 0 or len(wSet2) == 0:\n return 0.0\n else:\n defaultIDF = idf['unknownToken']\n intersection = wSet1.intersection(wSet2)\n# intersection = self.synonymIntersection(wSet1, wSet2, idf)\n if len(intersection) == 0:\n return 0\n sum1 = 0\n sum2 = 0\n intersectionSum = 0\n for word in wSet1:\n sum1 += (idf.get(word, defaultIDF))**2\n for word in wSet2:\n sum2 += (idf.get(word, defaultIDF))**2\n for word in intersection:\n intersectionSum += (idf.get(word, defaultIDF))**2\n \n if sum1 == 0 or sum2 == 0:\n return 0.0\n else:\n return intersectionSum/(math.sqrt(sum1) * math.sqrt(sum2))",
"def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))",
"def similarity(self, x, y, keyboard_weight=None):\r\n dist = self.distance(x, y, keyboard_weight)\r\n max_len = max(len(x), len(y))\r\n max_dissimilarity = max_len * self.scale_coef\r\n similarity = 1 - dist / max_dissimilarity\r\n return similarity",
"def similar_text(word1, word2) -> float:\n\n return textdistance.overlap.similarity(word1, word2)",
"def similarity(self, char1, char2, weights=(1.0, 0.0, 0.0), as_tree=False):\n\n assert char1 in self.char_dict\n assert char2 in self.char_dict\n shape_w, sound_w, freq_w = weights\n\n if char1 in self.char_dict and char2 in self.char_dict:\n\n shape_sim = self.shape_similarity(char1, char2, as_tree=as_tree)\n sound_sim = self.pronunciation_similarity(char1, char2)\n freq_sim = 1.0 - self.char_dict[char2] / len(self.char_dict)\n\n return shape_sim * shape_w + sound_sim * sound_w + freq_sim * freq_w\n else:\n return 0.0",
"def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)",
"def similarity(self, e1, e2):\n\t\tpass",
"def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)",
"def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))",
"def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))",
"def similarity_with(self, other_text_analyzer):\n pass",
"def compute_similarity(self, text1, text2):\n\n text1_dist = self.predict(text1)[0]\n text2_dist = self.predict(text2)[0]\n return jensenshannon(text1_dist, text2_dist)",
"def compare(self) -> float:\n if not self._hadith_text1 or not self._hadith_text2:\n raise Exception('Hadith texts to compare not set. Use setHadithTexts() to set the texts...')\n\n text1 = self._hadith_text1_cleaned\n text2 = self._hadith_text2_cleaned\n\n if self._ignore_diacritics:\n text1 = self._remove_diacritics(self._hadith_text1_cleaned)\n text2 = self._remove_diacritics(self._hadith_text2_cleaned)\n\n sm = difflib.SequenceMatcher(None, text1, text2)\n return sm.ratio()",
"def sentence_similarity(self,sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n\n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n vals = [synset.path_similarity(ss) if synset.path_similarity(ss) is not None else 0 for ss in synsets2]\n best_score = max(vals,default=0)\n\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n # Average the values\n if count == 0:\n return 0\n score /= count\n return score",
"def calc_similarity_between_words(word1, word2):\n # pos = wn.Noun is mandatory otherwise the lowest common hypernym cant be found because of part of speach\n word1_synsets = wn.synsets(word1, pos=wn.NOUN)\n word2_synsets = wn.synsets(word2, pos=wn.NOUN)\n\n w1 = get_words_from_sysets(word1_synsets)\n w2 = get_words_from_sysets(word2_synsets)\n\n sim_matrix = np.zeros((len(w1), len(w2)))\n\n for i in range(len(w1)):\n for j in range(len(w2)):\n try:\n sim_matrix[i, j] = embeddings.distances(w1[i], [w2[j]])\n except KeyError:\n sim_matrix[i, j] = 1000\n continue\n\n w1_ind, w2_ind = np.unravel_index(np.nanargmin(sim_matrix, axis=None), sim_matrix.shape)\n lowest_common_hyp = (word1_synsets[w1_ind]).lowest_common_hypernyms(word2_synsets[w2_ind])\n return (sim_matrix[w1_ind, w2_ind], lowest_common_hyp)",
"def similarities (self, listOfWords):\n \n # building the query dictionary\n queryDict = collections.defaultdict(int)\n for w in listOfWords:\n queryDict [w] += + 1.0\n \n # normalizing the query\n length = float (len (listOfWords))\n for k in queryDict:\n queryDict [k] /= length\n \n # computing the list of similarities\n sims = []\n for doc in self.documents:\n score = 0.0\n docDict = doc [1]\n for k in queryDict:\n if docDict.has_key (k):\n score += (queryDict [k] / self.corpusDict [k]) + (docDict [k] / self.corpusDict [k])\n sims.append ([doc [0], score])\n \n return sims",
"def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity",
"def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)"
] | [
"0.7596557",
"0.747573",
"0.747573",
"0.7371653",
"0.73133916",
"0.7294227",
"0.72550523",
"0.71882695",
"0.71096104",
"0.70324767",
"0.7010185",
"0.6944519",
"0.692075",
"0.6890939",
"0.6874697",
"0.68725497",
"0.67834336",
"0.67505664",
"0.6722828",
"0.6527661",
"0.6500944",
"0.6483019",
"0.6470009",
"0.6463275",
"0.6459873",
"0.64470315",
"0.64300597",
"0.63874346",
"0.63811654",
"0.6362673"
] | 0.76069194 | 0 |
Given an Frame object, will return the bytes of that Frame's file. If provided, will also scale the size of the image and convert to the required format. | def convert_frames(frame, img_format: str, scale=None) -> bytes:
path = frame.filename
with open(path, "rb") as image_file:
im = Image.open(image_file)
converted_img = BytesIO()
if scale:
_LOGGER.debug("Scaling the image")
(width, height) = (int(im.width * scale), int(im.height * scale))
_LOGGER.debug("Original size is {}wx{}h, new size is {}wx{}h".format(im.width, im.height, width, height))
im = im.resize([width, height])
im.save(converted_img, img_format)
return converted_img.getvalue() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)",
"def getFrame(self):\n s, image = self.capture.read()\n return image",
"def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]",
"def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]",
"def image_to_byte_array(image: Image, f='JPEG'):\n imgByteArr = io.BytesIO()\n image.save(imgByteArr, format=f)\n imgByteArr = imgByteArr.getvalue()\n return imgByteArr",
"def decodeFrame(self, image):\n return image",
"def to_blob(self):\n x = cv2.dnn.blobFromImage(self.frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n return x",
"def convertFrame(self):\n try:\n height,width=self.currentFrame.shape[:2]\n img=QtGui.QImage(self.currentFrame,\n width,\n height,\n QtGui.QImage.Format_RGB888)\n img=QtGui.QPixmap.fromImage(img)\n self.previousFrame = self.currentFrame\n return img\n except:\n return None",
"def decodeFrame(frameJson):\n frameBase64 = frameJson[\"imageBase64\"]\n return base64.b64decode(frameBase64)",
"def readFrame(self):\n\t\tsuccess, self.frameImage = self.vidcap.read()\n\t\treturn success, self.frameImage",
"def convertFrame(self):\r\n try:\r\n height, width = self.currentFrame.shape[:2]\r\n img = QtGui.QImage(self.currentFrame,\r\n width,\r\n height,\r\n QtGui.QImage.Format_RGB888)\r\n img = QtGui.QPixmap.fromImage(img)\r\n self.previousFrame = self.currentFrame\r\n return img\r\n except:\r\n return None",
"def convertFrame(self):\n try:\n img = QImage(self.currentVideoFrame,\n self.currentVideoFrame.shape[1],\n self.currentVideoFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def convertFrame(self):\n try:\n img = QImage(self.currentVideoFrame,\n self.currentVideoFrame.shape[1],\n self.currentVideoFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break",
"def get_frame(frame):\n\n return int.from_bytes(frame, byteorder='big')",
"def test_get_image_and_to_byte_array_are_compatible(self):\n\n with open(self.subject, \"rb\") as f:\n content = f.read()\n\n image = image_helper.get_image(content)\n\n self.assertEqual(image.size, (800, 450))\n\n bytes_array = image_helper.to_byte_array(image)\n\n image = image_helper.get_image(bytes_array)\n\n self.assertEqual(image.size, (800, 450))",
"def read(self):\n\n # Obtém frame da câmera.\n status , frame = super().read()\n\n if not status: return\n\n # Obtém a imagem.\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n \n # Se a opção de efeito espelho estiver ativa, a imagem será invertida.\n if self.__mirror:\n frame = frame.transpose(Image.FLIP_LEFT_RIGHT)\n \n return ImageTk.PhotoImage(frame) , frame.size",
"def make_blob(self, format=None):\n if format is not None:\n with self.convert(format) as converted:\n return converted.make_blob()\n library.MagickResetIterator(self.wand)\n length = ctypes.c_size_t()\n blob_p = library.MagickGetImageBlob(self.wand, ctypes.byref(length))\n if blob_p and length.value:\n blob = ctypes.string_at(blob_p, length.value)\n library.MagickRelinquishMemory(blob_p)\n return blob\n self.raise_exception()",
"def frame(self):\n try:\n AppHelper.runConsoleEventLoop(installInterrupt=True)\n return str(self._delegate.frame.representations()[0].TIFFRepresentation().bytes())\n except:\n return None",
"def image_to_byte(img):\n img2 = img.crop(box=None)\n byte_arr = io.BytesIO()\n img2.save(byte_arr, format='PNG')\n return byte_arr.getvalue()",
"def get_movie_frame(movie_file, frame=0):\n movie = cv2.VideoCapture(movie_file)\n _, image = movie.read() \n height, width, _ = image.shape\n filename = os.path.splitext(movie_file)[0] + f'_{frame}.jpg'\n cv2.imwrite(filename, image)\n \n return filename, height, width",
"def image_to_bytes(a, fmt='png'):\n a = np.uint8(a)\n f = io.BytesIO()\n ima = PIL.Image.fromarray(a).save(f, fmt)\n return f.getvalue()",
"def read(self):\n\n # ret, image = self.video.read()\n (self.grabbed, self.frame) = self.cap.read()\n image = self.frame\n\n if image is not None:\n \"\"\"Update FPS, and incode received frame. \"\"\"\n self.fps.update()\n # TODO: add self.fps.fps() to image, if flagged raised.\n\n # We are using Motion JPEG, but OpenCV defaults to cap raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n\n # display a piece of text to the frame (so we can benchmark\n # fairly against the fast method)\n self.fps.stop()\n cv2.putText(image, \"FPS (simple): {:.2f}\".format(self.fps.fps()), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n self.frame = image.copy()\n\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n else:\n self.logger.debug(\"in 'get_frame', video.read not success\")",
"def _prepare_frame(self, frame):\n\n initial_h, initial_w = frame.shape[:2]\n scale_h, scale_w = initial_h / float(self.input_height), initial_w / float(self.input_width)\n\n in_frame = cv2.resize(frame, (self.input_width, self.input_height))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(self.input_size)\n\n return in_frame, scale_h, scale_w",
"def getFile(self, file_name: str) -> bytes:\n obj = self.driver.get_object(self.container_name, file_name)\n gen = self.driver.download_object_as_stream(obj)\n file_as_bytes = next(gen)\n return file_as_bytes",
"def grabFrame(self):\r\n \r\n data, w, h, orientation = self.grabRawFrame()\r\n return Image.fromstring(\"RGB\", (w, h), data, \"raw\", \"BGR\", 0, orientation)",
"def get_original_frame(self, frame):\n if self._pad_top is not None:\n frame = frame[self._pad_top:frame.shape[0] - self._pad_bottom,\n self._pad_left:frame.shape[1] - self._pad_right]\n if self._scale_factor is not None and self._scale_factor != 1:\n frame = cv2.resize(frame,\n (int(frame.shape[1] / self._scale_factor),\n int(frame.shape[0] / self._scale_factor)))\n return frame",
"def get_frame(self):\n self._serial_port.close()\n self._serial_port.open()\n\n self._request_frame()\n\n serial_data = self._serial_port.readall()\n\n frame_start_idx = serial_data.find(BEGIN_FRAME) + len(BEGIN_FRAME)\n frame_end_idx = serial_data.find(END_FRAME)\n\n print serial_data[0:frame_start_idx]\n print serial_data[frame_end_idx:]\n\n raw_frame = serial_data[frame_start_idx:frame_end_idx]\n\n np_frame = np.fromstring(raw_frame, dtype=np.uint8)\n # np_frame = np_frame.reshape((30, 30))\n\n # image = cv2.fromarray(np_frame)\n\n # return image\n return np_frame",
"def PIL_to_bytes(img, ext: str = 'png', quality: int = None):\n bytes_io = io.BytesIO()\n if quality is not None:\n img.save(bytes_io, format=ext, quality=quality)\n else:\n subsampling = -1 if ext.lower() in ['jpg', 'jpeg'] else 0\n img.save(bytes_io, format=ext, quality=100, subsampling=subsampling)\n bytes_io.seek(0)\n return bytes_io",
"def get_byte(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_get_byte(self, *args)"
] | [
"0.5845967",
"0.5829252",
"0.568239",
"0.5598484",
"0.5597409",
"0.5580428",
"0.5558993",
"0.55013925",
"0.54556245",
"0.54468995",
"0.54443717",
"0.54354507",
"0.54354507",
"0.54297215",
"0.5396869",
"0.5359741",
"0.5324554",
"0.5318987",
"0.53071946",
"0.53018034",
"0.5250378",
"0.52319074",
"0.52235484",
"0.52053475",
"0.5169272",
"0.5164558",
"0.5150451",
"0.51420194",
"0.51172227",
"0.5109753"
] | 0.7540226 | 0 |
Given a dictionary, changes the key from snake case to lower camel case. | def lower_camel_casify_dict_keys(d: dict) -> dict:
return {to_camel_case(key): value for key, value in d.items()} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform_from_camelcase(key):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', key)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def transform_to_camelcase(key):\n return Jsonifiable.lower_first(\n ''.join(c.capitalize() or '_' for c in key.split('_')))",
"def convert_dict_keys_to_camel_case(d):\n data = {}\n for k, v in d.items():\n new_key = snake_to_camel_case(k)\n data[new_key] = d[k]\n return data",
"def dict_keys_snake_to_camel_case(snake_dict: dict) -> dict:\n\n camel_dict = dict()\n\n for key, val in snake_dict.items():\n if isinstance(key, str):\n camel_dict[snake_to_camel_case(key)] = val\n else:\n camel_dict[key] = val\n\n return camel_dict",
"def _lower(dictionary: dict):\n return {key.lower(): value.lower() for key, value in dictionary.items()}",
"def transform_key(self, key):\n return key.lower()",
"def lowercase_keys(input_dict):\n if not isinstance(input_dict,dict):\n return input_dict\n\n safe = dict()\n for key,value in input_dict.items():\n safe[str(key).lower()] = value\n return safe",
"def lower_dict(input_dict):\r\n return {k.lower(): v for k, v in input_dict.iteritems()}",
"def _convert_keys_to_lower(self, dictionary: dict) -> dict:\n lower_case_dictionary = OrderedDict()\n\n for key, value in dictionary.items():\n if not key.islower():\n if key.lower() in lower_case_dictionary.keys():\n raise ValueError(f\"Duplicate (case insensitive) key found: {key.lower()}\")\n if isinstance(value, dict):\n lower_case_dictionary[key.lower()] = self._convert_keys_to_lower(value)\n else:\n lower_case_dictionary[key.lower()] = value\n\n return lower_case_dictionary",
"def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])",
"def lower_dict_keys(origin_dict):\n if not origin_dict or not isinstance(origin_dict, dict):\n return origin_dict\n\n return {key.lower(): value for key, value in origin_dict.items()}",
"def convert_to_snake_case(camel_case_string):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case_string)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace('__', '_')",
"def __setitem__(self, key, value):\n super(CaseInsensitiveStringDict, self).__setitem__(key.lower(), value)",
"def _lower(self, mapping):\n _mapping = {}\n for k, v in sorted(mapping.items()):\n k = k.lower()\n if k not in _mapping:\n _mapping[k] = v\n return _mapping",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])",
"def camel_to_snake_case(name: str) -> str:\n return CAPITALS.sub(r'_\\1', name).lower().lstrip('_')",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def _case_convert_snake_to_camel(token: str) -> str:\n while True:\n try:\n # find next underscore\n underscore_loc = token.index('_')\n except ValueError:\n # converted all underscores\n break\n # is the underscore at the end of the string?\n if underscore_loc == len(token) - 1:\n break\n\n orig = token\n token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'\n # is there more after the capital?\n if len(orig) > underscore_loc+2:\n token += f'{orig[underscore_loc+2:]}'\n return token",
"def camel_to_snake_case(value):\n return re_camel_case.sub(r\"_\\1\", value).strip(\"_\").lower()",
"def _camel_to_snake(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()",
"def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])",
"def lowercase_dict_keys(self):\n lower1 = {}\n for key1, val1 in self.reffile_overrides.items():\n if isinstance(val1, dict):\n lower2 = {}\n for key2, val2 in val1.items():\n if isinstance(val2, dict):\n lower3 = {}\n for key3, val3 in val2.items():\n if isinstance(val3, dict):\n lower4 = {}\n for key4, val4 in val3.items():\n if isinstance(val4, dict):\n lower5 = {}\n for key5, val5 in val4.items():\n if isinstance(val5, dict):\n lower6 = {}\n for key6, val6 in val5.items():\n lower6[key6.lower()] = val6\n lower5[key5.lower()] = deepcopy(lower6)\n else:\n lower5[key5.lower()] = val5\n lower4[key4.lower()] = deepcopy(lower5)\n else:\n lower4[key4.lower()] = val4\n lower3[key3.lower()] = deepcopy(lower4)\n else:\n lower3[key3.lower()] = val3\n lower2[key2.lower()] = deepcopy(lower3)\n else:\n lower2[key2.lower()] = val2\n lower1[key1.lower()] = deepcopy(lower2)\n else:\n lower1[key1.lower()] = val1\n self.reffile_overrides = lower1",
"def convert_camel_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def snake_case_to_headless_camel_case(snake_string):\n return ''.join([snake_string.split('_')[0]] +\n list(sub_string.capitalize()\n for sub_string in snake_string.split('_')[1:]))",
"def snake_to_camel(snake_str):\n title_str = snake_str.split('_')\n return ' '.join(title_str).title()",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def convert_dict_key_case(obj, converter):\n if not isinstance(obj, dict):\n return obj\n\n obj = obj.copy()\n for key in list(six.iterkeys(obj)):\n converted_key = converter(key)\n obj[converted_key] = convert_dict_key_case(obj.pop(key), converter)\n\n return obj",
"def snake_to_camel(name):\n if name == \"role_arn\":\n return \"roleARN\"\n temp = name.split(\"_\")\n return temp[0] + \"\".join(ele.title() for ele in temp[1:])"
] | [
"0.7801124",
"0.77649593",
"0.77066845",
"0.7620036",
"0.7379338",
"0.7279606",
"0.71873266",
"0.71728736",
"0.7140577",
"0.70116407",
"0.69478273",
"0.69024104",
"0.68458456",
"0.6818799",
"0.6726056",
"0.67258394",
"0.6712159",
"0.6710847",
"0.67080796",
"0.6662867",
"0.6595556",
"0.65670604",
"0.6565834",
"0.6563058",
"0.65605944",
"0.6554441",
"0.6543572",
"0.6543572",
"0.65242714",
"0.6453751"
] | 0.7839561 | 0 |
Ensure IPCMessageSubscriber.connect gets wrapped by salt.utils.asynchronous.SyncWrapper. | async def test_ipc_connect_sync_wrapped(io_loop, tmp_path):
if salt.utils.platform.is_windows():
socket_path = ports.get_unused_localhost_port()
else:
socket_path = str(tmp_path / "noexist.ipc")
subscriber = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageSubscriber,
args=(socket_path,),
kwargs={"io_loop": io_loop},
loop_kwarg="io_loop",
)
with pytest.raises(tornado.iostream.StreamClosedError):
# Don't `await subscriber.connect()`, that's the purpose of the SyncWrapper
subscriber.connect() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sync_connect(self):\n loop = asyncio.get_event_loop()\n task = loop.create_task(self.connect())\n loop.run_until_complete(task)",
"async def _connect(self):\n pass",
"async def connect(self):\n raise NotImplementedError",
"async def on_connect(self) -> None:",
"async def connect(self):\n pass",
"async def on_connect(self):\n pass",
"async def on_connect(self):\r\n self._try_shutdown_twitch()\r\n self.stream_thread = self.connect_thread()",
"def _connect(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")",
"def handle_connect(self):\n if self.use_ssl:\n self.ssl = ssl.wrap_socket(self.socket)\n self.set_socket(self.ssl)",
"async def connect(self):\n try:\n self._cmd_stream = await self._connect()\n self.inc_counter(\"%s.connected\" % self.objname)\n self.logger.info(\"Connected: %s\", self._extra_info)\n except Exception as e:\n self.logger.error(\"Connect Failed %r\", e)\n self.inc_counter(\"%s.failed\" % self.objname)\n raise e",
"async def _async_connect_to_chromecast(self):\n _LOGGER.debug(\n \"[%s %s] Connecting to cast device by service %s\",\n self._name,\n self._cast_info.friendly_name,\n self._cast_info.cast_info.services,\n )\n chromecast = await self.hass.async_add_executor_job(\n pychromecast.get_chromecast_from_cast_info,\n self._cast_info.cast_info,\n ChromeCastZeroconf.get_zeroconf(),\n )\n self._chromecast = chromecast\n\n if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:\n self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()\n\n self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]\n\n self._status_listener = CastStatusListener(\n self, chromecast, self.mz_mgr, self._mz_only\n )\n chromecast.start()",
"def _connect(self):\r\n self.sock = socket.socket()\r\n host = \"pubsub.pubnub.com\"\r\n port = 80\r\n if self.use_ssl:\r\n self.sock = ssl.wrap_socket(self.sock)\r\n port = 443\r\n self.sock.connect((host, port))\r\n self.connected = True",
"def connect(self, reconnect=True, *args, **kwargs):\n pass",
"async def async_connect(self):\n # Test the router is accessible.\n try:\n data = await self.connection.async_get_connected_devices()\n self.success_init = data is not None\n except OSError as ex:\n _LOGGER.warning(\n \"Error [%s] connecting %s to %s.\",\n str(ex),\n DOMAIN,\n self.host,\n )\n raise ConnectionError(\"Cannot connect to D-Link router\")\n\n if not self.connection.is_connected:\n _LOGGER.error(\"Error connecting %s to %s\", DOMAIN, self.host)\n raise ConnectionError(\"Cannot connect to D-Link router\")",
"async def __initiate_connection(self):\r\n\r\n chainlink_model = ChainlinkResolver.resolve(self.name)\r\n if chainlink_model is None:\r\n LoggerInterface.error(f'The chainlink {self.name} is not registered yet. Register it first!')\r\n return\r\n\r\n self.socket_client.set_callback(self.callback)\r\n self.socket_client.set_using_chainlink(chainlink_model)\r\n await self.socket_client.connect()",
"def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()",
"def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable",
"def test_connectEvent(self):\n reactor = self.buildReactor()\n\n self.listen(reactor, ServerFactory.forProtocol(Protocol))\n connected = []\n\n class CheckConnection(Protocol):\n def connectionMade(self):\n connected.append(self)\n reactor.stop()\n\n clientFactory = Stop(reactor)\n clientFactory.protocol = CheckConnection\n\n needsRunningReactor(reactor, lambda: self.connect(reactor, clientFactory))\n\n reactor.run()\n\n self.assertTrue(connected)",
"async def _connect(self):\n if not self._reader:\n self._reader = asyncio.create_task(self._read())",
"async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True",
"def _connectf(self, connection):\r\n\r\n # in case the SSL connection is still undergoing the handshaking\r\n # procedures (marked as connecting) ignores the call as this must\r\n # be a duplicated call to this method (to be ignored)\r\n if connection.ssl_connecting: return\r\n\r\n # verifies if there was an error in the middle of the connection\r\n # operation and if that's the case calls the proper callback and\r\n # returns the control flow to the caller method\r\n error = connection.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)\r\n if error: self.on_error(connection.socket); return\r\n\r\n # checks if the current connection is SSL based and if that's the\r\n # case starts the handshaking process (async non blocking) otherwise\r\n # calls the on connect callback with the newly created connection\r\n if connection.ssl: connection.add_starter(self._ssl_client_handshake)\r\n else: self.on_connect(connection)\r\n\r\n # runs the starter process (initial kick-off) so that all the starters\r\n # registered for the connection may start to be executed, note that if\r\n # the SSL handshake starter has been registered its first execution is\r\n # going to be triggered by this call\r\n connection.run_starter()",
"async def async_connect(self) -> None:\n # pylint: disable-next=import-outside-toplevel\n import paho.mqtt.client as mqtt\n\n result: int | None = None\n try:\n result = await self.hass.async_add_executor_job(\n self._mqttc.connect,\n self.conf[CONF_BROKER],\n self.conf.get(CONF_PORT, DEFAULT_PORT),\n self.conf.get(CONF_KEEPALIVE, DEFAULT_KEEPALIVE),\n )\n except OSError as err:\n _LOGGER.error(\"Failed to connect to MQTT server due to exception: %s\", err)\n\n if result is not None and result != 0:\n _LOGGER.error(\n \"Failed to connect to MQTT server: %s\", mqtt.error_string(result)\n )\n\n self._mqttc.loop_start()",
"def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)",
"async def async_connect_socket(streamer_obj: class_definition_and_manipulation.StreamerObj) -> None:\r\n reader, writer = await asyncio.open_connection(encryption_key.cfg_host,\r\n int(encryption_key.cfg_port))\r\n\r\n writer.write(f'CAP REQ :twitch.tv/membership twitch.tv/tags twitch.tv/commands\\r\\n'.encode('utf-8'))\r\n print(f\"Connecting to socket for {streamer_obj.name}\")\r\n\r\n writer.write(\"PASS {}\\r\\n\".format(encryption_key.decrypted_pass).encode('utf-8')) # password\r\n writer.write(\"NICK #zerg3rrbot\\r\\n\".encode('utf-8')) # bot name\r\n writer.write(f\"JOIN #{streamer_obj.name}\\r\\n\".encode('utf-8'))\r\n\r\n await writer.drain()\r\n streamer_obj.stream_socket_writer = writer\r\n streamer_obj.stream_socket_reader = reader",
"def connect(self):\n self.conn.add_listener(self.handle_connection_change)\n self.conn.start_async()",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def subscribe(receiver):",
"async def async_connect(self) -> None:\n params = {\"ns\": self._namespace, \"accessToken\": self._access_token}\n try:\n await self._sio.connect(\n f\"{API_URL_BASE}?{urlencode(params)}\",\n namespaces=[self._namespace],\n transports=[\"websocket\"],\n )\n except (ConnError, SocketIOError) as err:\n raise WebsocketError(err) from None",
"def handle_connect(self):\n pass"
] | [
"0.65937483",
"0.6521959",
"0.6248328",
"0.60831505",
"0.6030129",
"0.6018049",
"0.58920634",
"0.58730686",
"0.58308804",
"0.5825015",
"0.58228856",
"0.5811319",
"0.58058876",
"0.57596046",
"0.57403564",
"0.5738906",
"0.5736388",
"0.5714165",
"0.5698439",
"0.5685224",
"0.56685305",
"0.5665709",
"0.5658102",
"0.56237024",
"0.56208843",
"0.5600711",
"0.5600711",
"0.5600711",
"0.55924636",
"0.556868"
] | 0.7188033 | 0 |
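The negative snippets listed above share a lazy-connect idiom: connect() starts a background reader or event loop only on the first call, and later calls are no-ops. A minimal, self-contained sketch of that idiom follows; the Connection class, host/port arguments, and placeholder _read loop are illustrative assumptions, not taken from any one snippet.

import asyncio

class Connection:
    # Hypothetical client illustrating the "create the reader task once" pattern
    # seen in the async _connect negatives above.
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self._reader_task = None

    async def _read(self):
        # Placeholder read loop; a real client would consume a stream here.
        while True:
            await asyncio.sleep(1)

    async def connect(self):
        # Only the first call creates the task; repeat calls do nothing.
        if self._reader_task is None:
            self._reader_task = asyncio.create_task(self._read())

# Inside a running event loop: await Connection("localhost", 8080).connect()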
Receives a list and a search term. Use a loop to go through the list and see if the string is there. if it is return "string found". if not, return "string not found" | def search_for_string(lst_str, stringy):
if stringy in lst_str:
return "Found string"
else:
return "string not found" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)",
"def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass",
"def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff",
"def listsearch(query, item):\n fh = ''\n if not isinstance(item, six.string_types):\n fh = item[1]\n item = item[0]\n\n return bool(re.search(query, item) or\n re.search(query, fh))",
"def not_found(tlist: list, search_string: str):\n fail_msg = \"No\"\n if \"actor\" in tlist:\n remain = len([t for t in tlist if t != 'actor'])\n if remain == 0:\n spot = \"\"\n if remain == 1:\n spot = \" or\"\n if remain == 2:\n spot = \",\"\n fail_msg = f\"{fail_msg} actor{spot}\"\n if \"indicator\" in tlist:\n remain = len([t for t in tlist if t != 'indicator'])\n fail_msg = f\"{fail_msg} indicator{' or' if remain > 0 else ''}\"\n if \"report\" in tlist:\n fail_msg = f\"{fail_msg} report\"\n fail_msg = f\"{fail_msg} matches found for {bold(search_string)}.\"\n\n raise SystemExit(fail_msg)",
"def contains(str_or_list, val_to_find):\n \n return (val_to_find in str_or_list)",
"def search(self, word):",
"def linear_search(key, my_list):\n key = word.upper()\n my_list = dictionary_list\n if key in my_list:\n if not key:\n print(word)",
"def find(self, search):\n if type(search) == str:\n search = [search]\n\n for s in search:\n if self.text.lower().find(s.lower()) != -1:\n return True\n\n return False",
"def search(self, word: str) -> bool:\n # Checking if the word is present in the list.\n return word in self.mylist",
"def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break",
"def linearsearch(input, value):\n count = 0\n for i in input:\n if (value == i):\n count += 1\n if count > 0:\n return \"Value, {0}, is in the list\".format(value)\n else:\n return \"Value, {0}, cannot be found\".format(value)",
"def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break",
"def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass",
"def search(query_string):",
"def exact_search(string, row):\n clear_screen()\n found = False\n for item in row:\n if string.lower() in item[\"Task\"].lower() \\\n or string.lower() in item[\"Notes\"].lower():\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")",
"def is_input_list(sentence_word,input_list):\r\n\t\r\n\tfor input_word in input_list:\r\n\t\tif input_word in sentence_word:\r\n\t\t\treturn input_word\r\n\t\t\r\n\treturn \"none\"",
"def findentity(string):\r\n for x in entitylist:\r\n if x in string:\r\n print(f\"(Doc.{i})--Entity = {x.title()}\")\r\n break",
"def pageContains(page, strList):\n for text in strList:\n if text in page['data']:\n logging.log(5, 'Found string %s' % text)\n return True\n\n return False",
"def search(self, term):",
"def __find_string_in_response(self, fullResponse, searchFor):\n check = True\n rawResponse = fullResponse;\n if \"result\" not in rawResponse.text:\n check = False\n else:\n responseJSON = rawResponse.json()\n length_responseJSON = len(responseJSON[\"result\"])\n for i in range(0,length_responseJSON,1):\n check = searchFor in responseJSON[\"result\"][i][\"first_name\"]\n if check == False:\n return check\n return check",
"def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"",
"def search_keyword_in_list(keyword, input_list):\n\n match_list = []\n for element in input_list:\n if element.__name__ == keyword:\n if WarriorCliClass.mock or WarriorCliClass.sim:\n if element.__dict__.get(\"mockready\") is None:\n pNote_level(\"The selected keyword {} isn't supported in trial mode\".format(element.__name__), \"ERROR\")\n else:\n pNote_level(\"Keyword {} is being mocked\".format(element.__name__), \"INFO\")\n match_list.append(element)\n else:\n match_list.append(element)\n return match_list",
"def list_has_substring(substring, l):\n found_substring = False\n for item in l:\n if substring in item:\n found_substring = True\n break\n\n return found_substring",
"def substring_in_list(s, varlist):\n if varlist is None:\n return False\n is_sub = False\n for v in varlist:\n if v in s:\n is_sub = True\n break\n return is_sub",
"def find_item(value: str, items: WebElements) -> WebElement:\n for item in items:\n if value in item.text.lower():\n return item",
"def index_containing_substring(search_list, substring, multiples=True):\n num_found = 0\n list_index = -1\n\n for index, s in enumerate(search_list):\n if substring in s:\n if num_found == 0:\n list_index = index\n\n num_found += 1\n\n if list_index == -1:\n raise ValueError(search_list.index(substring))\n else:\n if not multiples and num_found > 1:\n raise MultipleStringError(\"Multiple {0} found in search_list.\".format(substring))\n else:\n return list_index",
"def word_in_list(word_list):\n word_set = set(word_list)\n inp_word = \"\"\n while inp_word != \"/q\":\n if inp_word == \"/q\":\n break\n inp_word = input(\"What word do you want to check? ('/q' to stop) > \")\n if inp_word in word_set:\n print(f\"Word '{inp_word}' is in the list!\")\n else:\n print(f\"Cannot find word '{inp_word}' in the list.\")",
"def find_by_exact_match(self):\n while True: \n self.task_name_search = input(\"What is the keyword/s you are looking\"\n \" for? Press Q to quit to the main screen: \").strip()\n if self.task_name_search.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n x = self.dict_list\n return x\n self.find_by_exact_match_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(self.task_name_search, value):\n self.find_by_exact_match_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_exact_match_list)\n break\n self.del_or_edit()",
"def find_str(self, find_exp, where):\n found = False\n for item in where:\n if find_exp in str(item):\n self.assertTrue(True)\n found = True\n break\n if not found:\n self.assertTrue(False)"
] | [
"0.7277806",
"0.7085442",
"0.70570374",
"0.7042019",
"0.6853034",
"0.68284607",
"0.6799059",
"0.6741653",
"0.67367554",
"0.6710084",
"0.6704459",
"0.6700498",
"0.65898585",
"0.65151054",
"0.6475276",
"0.6447157",
"0.64241886",
"0.6397633",
"0.6358592",
"0.6356772",
"0.6352888",
"0.63513005",
"0.6316425",
"0.62975174",
"0.6293719",
"0.62935233",
"0.628278",
"0.6274371",
"0.6268759",
"0.6230866"
] | 0.8100969 | 0 |
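The query above asks for an explicit loop, while the stored document uses the in operator. A loop-based sketch that returns exactly the strings the query names is shown here; the function and parameter names simply mirror the document and are otherwise assumptions.

def search_for_string_loop(lst_str, stringy):
    # Walk the list element by element, per the query's "use a loop" wording.
    for item in lst_str:
        if item == stringy:
            return "string found"
    return "string not found"

# search_for_string_loop(["a", "b", "c"], "b") -> "string found"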
andExpr = relationalExpr { "and" relationalExpr } | def andExpr( ): #DOUBLE CHECK THIS
tok = tokens.peek( )
if debug: print("andExpr: ", tok)
left = relationalExpr( ) #does the left side of the grammar
tok = tokens.peek( )
	while tok == "and": # checks whether the token is "and" and, if so, performs what is inside the curly brackets, since it is a repeated series
tokens.next()
right = relationalExpr( )
		left = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE TO STRING
tok = tokens.peek( )
return left | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AND(*expressions):\n return {'$and': list(expressions)}",
"def __and__(self, query):\r\n return And([self, query]).normalize()",
"def and_(a, b):",
"def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred",
"def __and__(self, other):\n return self.fam.c_binop('and', self, other)",
"def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)",
"def and_(*args, **kwargs):\n ...",
"def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0",
"def __and__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.and_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.And, other)\n return out",
"def _prefix_and(*exprs, **kwargs):\n anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)\n if len(anded) == 0:\n return ''\n return kwargs.get('prefix', 'WHERE ') + anded",
"def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)",
"def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp",
"def Nand(*args):\n return Not(And(*args))",
"def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and",
"def _and(cls, arg1, arg2):\n return arg1 and arg2",
"def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)",
"def visit_and(self, left_result: T, right_result: T) -> T:",
"def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()",
"def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)",
"def and_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0",
"def conjuncts(s):\n return dissociate(\"AND\", s)",
"def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])",
"def _daat_and(self):\n raise NotImplementedError",
"def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)",
"def simplify_and_node(parse_str=None, location=None, tokens=None):\n if len(tokens) == 1:\n return tokens[0]\n else:\n return AndNode(tokens.asList())",
"def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)",
"def _and(self, _and):\n\n self.__and = _and",
"def _and(self, _and):\n\n self.__and = _and",
"def _and(self, _and):\n\n self.__and = _and",
"def _and(self, _and):\n\n self.__and = _and"
] | [
"0.7694933",
"0.7391504",
"0.7278942",
"0.72714126",
"0.72140443",
"0.71525294",
"0.71054393",
"0.7005105",
"0.6971053",
"0.6965941",
"0.69083124",
"0.68721735",
"0.68457156",
"0.68190354",
"0.6809348",
"0.6774739",
"0.67718345",
"0.6753265",
"0.6741417",
"0.67354465",
"0.67034066",
"0.66916305",
"0.6630332",
"0.66224724",
"0.661375",
"0.6570111",
"0.65543115",
"0.65543115",
"0.65543115",
"0.65543115"
] | 0.8083135 | 0 |
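andExpr, addExpr, and term in the rows of this dump all repeat the same left-associative loop: parse one operand, then keep folding while the lookahead is an accepted operator. A self-contained sketch of that shared pattern follows; parse_left_assoc, the plain token list, and the tuple nodes are hypothetical stand-ins for the tokens/BinaryExpr machinery used in the documents.

def parse_left_assoc(tokens, parse_operand, operators):
    left = parse_operand(tokens)
    while tokens and tokens[0] in operators:
        op = tokens.pop(0)
        right = parse_operand(tokens)
        left = (op, left, right)   # stand-in for BinaryExpr(op, left, right)
    return left

def parse_number(tokens):
    return int(tokens.pop(0))

# parse_left_assoc(["1", "and", "2", "and", "3"], parse_number, {"and"})
#   -> ("and", ("and", 1, 2), 3), i.e. a left-leaning tree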
relationalExpr = addExpr [ relation addExpr ] | def relationalExpr( ): # MAKE SURE I USED THE RIGHT LOGIC FOR THIS
tok = tokens.peek( )
if debug: print("relationalExpr: ", tok)
left = addExpr( )
expr = ""
tok = tokens.peek( )
if tok in relations:
rel = relation( ) # expecting a relation to start off
		right = addExpr( ) # the grammar expects another addExpr to the right of the relation
expr = BinaryExpr( rel, left, right )
return expr #fix this for syntax tree maybe
return left | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_relation(wn, source, target, new_rel, change_list=None):\n insert_rel(source, new_rel, target, change_list)\n if new_rel in inverse_synset_rels:\n inv_rel_type = inverse_synset_rels[new_rel]\n insert_rel(target, inv_rel_type, source, change_list)",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def as_relational(self, symbol):\n return And(*[set.as_relational(symbol) for set in self.args])",
"def polyrelsimp(expr):\n return expr.replace(lambda rel: isinstance(rel, Rel),\n lambda rel: expand_polyeq(rel))",
"def as_relational(self, symbol):\n A, B = self.args\n\n A_rel = A.as_relational(symbol)\n B_rel = B.as_relational(symbol)\n\n return Xor(A_rel, B_rel)",
"def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)",
"def createRelation(rid, rlabel, list, x, y):\n relation = Relation(rid, rlabel, x, y)\n list.append(relation)",
"def add_relation(self, qid, relation, qid2):\n if self._kg_symbols is not None:\n self._kg_symbols.add_relation(qid, relation, qid2)",
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def _setRelation(self, node):\n if getattr(self, \"relation\", None):\n element = etree.SubElement(node, 'relation')\n element.text = getattr(self, \"relation\")",
"def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related",
"def _follow_relation_set(self, rel_expr,\n inverted):\n if not self.context.is_group(rel_expr.type_name):\n raise RelationNameError(rel_expr.type_name,\n 'Expression type is not a relation group.')\n g = self.context.get_group(rel_expr.type_name)\n if inverted == +1:\n with tf.name_scope('follow_group_%s' % rel_expr.type_name):\n return (self.follow(g.subject_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.object_rel)\n else:\n with tf.name_scope('follow_group_%s_inverse' % rel_expr.type_name):\n return (self.follow(g.object_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.subject_rel)",
"def Relation(self, paren=False):\n left = self.Addition(paren)\n if self.currtok[1].name in {\"GREATERTHAN\", \"LESSTHAN\", \"LET\", \"GET\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Addition(paren)\n left = BinaryExpr(op, left, right, paren)\n return left",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def __push_relation(self, id1, id2, id1_name, id2_name, table):\n # case: No entry about relation is in DB yet\n if not self.__postgre_db.is_in_table(table, id1_name + \"=\" + str(\n id1)):\n self.__postgre_db.insert(table, {\n id1_name: id1, id2_name: [id2], \"aggregation\": 0})\n\n # case: Entry about single_pattern is in DB\n else:\n old_list = self.__postgre_db.get(table, id1_name + \"=\" + str(\n id1), id2_name)\n new_list = list(set(old_list + [id2]))\n self.__postgre_db.update(\n table, id2_name + \"=\" + add_quotes(replace_brackets(str(new_list))), id1_name + \"=\" + str(id1))",
"def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def add_sense_relation(wn, source, target, new_rel, change_list=None):\n insert_sense_rel(wn, source, new_rel, target, change_list)\n if new_rel in inverse_sense_rels:\n inv_rel_type = inverse_sense_rels[new_rel]\n insert_sense_rel(wn, target, inv_rel_type, source, change_list)",
"def as_relational(self, x):\n x = sympify(x)\n if self.right_open:\n right = x < self.end\n else:\n right = x <= self.end\n if self.left_open:\n left = self.start < x\n else:\n left = self.start <= x\n return And(left, right)",
"def __le__(self, other):\n return _generate_relational_expression(_le, self, other)",
"def add_unary_constraint(self, var, relation, integer):\n constraint = lambda left_value: relation(left_value, integer)\n if var in self.__constraints:\n self.__constraints[var].append(constraint)\n else:\n self.__constraints[var] = [constraint]",
"def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)",
"def add_expr_to_comp(self, comp, expr):\n if not isinstance(comp, cellml_component):\n comp = self.model.get_component_by_name(comp)\n if not hasattr(comp, u'math'):\n # Create the math element\n math = comp.xml_create_element(u'math', NSS[u'm'])\n comp.xml_append(math)\n # Append this expression\n comp.math.xml_append(expr)",
"def __add__(self, right):\n # TODO: move over to any coercion model!\n if not isinstance(right, MatrixMorphism):\n R = self.base_ring()\n return self.parent()(self.matrix() + R(right))\n if not right.parent() == self.parent():\n right = self.parent()(right)\n M = self.matrix() + right.matrix()\n return self.domain().Hom(right.codomain())(M)",
"def create_relation(self, left_node, rel, right_node):\n rel = Relationship(left_node, rel, right_node)\n self.graph.merge(rel)\n return",
"def add_relation(term_pair, term_info, tokenized_text, bags):\n tokenized_text = tokenized_text.copy()\n \n found_relation = False\n term_pair_key = \" -> \".join(term_pair)\n \n # restrict to closest occurence of the two terms in the sentence\n indices = get_closest_match(term_info[term_pair[0]][\"indices\"], \n term_info[term_pair[1]][\"indices\"])\n \n term1_text = \" \".join(tokenized_text[indices[0][0]:indices[0][1]])\n term2_text = \" \".join(tokenized_text[indices[1][0]:indices[1][1]])\n \n # tag term pair in the sentence\n tokenized_text = \" \".join(insert_relation_tags(tokenized_text, indices))\n \n if term_pair_key in bags[\"no-relation\"]:\n term_ix = bags[\"no-relation\"].index(term_pair_key)\n bags[\"no-relation\"][term_ix][\"sentences\"].append(tokenized_text)\n else:\n bags[\"no-relation\"].append({term_pair_key: {\"sentences\": [tokenized_text], \"relation\": \"no-relation\"}})\n \n return bags",
"def as_relational(self, symbol):\n A, B = self.args\n\n A_rel = A.as_relational(symbol)\n B_rel = Not(B.as_relational(symbol))\n\n return And(A_rel, B_rel)",
"def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)",
"def __add__(self, right_rule):\n self.__subrules.append(right_rule)\n return self",
"def add_binary_constraint(self, left_var, relation, right_var):\n self.__constraints[(left_var, right_var)] = relation"
] | [
"0.6230691",
"0.61780995",
"0.60156584",
"0.590059",
"0.58807737",
"0.5856698",
"0.5843209",
"0.58185554",
"0.5784956",
"0.5745773",
"0.57166684",
"0.5676181",
"0.5653579",
"0.5636797",
"0.5615812",
"0.5576591",
"0.5564806",
"0.5537048",
"0.55229557",
"0.5472008",
"0.54082906",
"0.5404519",
"0.53998846",
"0.53828263",
"0.5378791",
"0.53647834",
"0.5357061",
"0.5326924",
"0.5315686",
"0.5312122"
] | 0.7208437 | 0 |
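relationalExpr differs from the loops above in that the relation is optional and appears at most once, which is what the [ ... ] brackets in the grammar mean. A minimal sketch of that optional-suffix shape, using hypothetical names and a plain token list:

RELATIONS = {"<", "<=", ">", ">=", "==", "!="}

def parse_relational(tokens, parse_operand):
    left = parse_operand(tokens)
    if tokens and tokens[0] in RELATIONS:   # at most one relation is consumed
        rel = tokens.pop(0)
        right = parse_operand(tokens)
        return (rel, left, right)
    return left

# parse_relational(["1", "<", "2"], lambda t: int(t.pop(0))) -> ('<', 1, 2)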
factor = number | '(' expression ')' | def factor( ):
tok = tokens.peek( )
if debug: print ("Factor: ", tok)
if re.match( Lexer.number, tok ):
expr = Number(tok)
tokens.next( )
tok = tokens.peek( )
return expr
if tok == "(":
tokens.next( ) # or match( tok )
expr = addExpr( )#might need to change to expression( )
tokens.peek( )
tok = match( ")" )
return expr
	if re.match( Lexer.identifier, tok ): # added this to take into account identifiers
expr = VarRef(tok)
tokens.next( )
return expr
if re.match( Lexer.String, tok ): # added this to take into account strings
		expr = String( tok )
		tokens.next( ) # consume the string token, matching the number and identifier branches above
		return expr
error( "Invalid operand" )
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _num_factor(number, factor):\n assert factor != 0\n return number // factor",
"def make_multiplier(factor):\n return lambda x: factor * x",
"def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'",
"def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'",
"def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'",
"def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'",
"def __init__(self, factor: FunctionType or Number = 1):\n\n if isinstance(factor, Number):\n self.func = lambda x: int(x * factor)\n elif isinstance(factor, FunctionType):\n self.func = lambda x: int(factor(x))",
"def factorize(x: int):\n pass",
"def visit_factor(self, node, children):\n if self.debug:\n print(\"Factor {}\".format(children))\n if len(children) == 1:\n return children[0]\n sign = -1 if children[0] == '-' else 1\n return sign * children[-1]",
"def _factor_non_decimal(value):\n result = 1\n factors = sympy.factorint(value)\n for factor, power in six.iteritems(factors):\n if factor not in [2, 5]:\n result *= factor ** power\n return result",
"def factorize(x):\n pass",
"def parseFactors(cmds):\n print(\"Factor\")\n if cmds[0] == 'D':\n parseExpr(cmds[2:len(cmds)-1])\n elif cmds[0] == '(':\n parseExpr(cmds[1:len(cmds)-1])\n else:\n parseNumber(cmds)",
"def fact(n: \"some non-negative integer\") -> \"n! or 0 if n < 0\":\n if n < 0:\n return 0\n elif n <= 1:\n return 1\n else:\n return n * fact(n - 1)",
"def Factor(self):\n if self.currtok[1].name in {\"MINUS\", \"NOT\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n prime = self.primary()\n return Factor(op, prime)\n return self.primary()",
"def factorial_eval(n):\n lst = list(range(1, n+1))\n return eval(str(lst).replace(', ', '*')[1:-1:])",
"def mul_factor(factors: List[Tuple[int, int]]) -> int:\n n = 1\n for f in factors:\n n *= f[0] ** f[1]\n return n",
"def calceNumerator ( term , numeratorN1 , numeratorN2 ) :\n if term == limit :\n if term % 3 == 0 :\n return ( 2 * int ( term / 3 ) * numeratorN1 ) + numeratorN2\n return numeratorN1 + numeratorN2\n\n multiplier = 1\n if term % 3 == 0 :\n multiplier = 2 * int ( term / 3 )\n numerator = multiplier * numeratorN1 + numeratorN2\n\n return calceNumerator ( term + 1 , numerator , numeratorN1 )",
"def factorize(number, factors, result=None):\n if result is None:\n result = []\n factor = _max_factor(number, factors)\n amount = _num_factor(number, factor)\n remain = _remainder(number, factor)\n result.append((amount, factor))\n if remain == 0:\n return result\n return factorize(remain, factors, result)",
"def make_anonymous_factorial():\n return lambda n: 1 if n == 1 else mul(n, make_anonymous_factorial()(sub(n, 1)))",
"def factor(expr, conj=False):\n if expr in B:\n return expr\n else:\n return expr.factor(conj)",
"def get_factorial(number):\n if number == 1:\n return 1\n else:\n return number * get_factorial(number - 1)",
"def exp(x):\n if isinstance(x, int):\n x = Expression(x)\n return _exp(x)",
"def make_power(number,pow):\r\n def dispatch(x):\r\n if(x==0):\r\n return number\r\n elif(x==1):\r\n return pow\r\n return dispatch",
"def multiple_of(factor):\n\n class multiple_of(int):\n \"\"\"Int type in [A; B] range.\"\"\"\n\n def __init__(self, k):\n assert int(k) % factor == 0, (k, factor)\n super(multiple_of, self).__init__()\n\n return multiple_of",
"def fact(num: int) -> int:\n return 1 if (num < 1) else num * fact(num - 1)",
"def make_anonymous_factorial():\n return lambda val : (lambda f, v : f(f, v)) (lambda f, v : 1 if v == 0 else mul(v, f(f, sub(v, 1))), val)",
"def factor(n):\r\n\t# Rewritten to align with SAGE. Previous semantics available as factors(n).\r\n\tif (abs(n) == 1): return \"Unable to factor \"+str(n) # Can't deal with units\r\n\tfactspow = []\r\n\tcurrfact = None\r\n\tfor thefact in factors(n):\r\n\t\tif thefact != currfact:\r\n\t\t\tif currfact != None:\r\n\t\t\t\tfactspow += [(currfact,thecount)]\r\n\t\t\tcurrfact = thefact\r\n\t\t\tthecount = 1\r\n\t\telse:\r\n\t\t\tthecount += 1\r\n\tfactspow += [(thefact,thecount)]\r\n\treturn factspow",
"def __mul__(self, factor):\n def mul(output, target, params):\n return self(output, target, params) * factor\n return type(self)(type(self).__reserved_init, mul, factor * (1. if self._fact is None else self._fact), self._name)",
"async def calculate_score(expression, score_factor):\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor",
"def task17_factorial(num):\n result = 1\n for i in range(1, num + 1):\n result *= i\n return result"
] | [
"0.65296596",
"0.6441182",
"0.641148",
"0.641148",
"0.641148",
"0.641148",
"0.63577914",
"0.60211015",
"0.5988639",
"0.5983069",
"0.59670454",
"0.5808318",
"0.5783513",
"0.57786024",
"0.57604265",
"0.5731007",
"0.5716077",
"0.57154644",
"0.5681956",
"0.5650473",
"0.56236184",
"0.56027806",
"0.5599781",
"0.5594414",
"0.5582844",
"0.5572393",
"0.5541159",
"0.547502",
"0.54528815",
"0.5436597"
] | 0.74144316 | 0 |
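The parenthesis branch of factor is where the grammar recurses back into the expression level. A compact sketch of just that branch, with hypothetical names and integer-only atoms in place of Number/VarRef/String:

def parse_factor(tokens, parse_expression):
    tok = tokens.pop(0)
    if tok == "(":
        inner = parse_expression(tokens)   # recurse into the expression level
        closing = tokens.pop(0)
        assert closing == ")", "expected ')'"
        return inner
    return int(tok)   # stand-in for Number(tok); identifiers and strings omitted

# parse_factor(["(", "7", ")"], lambda t: int(t.pop(0))) -> 7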
term = factor { ('' | '/') factor } | def term( ):
tok = tokens.peek( )
if debug: print ("Term: ", tok)
left = factor( )
tok = tokens.peek( )
while tok == "*" or tok == "/":
tokens.next()
right = factor( )
left = BinaryExpr( tok, left, right )
tok = tokens.peek( )
return left | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visit_term(self, node, children):\n if self.debug:\n print(\"Term {}\".format(children))\n term = children[0]\n for i in range(2, len(children), 2):\n if children[i-1] == \"*\":\n term *= children[i]\n else:\n term /= children[i]\n if self.debug:\n print(\"Term = {}\".format(term))\n return term",
"def factor( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"Factor: \", tok)\n\tif re.match( Lexer.number, tok ):\n\t\texpr = Number(tok)\n\t\ttokens.next( )\n\t\ttok = tokens.peek( )\n\t\treturn expr\n\tif tok == \"(\":\n\t\ttokens.next( ) # or match( tok )\n\t\texpr = addExpr( )#might need to change to expression( )\n\t\ttokens.peek( )\n\t\ttok = match( \")\" )\n\t\treturn expr\n\tif re.match( Lexer.identifier, tok ): # added this to take into accout identifiers\n\t\texpr = VarRef(tok)\n\t\ttokens.next( )\n\t\treturn expr\n\tif re.match( Lexer.String, tok ): # added this to take into account strings\n\t\texpr = String( tok )\n\t\treturn expr\n\n\terror( \"Invalid operand\" )\n\treturn",
"def from_term(term):\n if term is None:\n return term\n elif isinstance(term, (six.string_types, int, float)):\n return term\n elif isinstance(term, dict):\n return {k: from_term(v) for k, v in term.items()}\n elif isinstance(term, list):\n return [from_term(t) for i, t in enumerate(term)]\n elif issubclass(term.__class__, (Matcher,)):\n return term.generate()\n else:\n raise ValueError('Unknown type: %s' % type(term))",
"def parseTerms(cmds):\n if len(cmds) != 0:\n print(\"Term\")\n delimit = \"[*|/|%]+\"\n factors = re.split(delimit, cmds[0])\n parseFactors(cmds[0])\n parseTerms(cmds[1:])",
"def Term(self, paren=False):\n left = self.Factor()\n while self.currtok[1].name in {\"TIMES\", \"DIVISION\", \"MOD\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Factor()\n left = BinaryExpr(op, left, right, paren)\n return left",
"def parse_term(self) -> SyntaxNode:\n return self._parse_cat_binary(\"M\", self.parse_value)",
"def quote_poly_term(term: str):\n match = re.match(r\"^(\\w+)\\^(\\d+)$\", term)\n if match:\n return (match[1], int(match[2]))\n else:\n return (term, 1)",
"def get_node_by_term(nodes, term):\n return nodes[sum([ord(c) for c in term]) % len(nodes)]",
"def process(self, term):\n #get the last character\n #is there a possibility multiple punctuation at start and end?\n length = len(term)\n firstChar = term[0:1]\n if str(firstChar).isalnum():\n term = term\n else:\n #print \"cutting first letter \" + firstChar + \" from \" +term\n term = term[1:length]\n #print \"term now \" +term\n #get length again incase punctuation at start and end\n length = len(term)\n lastChar = term[length-1:length]\n if str(lastChar).isalnum():\n term = term\n else:\n #print \"cutting last letter \" + lastChar + \"from \" + term\n term = term[0:length-1]\n #print \" is now \" + term\n\n #now check if there's nothing left, then don't add, if there is, add it\n if term:\n return term\n else:\n return None",
"def helper(s):\n opt = '+'\n stack = []\n cur = 0\n # add an extra opt in the end to trigger the last operation.\n for i, c in enumerate(s):\n if c.isdigit():\n cur = cur * 10 + int(c)\n # make sure the last operation is performed.\n if c != ' ' or i == len(s) - 1:\n if opt in ['+', '-']:\n sign = 1 if opt == '+' else -1\n stack.append(cur * sign)\n elif opt == '*':\n stack[-1] = stack[-1] * cur\n elif opt == '/':\n stack[-1] = int(stack[-1] / cur)\n opt = c\n cur = 0\n return sum(stack)",
"def tokenize(eq):\n\n def push(token):\n if token != \"\":\n if token[0].isdigit():\n tokens.append(int(token))\n else:\n tokens.append(token)\n\n tokens = []\n token = \"\"\n\n for t in eq:\n if t == \" \":\n push(token)\n token = \"\"\n if t == \"+\" or t == \"*\" or t == \"(\" or t == \")\":\n push(token)\n push(t)\n token = \"\"\n elif t.isdigit():\n token += t\n\n push(token)\n return tokens",
"def evaluate_terms(terms):\n expr_terms = [x for x in terms]\n\n while expr_terms.count('^') != 0:\n expr_terms = eval_expon(expr_terms)\n\n while MUL_DIV_RE.search(str(expr_terms)) is not None:\n expr_terms = eval_a_op_b(expr_terms, 'md')\n\n while len(expr_terms) != 1:\n expr_terms = eval_a_op_b(expr_terms, 'pm')\n\n return expr_terms[0]",
"def apply_on_each_term(query: str, function: Callable) -> str:\n\n is_inside_a_term = False\n search_term = ''\n final_query = ''\n for character in query:\n\n if character == '[':\n search_term += character\n is_inside_a_term = True\n continue\n\n if is_inside_a_term:\n search_term += character\n if character == ']':\n search_term = function(search_term)\n final_query += search_term\n search_term = ''\n is_inside_a_term = False\n else:\n final_query += character\n\n return final_query",
"def match_term(self,state,re_term):\r\n return None",
"def cry(s : str) -> CryptolTerm:\n return CryptolTerm(s)",
"def end_term(query):\n if query.endswith(' '):\n return query[query[:-1].rfind(' ')+1:]\n else:\n return query[query.rfind(' ')+1:]",
"def act_on_literal(self, *, value):\n assert not isinstance(value, PolarsTerm)",
"def format_term(term: Union[BNode, Literal, URIRef, Variable]) -> str:\n if isinstance(term, URIRef):\n return str(term)\n elif isinstance(term, BNode):\n return '?v_' + str(term)\n elif isinstance(term, Literal):\n return format_literal(term)\n else:\n return term.n3()",
"def Factor(self):\n if self.currtok[1].name in {\"MINUS\", \"NOT\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n prime = self.primary()\n return Factor(op, prime)\n return self.primary()",
"def tf(self, term, text):\n return text.count(term) / len(text)",
"def list_of(term):\n return (\n Optional(\n term +\n ZeroOrMore(Suppress(Literal(',')) + term) +\n Optional(Suppress(Literal(',')))\n )\n )",
"def sans_parens(s):\n s = prep_simple_str(s)\n \n total = s[0]\n \n for c in s[1:]:\n if c == \")\":\n return total\n elif c == \"*\":\n op = lambda a,b: a * b\n elif c == \"+\":\n op = lambda a,b: a + b\n else:\n total = op(total, c)\n return total",
"def compile_word(word):\r\n\r\n if word.isupper():\r\n terms = [('%s*%s' % (10**i, d)) for (i,d) in enumerate(word[::-1])]\r\n return '(' + '+'.join(terms) + ')'\r\n else:\r\n return word",
"def sanitize_input(term: str) -> str:\n return term.strip().replace(\"*\", \"\").replace(\"'\", \"\\\\'\").replace(\"~\", \"\")",
"def _split_term(term):\n if '*' in term:\n variable_and_parameter = term.split('*')\n variable_and_parameter = [label.strip()\n for label in variable_and_parameter]\n else:\n raise TermNotProduct(term)\n\n if len(variable_and_parameter) != 2:\n raise TermNotProduct(term)\n\n return variable_and_parameter",
"def fold_term(self, term):\n if isinstance(term, Var):\n return self.var(term.name)\n elif isinstance(term, Const):\n return self.const(term.name)\n elif isinstance(term, Dist):\n return self.dist(term.name)\n elif isinstance(term, Func):\n return self.func(\n term.funcname,\n tuple( self.fold_term(x)\n for x in term.args ))\n elif isinstance(term, Eq):\n return self.encode_eq(term)\n elif isinstance(term, Disj):\n return self.encode_disj(term)\n elif isinstance(term, Conj):\n return self.encode_conj(term)\n raise RuntimeError('Invalid term {}'.format(term))",
"def cry_f(s : str) -> CryptolTerm:\n return CryptolTerm(to_cryptol_str_customf(s, frames=1))",
"def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)",
"def next_term(x):\n if x%2 == 0:\n return x/2\n else:\n return 3*x + 1",
"def fold_term(self, init_repr, term):\n if isinstance(term, Var):\n return self.var(init_repr, term.name)\n elif isinstance(term, Const):\n return self.const(init_repr, term.name)\n elif isinstance(term, Dist):\n return self.dist(init_repr, term.name)\n elif isinstance(term, Func):\n return self.func(\n init_repr,\n term.funcname,\n [ self.fold_term(init_repr, x)\n for x in term.args ])\n elif isinstance(term, Eq):\n return self.eq(\n init_repr,\n term.pos,\n self.fold_term(init_repr, term.t1),\n self.fold_term(init_repr, term.t2))\n elif isinstance(term, Disj):\n return self.disj(\n init_repr,\n term.role,\n [ self.fold_term(init_repr, eq)\n for eq in term.eqs ])\n elif isinstance(term, Conj):\n return self.conj(\n init_repr,\n [ self.fold_term(init_repr, disj)\n for disj in term.disjs ])"
] | [
"0.6671328",
"0.5942621",
"0.578661",
"0.5665476",
"0.5546639",
"0.55136436",
"0.55068946",
"0.5483381",
"0.5461509",
"0.5405435",
"0.53730965",
"0.5351918",
"0.5335714",
"0.5334143",
"0.5312731",
"0.5308194",
"0.52843577",
"0.52693665",
"0.52682096",
"0.52596307",
"0.52582663",
"0.5255085",
"0.5236891",
"0.520382",
"0.5183893",
"0.51641446",
"0.515877",
"0.5132851",
"0.5126308",
"0.50719494"
] | 0.7452738 | 0 |
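Once term and its siblings fold tokens into (op, left, right) tuples like the sketches above, evaluating the tree is a short recursion. This evaluator is an illustrative add-on, not part of any stored document:

def evaluate(node):
    # Leaves are plain numbers; interior nodes are (op, left, right) tuples.
    if isinstance(node, tuple):
        op, left, right = node
        a, b = evaluate(left), evaluate(right)
        return {"+": a + b, "-": a - b, "*": a * b, "/": a / b}[op]
    return node

# evaluate(("*", ("+", 1, 2), 4)) -> 12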
addExpr = term { ('+' | '') term } | def addExpr( ):
tok = tokens.peek( )
if debug: print ("addExpr: ", tok)
left = term( )
tok = tokens.peek( )
while tok == "+" or tok == "-":
tokens.next()
right = term( )
left = BinaryExpr( tok, left, right )
tok = tokens.peek( )
return left | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(text):\n orig = dispb[\"text\"]\n new = orig + text\n ops = [\"+\",\"-\",\"*\",\"/\"]\n # conditions\n # length 21\n if len(new) > 21:\n dispb[\"text\"] = orig\n return 0\n \n # one calc at a time\n if len(orig) > 0:\n if (orig[-1] in ops) & (text in ops):\n dispb[\"text\"] = orig\n return 0\n\n dispb[\"text\"] = new\n return 0",
"def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)",
"def _add_to_expression(self, item):\n addition = ' {%s} ' % item.text()\n expression = self.ui.expression\n pos = expression.cursorPosition()\n text = str(expression.displayText())\n expression.setText(text[:pos] + addition + text[pos:])",
"def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in {\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left",
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)",
"def _append_operator(self, operator):",
"def math_add():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(add(a, b))",
"def adp(lhs,rhs):\n test=lambda s: s[0]=='`'\n assert test(lhs)==True,'error: lhs should be non-terminal'\n lhs=so.getSymbol(lhs[1:],terminal=False,autocreate=True)\n rhs=[so.getSymbol(s[1:],False,True) if test(s) else so.getSymbol(s,True,True) for s in rhs]\n return addProduction(lhs,rhs)",
"def quote_plus(s, safe='', encoding=None, errors=None):\n if ' ' in s:\n s = quote(s, safe + ' ', encoding, errors)\n return s.replace(' ', '+')\n return quote(s, safe, encoding, errors)",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def addition():\r\n error_handler()\r\n f1.delete(0, END)\r\n a1 = float(operand.get())\r\n a2 = float(operator.get())\r\n result = a1 + a2\r\n f1.insert(10, str(result))",
"def __iadd__(self, term):\n self.add(term)\n return self",
"def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)",
"def __add(thiselement, char):\n if thiselement == None:\n return char\n return thiselement + char",
"def add(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n sum = str(args[0] + args[1])\n return sum",
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def test_sqpp_plus_expr1_minus_paren_expr2(self):\n self.assertEqual(self.parser.parse_query(\"+ expr1 - (expr2)\"),\n ['+', 'expr1', '-', 'expr2'])",
"def visit_expression(self, node, children):\n if self.debug:\n print(\"Expression {}\".format(children))\n expr = 0\n start = 0\n # Check for unary + or - operator\n if text(children[0]) in \"+-\":\n start = 1\n\n for i in range(start, len(children), 2):\n if i and children[i - 1] == \"-\":\n expr -= children[i]\n else:\n expr += children[i]\n\n if self.debug:\n print(\"Expression = {}\".format(expr))\n\n return expr",
"def add_concat(infix_regex: str):\n\n result = \"\"\n\n # we use None to symbolize the start of the string\n cant_concat_from = ['(', '|', None]\n cant_concat_to = ['*', '+', ')', '|']\n last_char = None\n\n for char in infix_regex:\n if char not in cant_concat_to and last_char not in cant_concat_from:\n result += '.'\n result += char\n last_char = char\n\n return result",
"def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex",
"def add_expr_to_comp(self, comp, expr):\n if not isinstance(comp, cellml_component):\n comp = self.model.get_component_by_name(comp)\n if not hasattr(comp, u'math'):\n # Create the math element\n math = comp.xml_create_element(u'math', NSS[u'm'])\n comp.xml_append(math)\n # Append this expression\n comp.math.xml_append(expr)",
"def add_subtract(statement):\r\n operators = list(filter(lambda x: x in ('+', '-'), statement))\r\n index = statement.index(operators[0])\r\n\r\n # Find operands\r\n op1, op2 = find_operands(statement, index)\r\n\r\n # Perform operation\r\n if operators[0] == '+':\r\n result = op1 + op2\r\n elif operators[0] == '-':\r\n result = op1 - op2\r\n\r\n # Replace operator and operands with result\r\n remove_and_replace(statement, index, result)\r\n\r\n return statement",
"def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def plus(self, a, b):\n return a + b",
"def add(a, b):\n c = Calculator()\n result = c.add(a, b)\n click.echo('{} + {} = {}'.format(a, b, result))",
"def parse_single_op_string(opstring) :\n ops = {'+' : \"plus\",\n '?' : \"opt\" , \n '*' : \"star\"}\n return '('.join(ops[c] for c in reversed(opstring)) + '('",
"def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")"
] | [
"0.6888404",
"0.63378376",
"0.63330656",
"0.6294168",
"0.6236394",
"0.6221723",
"0.6219546",
"0.6147867",
"0.61287045",
"0.6115096",
"0.60734725",
"0.60724515",
"0.60708827",
"0.6070692",
"0.60704505",
"0.60387814",
"0.60252726",
"0.5984255",
"0.5975357",
"0.5954446",
"0.59498906",
"0.5937147",
"0.5928731",
"0.5924677",
"0.5919146",
"0.58840847",
"0.5876093",
"0.5847572",
"0.58436257",
"0.5836227"
] | 0.8184933 | 0 |
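addExpr calling term for its operands is what gives '*' and '/' higher precedence than '+' and '-'. The two-level, self-contained sketch below makes that visible; the names and the integer-only atoms are assumptions made for brevity.

def parse_expr(tokens):
    left = parse_term(tokens)
    while tokens and tokens[0] in {"+", "-"}:
        op = tokens.pop(0)
        left = (op, left, parse_term(tokens))
    return left

def parse_term(tokens):
    left = int(tokens.pop(0))
    while tokens and tokens[0] in {"*", "/"}:
        op = tokens.pop(0)
        left = (op, left, int(tokens.pop(0)))
    return left

# parse_expr(["1", "+", "2", "*", "3"]) -> ('+', 1, ('*', 2, 3))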
whileStatement = "while" expression block | def parseWhileStatement( ): # parse rountine for while and uses the while class to print out the appropriate string
tok = tokens.peek( )
if debug: print( "whileStatement: ", tok )
start = match( "while" )
expr = expression( )
blk = parseBlock( )
tok = tokens.peek( )
whileString = whileStatement( start, expr, blk )
return whileString | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visit_while(self: Parser, node: doc.While) -> None:\n with self.var_table.with_frame():\n cond = self.eval_expr(node.test)\n with T.While(cond):\n self.visit_body(node.body)",
"def _While(self, t):\n self.fill(\"while (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, \"While else not supported\")",
"def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the body.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=body_node)\n else:\n branches.update(else_=else_node)\n else:\n branches.update(enter=body_node, else_=else_node, error=self._raise)\n\n loop_node = self._ast_node(statement, **branches)\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node",
"def link_while_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.body)",
"def WhileStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n return whileStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))",
"def gen_while(self, stmt: statements.While) -> None:\n condition_block = self.builder.new_block()\n body_block = self.builder.new_block()\n final_block = self.builder.new_block()\n self.break_block_stack.append(final_block)\n self.continue_block_stack.append(condition_block)\n self.builder.emit_jump(condition_block)\n self.builder.set_block(condition_block)\n self.gen_condition(stmt.condition, body_block, final_block)\n self.builder.set_block(body_block)\n self.gen_stmt(stmt.body)\n self.builder.emit_jump(condition_block)\n self.builder.set_block(final_block)\n self.break_block_stack.pop()\n self.continue_block_stack.pop()",
"def syntax_while():\n i = 0\n while i < 5:\n print(i)\n i += 1\n\n ## Output\n # 0\n # 1\n # 2\n # 3\n # 4",
"def compile_while(self) -> None:\n self._consume('while')\n self._consume('(')\n\n while_lbl = f\"WHILE_{self._while_count}\"\n while_false_lbl = f\"WHILE_FALSE{self._while_count}\"\n self._while_count += 1\n self.writer.write_label(while_lbl)\n\n self.compile_expression()\n self._consume(')')\n\n self._consume('{')\n self.writer.write_if(while_false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(while_lbl)\n self.writer.write_label(while_false_lbl)\n\n self._consume('}')",
"def add_while(self, input_name, body_function, cond_function, name=None):\n return self._build_op(\n 'while', [input_name],\n name=name,\n attr={\n 'body_function': body_function,\n 'cond_function': cond_function\n })",
"def test_42_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1+true) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: BinaryOp(+,IntLiteral(1),BooleanLiteral(True))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,442))",
"def test_41_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: While(IntLiteral(1),[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,441))",
"def compile_while(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'while' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.code_writer.write_label(lab1)\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.code_writer.write_if(lab2)\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab1)\r\n self.code_writer.write_label(lab2)",
"def compile_while(self):\r\n start_label = \"WHILE_\" + str(self.__while_count)\r\n end_label = \"WHILE_END_\" + str(self.__while_count)\r\n self.__while_count += 1\r\n self.__advance(n=2) # Advance after the '(' token\r\n self.__vmwriter.write_label(start_label)\r\n self.compile_expression()\r\n self.__advance(n=2) # Advance after the '{' token\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(end_label)\r\n self.compile_statements()\r\n self.__advance() # Advance after the '}' token\r\n self.__vmwriter.write_goto(start_label)\r\n self.__vmwriter.write_label(end_label)",
"def compile_while(self):\n\n\t\txml = \"<whileStatement>\\n\" + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol() + '</whileStatement>\\n'\n\t\tself.outfile.write(xml)",
"def visit_WhileNode(self, node: WhileNode, symbol_table: SymbolTable) -> None:\n while True:\n if self.visit(node.cond, symbol_table).value == 0:\n break\n else:\n for expr in node.body:\n if expr is not None:\n if isinstance(expr, ReturnNode):\n return expr\n res = self.visit(expr, symbol_table)\n if isinstance(res, ReturnNode):\n return res",
"def _in_while_loop(control_flow_node_map, op_name):\n return op_name in control_flow_node_map and \"LoopCond\" in control_flow_node_map[op_name]",
"def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret",
"def while_(self):\n if self.line.startswith('wh'):\n if self.line.endswith('while') is False:\n return True",
"def while_loop_op(op):\n return (control_flow_util.IsLoopSwitch(op) or\n control_flow_util.IsLoopMerge(op) or\n control_flow_util.IsLoopEnter(op) or\n control_flow_util.IsLoopExit(op) or\n TensorTracer.loop_cond_op(op) or\n op.type in ('RefNextIteration', 'NextIteration'))",
"def whileLoop(count):\n result=''\n while count>=1:#判断条件\n result+=\"sorry\"\n count-=1#while的步进\n return result",
"def syntax_while_break():\n i = 0\n while i < 5:\n if i >= 3:\n break\n print(i)\n i += 1\n\n ## Output\n # 0\n # 1\n # 2",
"def whilestmt(self, w):\n invs = self.assemble_invariants(w)\n b_mid = self.flatten([Tree('assume', [w[0]]), w[-1], Tree('assert', invs), Tree('assume', [Tree('const_false', [])])])\n b = self.flatten([Tree('assert', invs),\n self.assemble_havoc(w),\n Tree('assume', invs),\n Tree('wpor', [Tree('block', b_mid), Tree('assume', self._not(w[0]))])])\n return b",
"def while_loop(self):\n if self._loop is None:\n self._loop = self._while_loop()\n return self._loop\n return self._loop",
"def convert_while(self, condition):\n\n # Run super definition\n condition = super().convert_while(condition)\n\n # Make while template\n while_template = \"while {cond}:\"\n\n # Replace logical operators\n condition = self.replace_logical_ops(condition, direction=\"from\")\n\n # Return converted if statement\n return [while_template.format(cond=condition)], []",
"def syntax_while_continue():\n i = 0\n while i < 5:\n if i % 2 == 1:\n i += 1\n continue\n print(i)\n i += 1\n\n ## Output\n # 0\n # 2\n # 4",
"def DoWhile(name, condition_blob_or_net, nets_or_steps):\n condition_not_net, stop_blob = NotNet(condition_blob_or_net)\n if isinstance(condition_blob_or_net, core.Net):\n nets_or_steps = _AppendNets(\n nets_or_steps, condition_blob_or_net, condition_not_net)\n else:\n nets_or_steps = _AppendNets(nets_or_steps, condition_not_net)\n\n # If stop_blob is pre-set to True (this may happen when DoWhile() is\n # called twice), the loop will exit after executing the first net/step\n # in nets_or_steps. This is not what we want. So we use BootNet to\n # set stop_blob to False.\n bool_net = BoolNet((stop_blob, False))\n return Do(name + '/DoWhile', bool_net, core.scoped_execution_step(\n _get_next_step_name('DoWhile-inner', name),\n nets_or_steps,\n should_stop_blob=stop_blob,\n ))",
"def While(name, condition_blob_or_net, nets_or_steps):\n condition_not_net, stop_blob = NotNet(condition_blob_or_net)\n if isinstance(condition_blob_or_net, core.Net):\n nets_or_steps = _PrependNets(\n nets_or_steps, condition_blob_or_net, condition_not_net)\n else:\n nets_or_steps = _PrependNets(nets_or_steps, condition_not_net)\n\n def while_step(control_name):\n return core.scoped_execution_step(\n _get_next_step_name(control_name, name),\n nets_or_steps,\n should_stop_blob=stop_blob,\n )\n\n if _IsNets(nets_or_steps):\n # In this case, while_step has sub-nets:\n # [condition_blob_or_net, condition_not_net, nets_or_steps]\n # If stop_blob is pre-set to True (this may happen when While() is\n # called twice), the loop will exit after executing\n # condition_blob_or_net. So we use BootNet to set stop_blob to\n # False.\n bool_net = BoolNet((stop_blob, False))\n return Do(name + '/While', bool_net, while_step('While-inner'))\n else:\n return while_step('While')",
"def while_do(condition: Callable[[Any], bool], source: ObservableBase) -> ObservableBase:\n from ..operators.observable.whiledo import while_do\n return while_do(condition, source)",
"def is_while(self, file, i):\n\n # Save line to local variable\n line = file[i].strip()\n\n # If line starts with while and ends with ':' return True, else False\n if line.startswith(\"while\") and line.endswith(\":\"):\n return True\n return False",
"def while_loop(n):\n\tprint(\"\\n\\nlet's see what a while loop is\\n\")\n\twhile n > 0:\n\t\tprint(f\"n = {n}\")\n\t\tn -= 1"
] | [
"0.806411",
"0.7875308",
"0.76918495",
"0.7558932",
"0.74995136",
"0.7440557",
"0.7426091",
"0.73713285",
"0.7269703",
"0.72638226",
"0.7247464",
"0.7088606",
"0.6975367",
"0.69033563",
"0.6820734",
"0.68020827",
"0.66737217",
"0.6664865",
"0.6610299",
"0.6537933",
"0.63982236",
"0.6364233",
"0.6342724",
"0.6286428",
"0.6284967",
"0.6202966",
"0.61382717",
"0.6128669",
"0.6100077",
"0.60431015"
] | 0.7981215 | 1 |
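Statement parsers like parseWhileStatement follow a fixed shape: match the keyword, parse the condition, parse the block, and wrap the pieces in an AST node. A minimal sketch of that shape with hypothetical helpers passed in as callables:

class WhileNode:
    # Stand-in for the whileStatement AST class used in the document above.
    def __init__(self, cond, body):
        self.cond = cond
        self.body = body

def parse_while(tokens, parse_expression, parse_block):
    kw = tokens.pop(0)
    assert kw == "while", "expected 'while'"
    cond = parse_expression(tokens)
    body = parse_block(tokens)
    return WhileNode(cond, body)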
ifStatement = "if" expression block [ "else" block ] | def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string
tok = tokens.peek( )
if debug: print( "ifStatement: ", tok )
start = match( "if" )
expr = expression( )
blk = parseBlock( )
elseblk = None
tok = tokens.peek( )
if tok == "else":
match( "else" )
elseblk = parseBlock( )
return ifStatement(expr, blk, elseblk) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()",
"def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"",
"def IF(logical_statement, expression_true, expression_false):\n if(type(logical_statement) == bool):\n if(logical_statement == True):\n return(expression_true)\n else:\n return(expression_false)\n else:\n print('Invalid type: logical statement does not evaluate to True or False.')",
"def test_if_paren_statement():\n r = convert_code(\n \"{if (foo and bar) or foo and (bar or (foo and bar))}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if (foo and bar) or foo and (bar or (foo and bar)) %}\\nbar\\n{% else %}\\nfoo{% endif %}\"",
"def conditional(self) -> global___Statement.Conditional:",
"def test_if_elseif_else_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo\\n{else}bar{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo\\n{% else %}bar{% endif %}\"",
"def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"",
"def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)",
"def test_if_else_statement():\n r = convert_code(\"{if foo}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% else %}\\nfoo{% endif %}\"",
"def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def test_if_elseif_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo{% endif %}\"",
"def IfStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n if self.currtok[1].name == \"else\":\n self.currtok = next(self.tg)\n state2 = self.Statement()\n return ifelseStmt(express, state, state2)\n else:\n return ifStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))",
"def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? \")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)",
"def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)",
"def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)",
"def ifelse(test, if_true, if_false):\n if test:\n return if_true\n else:\n return if_false",
"def compile_if(self) -> None:\n self._consume('if')\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n\n end_lbl = f'IF_END_{self._if_count}'\n false_lbl = f'IF_FALSE_{self._if_count}'\n self._if_count += 1\n\n self._consume('{')\n self.writer.write_if(false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(end_lbl)\n self.writer.write_label(false_lbl)\n\n self._consume('}')\n\n if self._get_current_token() == 'else':\n self._consume('else')\n self._consume('{')\n self.compile_statements()\n self._consume('}')\n\n self.writer.write_label(end_lbl)",
"def compile_if(self):\n\n\t\txml = '<ifStatement>\\n' + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\tself.outfile.write('</statements>\\n' + self.tokenizer.symbol())\n\n\t\tif self.tokenizer.get_token() == 'else':\n\t\t\tself.compile_else()\n\n\t\tself.outfile.write('</ifStatement>\\n')",
"def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))",
"def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))",
"def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))",
"def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"",
"def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))",
"def _analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)",
"def test_if_string_statement():\n r = convert_code(\n \"{if 'hello'}\\nbar\\n{/if}\")\n assert r == \"{% if 'hello' %}\\nbar\\n{% endif %}\"",
"def _ifelse(self):\n debug.show(\"ifelse:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 3:\n falseCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n trueCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"ifelse:True\")\n self.evaluate(trueCode)\n else:\n debug.show(\"ifelse:False\")\n self.evaluate(falseCode)\n else:\n debug.err(\"not enough items on the stack\")\n return None"
] | [
"0.75067496",
"0.71437025",
"0.69992465",
"0.69453186",
"0.6932306",
"0.6923417",
"0.6896828",
"0.68624914",
"0.6846336",
"0.68457514",
"0.683607",
"0.683607",
"0.683607",
"0.683607",
"0.6810346",
"0.68025327",
"0.6774503",
"0.6755583",
"0.6644169",
"0.66357064",
"0.6629795",
"0.66026986",
"0.6570067",
"0.6538245",
"0.6513971",
"0.6508838",
"0.6467018",
"0.6465875",
"0.64132684",
"0.63112146"
] | 0.7603634 | 0 |
assign = ident "=" expression eoln | def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string
tok = tokens.peek( )
if debug: print( "assign: ", tok )
if re.match( Lexer.identifier, tok ):
ident = VarRef( tok )
else:
error( "Invalid identifier" )
tok = tokens.next( )
equals = match( "=" )
tok = tokens.peek( )
expr = expression( )
match( ";" )
equals = VarRef( equals )
statement = assign( equals, ident, expr )
return statement | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)",
"def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))",
"def test_45_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:real; begin x:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,445))",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))",
"def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))",
"def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node",
"def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)",
"def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")",
"def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")",
"def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")",
"def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()",
"def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node",
"def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)",
"def visit_Assign(self, node):\n var_name = node.left.value\n self.VARIABLES[var_name] = self.visit(node.right)",
"def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment",
"def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False",
"def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))",
"def syntax_var_assign():\n a = 'Hello'\n print(f'{a} is stored at {hex(id(a))}')\n a = \"World\"\n print(f'{a} is stored at {hex(id(a))}')\n\n ## Output\n # Hello is stored at 0x10d251340\n # World is stored at 0x10d251378\n\n ## Notes\n # id()\n # Return the “identity” of an object. This is an integer (or long integer) which is guaranteed\n # to be unique and constant for this object during its lifetime.",
"def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))",
"def create_Assign(left_hand_side, right_hand_side):\n right_hand_side.ctx = ast.Load()\n left_hand_side.ctx = ast.Store()\n return ast.Assign(targets=[left_hand_side], value=right_hand_side)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node",
"def assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e"
] | [
"0.7246561",
"0.6989228",
"0.6973773",
"0.6966724",
"0.6946717",
"0.6939313",
"0.6896663",
"0.6890361",
"0.6872121",
"0.67357904",
"0.6725393",
"0.6725393",
"0.6697676",
"0.66803247",
"0.6641363",
"0.6609404",
"0.6593395",
"0.65624285",
"0.648081",
"0.63984084",
"0.638135",
"0.6370385",
"0.63329434",
"0.6301359",
"0.6300919",
"0.62792206",
"0.6184669",
"0.6113256",
"0.6111942",
"0.6094138"
] | 0.74569654 | 0 |
statement = ifStatement | whileStatement | assign | def statement( ): # parse routine for statement that makes sure the token is one of the following; eventually an error will be caught
tok = tokens.peek( )
if debug: print( "statement: ", tok )
if tok == "if":
stat = parseIfStatement( )
return stat
elif tok == "while":
stat = parseWhileStatement( )
return stat
else:
stat = parseAssign( )
return stat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def link_while_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.body)",
"def statement_eval(node, table):\n\n if node.kind == \"MOD_OP\":\n table = mod_op_eval(node, table)\n\n elif node.kind == \"SWAP_OP\":\n table = swap_op_eval(node, table)\n\n elif node.kind == \"FROM_LOOP\":\n block_node = node.block\n\n # TODO: check start condition\n\n while True:\n # Execute the block.\n table = block_eval(block_node, table)\n\n # Break if the end condition is satisfied.\n if expr_eval(node.end_condition, table):\n break\n\n elif node.kind == \"FOR_LOOP\":\n var_dec = node.var_declaration\n until_node = node.end_condition\n increment_node = node.increment_statement\n\n # Initialize the variable.\n table[var_dec.name] = expr_eval(var_dec.expr, table)\n\n while True:\n # Execute the block and increment statement.\n if not node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n \n table = block_eval(node.block, table)\n\n if node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n\n # Break if the end condition is satisfied.\n if table.refs[until_node.name] == expr_eval(until_node.expr, table):\n break\n\n table = var_condition_eval(until_node, table)\n\n elif node.kind == \"IF\":\n # Check the condition; if it fails, execute the\n # 'false' branch if it exists.\n\n if expr_eval(node.condition, table):\n table = block_eval(node.true, table)\n elif \"false\" in node.data:\n table = block_eval(node.false, table)\n\n elif node.kind == \"DO/UNDO\":\n # Do the action_block, then do the yielding block,\n # then undo the action block.\n table = block_eval(node.action_block, table)\n\n if \"yielding_block\" in node.data:\n table = block_eval(node.yielding_block, table)\n\n table = block_eval(inverter.unblock(node.action_block), table)\n\n elif node.kind == \"RESULT\":\n # Overwrites the variable 'result' with the given expression.\n table[\"result\"] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_DEC\":\n table[node.name] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_CONDITION\":\n table = var_condition_eval(node, table)\n\n elif node.kind == \"BLOCK\":\n table = block_eval(node, table)\n\n elif node.kind == \"FUNCTION_CALL\":\n # Call the function, then update table with the results.\n function = shared.program.functions[node.name]\n\n output = function.evaluate(\n node.backwards,\n node.ref_args,\n [expr_eval(arg, table) for arg in node.ref_args],\n [expr_eval(arg, table) for arg in node.const_args]\n )\n\n # After evaluating the function, the output table will\n # contain changed variables.\n table.update_refs(output)\n\n elif node.kind == \"UN\":\n inverted_node = inverter.unstatement(node.statement)\n table = statement_eval(inverted_node, table)\n\n elif node.kind == \"EXIT\":\n if expr_eval(node.condition, table):\n # We return by raising an exception.\n raise shared.ReturnException(expr_eval(node.value, table))\n\n elif node.kind == \"ENTER\":\n # Do nothing when we actually encounter these.\n pass\n\n return table",
"def conditional(self) -> global___Statement.Conditional:",
"def _While(self, t):\n self.fill(\"while (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, \"While else not supported\")",
"def switch(cond, ift, iff):",
"def visit_while(self: Parser, node: doc.While) -> None:\n with self.var_table.with_frame():\n cond = self.eval_expr(node.test)\n with T.While(cond):\n self.visit_body(node.body)",
"def test_45_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:real; begin x:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,445))",
"def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the body.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=body_node)\n else:\n branches.update(else_=else_node)\n else:\n branches.update(enter=body_node, else_=else_node, error=self._raise)\n\n loop_node = self._ast_node(statement, **branches)\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node",
"def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))",
"def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False",
"def toggle(condition, if_true, if_false):\n return (if_true if condition else if_false)",
"def test_42_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1+true) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: BinaryOp(+,IntLiteral(1),BooleanLiteral(True))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,442))",
"def __init__(self, depth, condition, body_statement):\n super(WhileStatement, self).__init__(depth)\n self.condition = condition\n self.body_statement = body_statement",
"def asserter(stmt):\n if not stmt:\n raise AssertionError('CheckReadBuffer case failed')",
"def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))",
"def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))",
"def link_if_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.true_body)\n if stmt.false_body is not None:\n self.link_stmt(stmt.false_body)",
"def Assignment(self):\n id = self.primary()\n if self.currtok[1].name == \"DECLERATION\":\n self.currtok = next(self.tg)\n if self.functions.get(self.currtok[0]) is not None:\n\n express = self.FunctionCall()\n return assignmentStmt(id, express)\n else:\n express = self.Expression()\n\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return assignmentStmt(id, express)\n raise SLUCSyntaxError(\"ERROR: Missing Semicolon on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing assignment on line {0}\".format(str(self.currtok[2] - 1)))",
"def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)",
"def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))",
"def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)",
"def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))",
"def multi_statement() -> None:\n pass; print(\"hello\")",
"def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))",
"def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)",
"def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))",
"def test_41_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: While(IntLiteral(1),[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,441))",
"def stmts(obj, next, token):\n while token is not EOF:\n token = assignlist(obj, next, token)"
] | [
"0.6292355",
"0.6017129",
"0.58480483",
"0.5790392",
"0.57717943",
"0.57585835",
"0.5709609",
"0.5649995",
"0.56101805",
"0.5588036",
"0.5515717",
"0.550607",
"0.5487956",
"0.5476171",
"0.5472926",
"0.5407076",
"0.54064995",
"0.54023397",
"0.5397795",
"0.5369516",
"0.53665644",
"0.5356292",
"0.53379697",
"0.5337857",
"0.53210163",
"0.5304556",
"0.52869856",
"0.5272078",
"0.52514863",
"0.5237886"
] | 0.635177 | 0 |
stmtList = { statement } | def stmtList( ):
tok = tokens.peek( )
if debug: print( "stmtList: ", tok )
stat = statement( )
return stat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stmts_to_stmt(statements):\n if len(statements) == 1:\n return statements[0]\n array = FakeArray(statements, arr_type=pr.Array.NOARRAY)\n return FakeStatement([array])",
"def __init__(self):\n self.Statement = []",
"def parseStmtList( tokens ):\n\n\ttok = tokens.peek( )\n\tast = list( ) # list that keeps track of the all the returns from the parse rountines \n\twhile tok is not None:\n # need to store each statement in a list\n\t\tstatement = stmtList( )\n\t\tast.append( statement )\n\t\ttok = tokens.peek( )\t\t\n\treturn ast",
"def _initialize_statements(self):\n return [SqlStatement(x) for x in self._raw_statements]",
"def get_statement_list(self, insupdel=0):\n #NOTE: statement = [record, {...}]\n result = []\n try:\n if insupdel == StatementType.INSERT:\n statements = self.statements_insert\n elif insupdel == StatementType.UPDATE:\n statements = self.statements_update\n elif insupdel == StatementType.DELETE:\n statements = self.statements_delete\n if statements is not None:\n for statement in statements:\n result.append(statement[1])\n except Exception as ex:\n print \"Error retrieving statement list: \", ex\n return result",
"def statements_to_txns(statement_list):\n new_list = [[stmt] for stmt in statement_list]\n return new_list",
"def construct_statement(*args):\n\n INPUT_STATEMENT = \"\"\n for statement in args:\n INPUT_STATEMENT += statement\n \n\n return INPUT_STATEMENT",
"def statements(civic_eid2997_statement, civic_aid6_statement):\n return [civic_eid2997_statement, civic_aid6_statement]",
"def insert(statement: str) -> []:\n raise NotImplementedError",
"def execute_list(self, stmt: List[loxStmtAST.Stmt]) -> None:\n for st in stmt:\n st.accept(self)",
"def stmt2list(self, stmt):\n temp = ['%s\\n' % line for line in stmt.split('\\n')]\n return temp",
"async def _insert_stmt(self):\n raise NotImplementedError",
"def _sqllist(values):\n items = []\n items.append('(')\n for i, v in enumerate(values):\n if i != 0:\n items.append(', ')\n items.append(sqlparam(v))\n items.append(')')\n return SQLQuery(items)",
"def prepare(self, connection, stmt):\n return Statement(connection, stmt)",
"def prepare(self, connection, stmt):\n return Statement(connection, stmt)",
"def statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:",
"def execute_query_list(cur, conn, query_list):\n try:\n for query in query_list:\n cur.execute(query)\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error executing query list\")\n print(e)",
"def create_table_statements() -> [str]:\n pass",
"async def _update_stmt(self):\n raise NotImplementedError",
"def insert_(statement: str, db_conf) -> []:\n try:\n result = db_conf.execute(statement)\n if result:\n return result\n except psycopg2.IntegrityError:\n pass\n return []",
"def query(mdx_stmt):",
"def compile_statement(statements: List[Any]) -> List[Any]:\n properties = ['action', 'resource', 'notresource', 'notaction']\n for statement in statements:\n for statement_property in properties:\n if statement_property in statement:\n statement[statement_property] = [compile_regex(item) for item in statement[statement_property]]\n return statements",
"def parse_statements(script):\n # pylint: disable=too-many-branches\n stmt = ''\n quote = None\n for char in script:\n if quote != '--':\n stmt += char\n if quote is None:\n if char == ';':\n yield stmt.strip()\n stmt = ''\n elif char == \"'\":\n quote = \"'\"\n elif char == '\"':\n quote = '\"'\n elif char == '$':\n quote = '$'\n elif char == '-':\n quote = '-'\n elif quote in ('\"', \"'\"):\n if quote == char:\n quote = None\n elif quote == '-':\n if char == '-':\n quote = '--'\n stmt = stmt[:-2]\n else:\n quote = None\n elif quote == '--':\n if char == '\\n':\n quote = None\n elif quote.startswith('$'):\n if quote != '$' and quote.endswith('$'):\n if stmt.endswith(quote):\n quote = None\n else:\n quote += char\n stmt = stmt.strip()\n if stmt:\n yield stmt",
"def multi_statement() -> None:\n pass; print(\"hello\")",
"def _analyse_statements(\n self, statements: List[ast.stmt], *, next: CFNode\n ) -> CFNode:\n for statement in reversed(statements):\n analyse = getattr(self, \"_analyse_stmt_\" + type(statement).__name__)\n next = analyse(statement, next=next)\n return next",
"def doctest_DKBCCCsvStatementParser():",
"def test_update_from_empty(self):\r\n ctx = {}\r\n col = columns.List(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement([1, 2, 3], [], ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == [1, 2, 3]\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])",
"def insert_statement() -> str:\n pass",
"def mk_sql_list(ls):\n res = \"(\" + ' '.join([str(elem) for elem in intersperse(\",\", ls)]) + \")\"\n return res",
"def statements(self):\n return self._statements"
] | [
"0.6858657",
"0.6821307",
"0.6613819",
"0.6559915",
"0.64512044",
"0.63355",
"0.6250831",
"0.61308306",
"0.5997265",
"0.58391565",
"0.57354546",
"0.5710025",
"0.5697581",
"0.567703",
"0.567703",
"0.5657745",
"0.55945855",
"0.5572425",
"0.5521451",
"0.5517517",
"0.55145484",
"0.5508067",
"0.55078167",
"0.5487551",
"0.5474311",
"0.54192215",
"0.54119086",
"0.53842944",
"0.5371576",
"0.5356417"
] | 0.8045383 | 0 |
Returns ssh username for connecting to cluster workers. | def get_ssh_user():
return getpass.getuser() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ssh_user(self):\n if self.configuration.get(\"pg_ssh_user\"):\n return \"%s@\" % self.configuration.get(\"pg_ssh_user\")\n else:\n return \"%s@\" % DEFAULT_SSH_USER",
"def get_username(self) -> str:\n try:\n return self[\"user\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon user in system marathon config\"\n )",
"def head_node_user(self):\n return self._get_param(\"ClusterUser\")",
"def username(self) -> str:\n return self.get_env_var(self.username_var)",
"def username(self) -> str:\n return self.get_env_var(self.username_var)",
"def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")",
"def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_USERNAME')",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def get_remote_user(self, username):\n return 'ec2-user'",
"def username(self) :\n\t\ttry :\n\t\t\treturn self._username\n\t\texcept Exception as e:\n\t\t\traise e",
"def username(self):\n return self._query_config()['username']",
"def username(self):\n return self._authenticator.username()",
"def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_USERNAME')",
"def username(self) -> str:\n return self._username",
"def git_username(self):\n return self._git_username",
"def username(self) -> str:\n raise NotImplementedError",
"def get_username(self) -> str:\n return self._username",
"def get_sshhost(self):\n return self._sshhost.gethost()",
"def remote_hostname(self):\n return pn_connection_remote_hostname(self._impl)",
"def driver_username(self):\n return self._driver_username",
"def _what_is_username(self):\n prompt = \"-?- Send to: \"\n sn = self._input(prompt)\n return sn",
"def username(self):\n return self._username()",
"def username(self):\n return self._username",
"def username(self):\n return self._username",
"def username(self):\n return self._username",
"def get_username(self):\n raise NotImplementedError('get_username')",
"def username(self) -> Optional[str]:\n return self._state.get(\"username\", None)",
"def get_weak_username(self, host):\n try:\n return self.weak_hosts.get(host)[1]\n except IndexError:\n return \" \""
] | [
"0.7331437",
"0.6813522",
"0.67854804",
"0.67478967",
"0.67478967",
"0.6733212",
"0.66904676",
"0.66705793",
"0.66705793",
"0.66705793",
"0.6658738",
"0.66570204",
"0.6605207",
"0.65863043",
"0.65245265",
"0.6501686",
"0.6480417",
"0.6475567",
"0.6475105",
"0.64678997",
"0.6448277",
"0.64340156",
"0.64275813",
"0.6421704",
"0.64067924",
"0.64067924",
"0.64067924",
"0.63855106",
"0.63849294",
"0.6384408"
] | 0.72012144 | 1 |
Returns ssh key for connecting to cluster workers. If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key will be used for syncing across different nodes. | def get_ssh_key():
path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
os.path.expanduser("~/ray_bootstrap_key.pem"))
if os.path.exists(path):
return path
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def host_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"host_key\")",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")",
"def key_encryption_key_identity(self) -> Optional[pulumi.Input['ClusterPropertiesKeyEncryptionKeyIdentityArgs']]:\n return pulumi.get(self, \"key_encryption_key_identity\")",
"def partition_key(self) -> str:\n return pulumi.get(self, \"partition_key\")",
"def client_key(self) -> str:\n return pulumi.get(self, \"client_key\")",
"def client_key(self) -> str:\n return pulumi.get(self, \"client_key\")",
"def kms_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"kms_key\")",
"def hostkey(self):\n return self.__get_option('hostkey_file')",
"def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")",
"def kms_key_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"kms_key_id\")",
"def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier\")",
"def get_cluster_command(cls):\n if 'cluster_command' in cls.global_settings:\n return cls.global_settings['cluster_command']\n else:\n return None"
] | [
"0.63742065",
"0.61436695",
"0.61436695",
"0.61436695",
"0.61436695",
"0.61436695",
"0.6095634",
"0.6095634",
"0.6095634",
"0.6095634",
"0.60669327",
"0.60669327",
"0.60669327",
"0.60669327",
"0.60669327",
"0.60669327",
"0.60596585",
"0.597747",
"0.58629495",
"0.58629495",
"0.5853532",
"0.5850277",
"0.5831395",
"0.5831395",
"0.5812002",
"0.5805047",
"0.5805047",
"0.5805047",
"0.5762766",
"0.57547444"
] | 0.70618993 | 0 |
writes uuids and extras of given nodes to a file (json). This is useful for import/export because currently extras are lost. Therefore this can be used to save and restore the extras on the nodes. | def export_extras(nodes, filename='node_extras.txt'):
#outstring = ''#' node uuid | extras \n'
outdict = {}
for node in nodes:
if isinstance(node, int): #pk
node = load_node(node)
elif isinstance(node, basestring): #uuid
node = load_node(node)
if not isinstance(node, Node):
print('skiped node {}, is not an AiiDA node, did not know what to do.'.format(node))
continue
uuid = node.uuid
extras_dict = node.get_extras()
outdict[uuid] = extras_dict
#line = '{} | {}\n'.format(uuid, extras_dict)
#outstring = outstring + line
#outfile = open(filename, 'w')
#outfile.write(outstring)
#outfile.close()
json.dump(outdict, open(filename,'w'))
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_nodes(node, filename):\n\n with open(filename, 'w', newline='') as f:\n writer = csv.DictWriter(f,\n fieldnames=node[0].keys(),\n quoting=csv.QUOTE_ALL)\n writer.writeheader()\n writer.writerows(node)",
"def write(node, filepath):\n data = read(node)\n\n if not data:\n return\n\n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4, sort_keys=True)\n\n return data",
"def import_extras(filename):\n\n all_extras = {}\n\n # read file\n #inputfile = open(filename, 'r')\n #lines = inputfile.readlines()\n #for line in lines[1:]:\n # splitted = line.split(' | ')\n # uuid = splitted[0].rstrip(' ')\n # extras = splitted[1].rstrip(' ')\n # #extras = dict(extras)\n # print(extras)\n # all_extras[uuid] = extras\n #inputfile.close()\n try:\n all_extras = json.load(open(filename))\n except:\n print('The file has to be loadabel by json. i.e json format (which it is not).')\n\n for uuid, extras in all_extras.iteritems():\n\n try:\n node = load_node(uuid)\n except:\n # Does not exists\n print('node with uuid {} does not exist in DB'.format(uuid))\n node = None\n continue\n if isinstance(node, Node):\n node.set_extras(extras)\n else:\n print('node is not instance of an AiiDA node')\n #print(extras)\n return",
"def save(file_path, nodes=[]):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n return file_path\n print time.time() - t",
"def save(file_path, nodes):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n print time.time() - t",
"def write_node_features(node_features, node_file):\n dgl.data.utils.save_tensors(node_file, node_features)",
"def save_config(node, force=False):\n filepath = os.path.join(\"nodes/\", env.host_string + \".json\")\n tmp_filename = 'tmp_{0}.json'.format(env.host_string)\n files_to_create = [tmp_filename]\n if not os.path.exists(filepath) or force:\n # Only save to nodes/ if there is not already a file\n print \"Saving node configuration to {0}...\".format(filepath)\n files_to_create.append(filepath)\n for node_file in files_to_create:\n with open(node_file, 'w') as f:\n f.write(json.dumps(node, indent=4))\n return tmp_filename",
"def write_graph_to_file(self, path):\n graph = nx.Graph()\n for node in self.graph.nodes(data=True):\n new_node = deepcopy(node)\n new_node[1]['blocks'] = list(new_node[1]['blocks'])\n graph.add_node(*new_node)\n graph.add_edges_from(self.graph.edges())\n json.dump(json_graph.node_link_data(graph), open(path, 'w'))",
"def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n pass",
"def write_subgraph_nodeids(filename, nodelist):\n with open(filename, 'w') as f:\n f.write('nodeid\\n')\n for i in nodelist:\n f.write(str(i) + '\\n')",
"def save_info_to_file(filepath, tokens):\n with open(filepath, 'w') as f:\n json.dump(tokens, f)",
"def write_out():\n os.replace(\"recipes.json\", \".recipes.json.backup\")\n with open(\"recipes.json\", \"w\") as recipes_file:\n json.dump(recipebook.to_json_list(),recipes_file)",
"def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n with open(os.path.join(self.graph_dir_path, \"tp_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_edges.pkl\"), \"wb\") as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_edges.pkl\"), \"wb\") as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)",
"def writeLinkoJson(linkograph, file):\n # Convert the linkograph into a form json can use.\n jsonLinko = [linkograph.labels]\n\n \"\"\"\n for entry in linkograph:\n tolist = list(map(list, entry))\n jsonLinko.append(tolist)\n \"\"\"\n for index in range(len(linkograph)):\n tolist = list(map(list, linkograph[index]))\n tolist.append(linkograph.uuids[index])\n jsonLinko.append(tolist)\n\n with open(file, 'w') as jsonFile:\n json.dump(jsonLinko, jsonFile, indent=4)",
"def generate_file(file_name, node_keys):\n if file_name is None:\n raise ValueError(\"'file_name' is not present. This was created by @Edd1e234\")\n if node_keys is None or len(node_keys) is 0:\n raise ValueError(\"'node_keys' has no values. This was created by @Edd1e234\")\n\n file = open(file_name, \"w+\")\n for i in node_keys:\n file.write(i + \"\\n\")",
"def writeNodes(fil, nodes, nofs=1):\n fil.write(' NODAL COORDINATES 2.2.30\\n')\n for i,n in enumerate(nodes):\n fil.write(\"%10d%20.11e%20.11e%20.11e\\n\" % ((i+nofs,)+tuple(n)))\n fil.write('ENDOFSECTION\\n')",
"def write_edge_features(edge_features, edge_file):\n dgl.data.utils.save_tensors(edge_file, edge_features)",
"def nx_to_json(graph, filename):\n graph_data = json_graph.node_link_data(graph)\n\n with open(filename, \"w\") as f:\n json.dump(graph_data, f, indent=4)",
"def save_associated_genes(identifiers=[DEFAULT_IDENTIFIER]):\n for identifier in identifiers:\n file_path = os.path.join(EXTERNAL_DATA_PATH, \"{}.json\".format(identifier))\n if os.path.isfile(file_path):\n continue\n associated_genes = get_associated_genes(identifier)\n content = {\"identifier\": get_string_db_identifier(identifier), \"data\": associated_genes}\n with open(file_path, 'w') as f:\n f.write(json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"Saved associated genes for {}\".format(identifier))",
"def create_json_stash(self, path, node_params):\n json_file_path = os.path.join(path, self.name + '.json')\n\n if not os.path.isdir(path):\n os.makedirs(path)\n\n if os.path.isfile(json_file_path):\n return json_file_path\n\n with open(json_file_path, 'w+') as json_file:\n\n json_file.write(json.dumps(node_params, indent=4))\n\n self.json_stash = json_file_path\n\n return json_file_path",
"def dump_to_json(fileinfos, out):\n jsonarray = json.dumps(fileinfos)\n json_filename = \"all_elements_used.json\"\n text_file = open(os.path.join(out,out_dir_name,json_filename), \"w\")\n text_file.write(jsonarray)\n text_file.close()\n stdout.write(\"... \"+json_filename+\" created\\n\")",
"def _dumpJson(self, data, file):\n name, ext = os.path.splitext(file)\n tempFile = \"{0}.tmp\".format(name)\n with open(tempFile, \"w\") as f:\n json.dump(data, f, indent=4)\n shutil.copyfile(tempFile, file)\n os.remove(tempFile)",
"def write_json_file(self, fname, content):\n pass",
"def write_json_file(self, fname, content):\n pass",
"def writeProductsToFile():\n # Call getProducts() and save file as JSON. Could also put to DB or use MyJson, but writing to file for simplicity right now\n data = getRequest(getProducts())\n\n # Try to create file\n try:\n # Store data in JSON file \n with open('products.json', 'w') as outfile:\n json.dump(data, outfile)\n print(\"JSON file for products created!\")\n except:\n print(\"Could not dump JSON to file\")\n raise",
"def write_node_shp(self,shpname,extra_fields=[]):\n assert len(extra_fields)==0 # not yet supported!\n\n # zero-based index of node (why does write_edge_shp create 1-based ids?)\n base_dtype = [('node_id',np.int32)]\n\n node_geoms=[geometry.Point( self.nodes['x'][i] )\n for i in self.valid_node_iter() ]\n\n node_data=self.nodes[~self.nodes['deleted']].copy()\n\n # don't need to write all of the original fields out:\n node_data=utils.recarray_del_fields(node_data,['x','deleted'])\n\n wkb2shp.wkb2shp(shpname,input_wkbs=node_geoms,fields=node_data,\n overwrite=True)",
"def write_json(tables, file: str, only_var=False) -> None:\n with Path(file).open(\"w\", encoding=\"utf-8\") as __f:\n dump(tables.var if only_var else tables, __f, indent=\" \")",
"def export_json(contents, filename):\n with open(filename, 'w') as f:\n json.dump(contents, f)",
"def write_json_file(self, file, content):\n with open(file, \"w\", encoding=\"utf-8\") as f:\n json.dump(content, f, indent=2)",
"def save_node(self, node: Node):"
] | [
"0.6882683",
"0.6683414",
"0.6664386",
"0.6651517",
"0.664519",
"0.64958185",
"0.6158971",
"0.5804926",
"0.56545246",
"0.5637596",
"0.56242365",
"0.5573552",
"0.5563774",
"0.55553705",
"0.5554492",
"0.5551853",
"0.5548628",
"0.5515836",
"0.55090535",
"0.5494786",
"0.54754364",
"0.54623634",
"0.5442637",
"0.5442637",
"0.54394776",
"0.54345083",
"0.54229504",
"0.542062",
"0.5415291",
"0.54023105"
] | 0.846316 | 0 |
reads in node uuids and extras from a file and applies them to nodes in the DB. This is useful for import/export because currently extras are lost. Therefore this can be used to save and restore the extras on the nodes. | def import_extras(filename):
all_extras = {}
# read file
#inputfile = open(filename, 'r')
#lines = inputfile.readlines()
#for line in lines[1:]:
# splitted = line.split(' | ')
# uuid = splitted[0].rstrip(' ')
# extras = splitted[1].rstrip(' ')
# #extras = dict(extras)
# print(extras)
# all_extras[uuid] = extras
#inputfile.close()
try:
all_extras = json.load(open(filename))
except:
print('The file has to be loadabel by json. i.e json format (which it is not).')
for uuid, extras in all_extras.iteritems():
try:
node = load_node(uuid)
except:
            # node does not exist in the DB
print('node with uuid {} does not exist in DB'.format(uuid))
node = None
continue
if isinstance(node, Node):
node.set_extras(extras)
else:
print('node is not instance of an AiiDA node')
#print(extras)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_extras(nodes, filename='node_extras.txt'):\n\n #outstring = ''#' node uuid | extras \\n'\n outdict = {}\n for node in nodes:\n if isinstance(node, int): #pk\n node = load_node(node)\n elif isinstance(node, basestring): #uuid\n node = load_node(node)\n\n if not isinstance(node, Node):\n print('skiped node {}, is not an AiiDA node, did not know what to do.'.format(node))\n continue\n uuid = node.uuid\n extras_dict = node.get_extras()\n outdict[uuid] = extras_dict\n #line = '{} | {}\\n'.format(uuid, extras_dict)\n #outstring = outstring + line\n\n #outfile = open(filename, 'w')\n #outfile.write(outstring)\n #outfile.close()\n json.dump(outdict, open(filename,'w'))\n return",
"def read_node_features_file(nodes_features_file):\n\n node_features = dgl.data.utils.load_tensors(nodes_features_file, False)\n return node_features",
"def import_data(self, filename=None, rawdata=None, append=False):\n \n if filename:\n with open(filename,\"r\") as f:\n data = f.read()\n elif rawdata:\n data = rawdata\n else:\n raise Exception(\"No data given\")\n\n if not append:\n self.nodelist = []\n\n d = deserialize(data, self.consolidator)\n self.nodelist += list(d.nodes.values())\n if append:\n self.domain_obj = None #mark as outdated\n else:\n self.domain_obj = d",
"def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")",
"def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))",
"def load_entity_data(self, data_file):\n load_xml_seed(data_file)",
"def load_nodes(filename):\n\n with open(filename) as f:\n reader = csv.DictReader(f)\n return [item for item in reader]",
"def load_nodes(path):\n global parents\n with open(path, 'r') as r:\n for line in r:\n (taxid, parent, other) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 2)\n parents[taxid] = parent",
"def read_relations(db, openfile):\n pass",
"def import_data(self, filename=None, rawdata=None, append=False):\n if filename:\n tree = ET.parse(filename)\n elif rawdata:\n tree = ET.ElementTree(ET.fromstring(rawdata))\n else:\n raise Exception(\"No data given\")\n\n root = tree.getroot()\n features = root.find(\"Features\")\n\n if not append:\n self.nodelist = []\n\n feature_id_table = {}\n\n # map all feature ids to name\n for feature in features.iter('Feature'):\n feature_id_table[feature.attrib[\"id\"]] = feature.attrib[\"data\"]\n\n # build relation structure\n for feature in features.iter('Feature'):\n fobj = Node(feature.attrib[\"data\"])\n tmp = feature.find('description')\n if tmp != None:\n fobj.text = tmp.text\n else:\n tmp = feature.find('speak')\n if tmp != None:\n fobj.text = tmp.text\n neighbors = feature.find('neighbors')\n for neighbor in neighbors.iter('neighbor'):\n fobj.add_relation(\n neighbor.attrib['relationship'],\n feature_id_table[neighbor.attrib['dest']])\n self.nodelist.append(fobj)",
"def load(self, filename):\n\t\tf = open(filename).read().split(\"\\n\")\n\n\t\tfor item in f:\n\t\t\tcommand = item.split(\":\")\n\n\t\t\t# Add node\n\t\t\tif len(command) == 2:\n\t\t\t\t_id = command[0].strip()\n\t\t\t\t_label = command[1].strip() or None\n\n\t\t\t\t# Duplicate id\n\t\t\t\tif _id in self.nodes:\n\t\t\t\t\traise ValueError\n\n\t\t\t\t# Add node\n\t\t\t\tself.nodes[_id] = Node(_id, _label)\n\n\t\t\t# Add link\n\t\t\telif len(command) == 3:\n\t\t\t\t_from = command[0].strip()\n\t\t\t\t_label = command[1].strip() or None\n\t\t\t\t_to = command[2].strip()\n\n\t\t\t\t# Non-existent Nodes\n\t\t\t\tif _from not in self.nodes or _to not in self.nodes:\n\t\t\t\t\traise ValueError\n\n\t\t\t\tself.nodes[_from].add_neighbour(self.nodes[_to], _label)",
"def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()",
"def add_graph_attributes(G, filename):\n Ef = dict() # feature -> edges\n Nf = dict() # node -> features\n with open(filename) as f:\n for line in f: # for each node, list of features it belongs to\n d = line.split()\n u = int(d[0])\n features = d[1:]\n for f in features:\n Ef.setdefault(f, []).extend(G.in_edges(u)) # add feature-dependent edges\n #G.node[u]['Fu'] = features\n G.nodes[u]['Fu'] = features\n Nf[u] = features\n print('Read graph attributes')\n return Ef, Nf",
"def get_nodes(self):\n with open('node_list.txt', 'r') as file:\n self.nodes = [line.rstrip('\\n') for line in file]",
"def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)",
"def add_from_uuid_list(self):\n\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split('\\n')[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add('Invalid uuid lenght.')\n continue\n \n self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return",
"def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()",
"def add_users_from_file(args):\n with open(args.users_from_file) as file:\n for line in file:\n name, email_address = line.split(',')\n add_user(name, email_address.strip().lower())",
"def main(load: bool, load_only: bool, force: bool, with_sudo: bool):\n paths = []\n na = NodeAssembler()\n for processor_cls in processor_resolver:\n if not processor_cls.importable:\n continue\n click.secho(f\"Checking {processor_cls.name}\", fg=\"green\", bold=True)\n if not load_only:\n if (\n force\n or not processor_cls.nodes_path.is_file()\n or not processor_cls.nodes_indra_path.is_file()\n or not processor_cls.edges_path.is_file()\n ):\n processor = processor_cls()\n click.secho(\"Processing...\", fg=\"green\")\n # FIXME: this is redundant, we get nodes twice\n nodes = list(processor.get_nodes())\n processor.dump()\n else:\n click.secho(\"Loading cached nodes...\", fg=\"green\")\n with open(processor_cls.nodes_indra_path, \"rb\") as fh:\n nodes = pickle.load(fh)\n na.add_nodes(nodes)\n\n paths.append((processor_cls.nodes_path, processor_cls.edges_path))\n\n nodes_path = pystow.module(\"indra\", \"cogex\", \"assembled\").join(name=\"nodes.tsv.gz\")\n if not load_only:\n if force or not nodes_path.is_file():\n # Now create and dump the assembled nodes\n assembled_nodes = na.assemble_nodes()\n assembled_nodes = sorted(assembled_nodes, key=lambda x: (x.db_ns, x.db_id))\n Processor._dump_nodes_to_path(assembled_nodes, nodes_path)\n\n if load or load_only:\n sudo_prefix = \"\" if not with_sudo else \"sudo\"\n command = dedent(\n f\"\"\"\\\n {sudo_prefix} neo4j-admin import \\\\\n --database=indra \\\\\n --delimiter='TAB' \\\\\n --skip-duplicate-nodes=true \\\\\n --skip-bad-relationships=true \\\\\n --nodes {nodes_path}\n \"\"\"\n ).rstrip()\n for _, edge_path in paths:\n command += f\"\\\\\\n --relationships {edge_path}\"\n\n click.secho(\"Running shell command:\")\n click.secho(command, fg=\"blue\")\n os.system(command) # noqa:S605",
"def populate(infile):\n main(infile)",
"def seed_user_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\") #data is a list\n\n #get data from split line\n id = int(data[0])\n age = int(data[1])\n zip = data[4]\n\n #create a new record and add it to the queue\n new_user = User(user_id=id, age=age, zipcode=zip)\n db.session.add(new_user)\n\n #commit changes\n db.session.commit()",
"def getNodeData(self, file):\n\n with open('./data/{}.json'.format(file), 'r') as json_file:\n try:\n objs = []\n data = json_file.read()\n dic = json.loads(data)['data']\n for i in dic:\n objs.append(Entity(i['id'], i['name']))\n return objs\n except Exception as e:\n print(e)",
"def loadTaxi(file):\n arr = []\n with open(file, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row['fare_amount'] = float(row['fare_amount'])\n row['pickup_longitude'] = float(row['pickup_longitude'])\n row['pickup_latitude'] = float(row['pickup_latitude'])\n row['dropoff_longitude'] = float(row['dropoff_longitude'])\n row['dropoff_latitude'] = float(row['dropoff_latitude'])\n row['pickup_datetime'] = datetime.strptime(\n row['pickup_datetime'], '%Y-%m-%d %H:%M:%S %Z')\n arr.append(row)\n\n inserted_ids = db.taxi.insert_many(arr).inserted_ids\n print(\"{} taxi docs inserted\".format(len(inserted_ids)))",
"def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")",
"def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()",
"def seed_rating_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"\\t\")\n\n #get data from split line\n user_id = data[0]\n movie_id = data[1]\n score = data[2]\n\n #create a new record and add it to the queue\n new_rating = Rating(movie_id=movie_id, user_id=user_id, \n score=score)\n db.session.add(new_rating)\n\n #commit changes\n db.session.commit()",
"def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')",
"def fromfile(self,file):\n self.d.update(params_file(file))",
"def import_db(import_file):\n import_data(import_file)",
"def read_extras(self, f):\n\n gb = f[\"base state\"]\n for name in gb:\n self.base[name] = Basestate(self.cc_data.grid.ny, ng=self.cc_data.grid.ng)\n self.base[name].d[:] = gb[name]"
] | [
"0.673297",
"0.5395848",
"0.5311601",
"0.5258237",
"0.5242158",
"0.52234995",
"0.50911427",
"0.5082365",
"0.50697577",
"0.5054883",
"0.49930757",
"0.4956298",
"0.4920614",
"0.49156678",
"0.48891574",
"0.4886341",
"0.48851725",
"0.48767012",
"0.48547512",
"0.4849261",
"0.4848845",
"0.48449424",
"0.48419544",
"0.48360765",
"0.48305762",
"0.48027632",
"0.4802578",
"0.47970206",
"0.4793884",
"0.4791032"
] | 0.8099154 | 0 |
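A minimal round-trip sketch of how this record's import_extras is meant to pair with the export_extras helper shown first among the candidate snippets, assuming the legacy (Python 2) AiiDA API these functions target; the node pks and the file name below are purely illustrative.

# hypothetical pks of nodes whose extras should survive an export/import cycle
nodes = [1204, 1205, 1206]
export_extras(nodes, filename='node_extras.json')   # dumps a {uuid: extras} mapping as json

# ... export the nodes and import them into another profile ...

import_extras('node_extras.json')                   # loads the mapping and re-sets the extras by uuid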
This method deletes all AiiDA nodes in the DB which have an extra trash=True, and all their children. It could be extended into a garbage collector. Be careful when using it. | def delete_trash():
    # query the DB for nodes marked as trash
q = QueryBuilder()
nodes_to_delete_pks = []
q.append(Node,
filters = {'extras.trash': {'==' : True}
}
)
res = q.all()
for node in res:
nodes_to_delete_pks.append(node[0].dbnode.pk)
print('pk {}, extras {}'.format(node[0].dbnode.pk, node[0].get_extras()))
    # delete the trash nodes
print('deleting nodes {}'.format(nodes_to_delete_pks))
delete_nodes(nodes_to_delete_pks)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return",
"def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()",
"def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n binomes = Binome4j.nodes.all()\n for b in binomes:\n b.delete()\n projects = Project4j.nodes.all()\n for p in projects:\n p.delete()\n sherpas = Sherpa4j.nodes.all()\n for sh in sherpas:\n sh.delete()\n students = Pioupiou4j.nodes.all()\n for piou in students:\n piou.delete()\n partenaires = Partenaire4j.nodes.all()\n for part in partenaires:\n part.delete()\n ps = Planete_Solidaire.nodes.all()\n for misc in ps:\n misc.delete()",
"def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)",
"def clean():\n os.system('killall -9 lnd')\n os.system('killall -9 btcd')\n \n shutil.rmtree(btcd_dir)\n os.remove(btcd_log)\n\n index = 0\n while True:\n node = Node.from_index(index)\n try:\n shutil.rmtree(node.path())\n os.remove(node.log())\n except:\n click.echo(f'removed {index} nodes.')\n break\n index += 1",
"def cleanup(self):\n for child in self.children():\n child.deleteLater()",
"def delete_nodes(pks_to_delete):\n from django.db import transaction\n from django.db.models import Q\n from aiida.backends.djsite.db import models\n from aiida.orm import load_node\n\n # Delete also all children of the given calculations\n # Here I get a set of all pks to actually delete, including\n # all children nodes.\n all_pks_to_delete = set(pks_to_delete)\n for pk in pks_to_delete:\n all_pks_to_delete.update(models.DbNode.objects.filter(\n parents__in=pks_to_delete).values_list('pk', flat=True))\n\n print \"I am going to delete {} nodes, including ALL THE CHILDREN\".format(\n len(all_pks_to_delete))\n print \"of the nodes you specified. Do you want to continue? [y/N]\"\n answer = raw_input()\n\n if answer.strip().lower() == 'y':\n # Recover the list of folders to delete before actually deleting\n # the nodes. I will delete the folders only later, so that if\n # there is a problem during the deletion of the nodes in\n # the DB, I don't delete the folders\n folders = [load_node(pk).folder for pk in all_pks_to_delete]\n\n with transaction.atomic():\n # Delete all links pointing to or from a given node\n models.DbLink.objects.filter(\n Q(input__in=all_pks_to_delete) |\n Q(output__in=all_pks_to_delete)).delete()\n # now delete nodes\n models.DbNode.objects.filter(pk__in=all_pks_to_delete).delete()\n\n # If we are here, we managed to delete the entries from the DB.\n # I can now delete the folders\n for f in folders:\n f.erase()",
"def delete_all(self):\n query = \"\"\"MATCH(n) DETACH DELETE n\"\"\"\n return self.create_tx(query)",
"def delete_orphan_nodes(self):\n used=np.zeros( self.Nnodes(),'b1')\n valid_cells=~self.cells['deleted']\n valid_nodes=self.cells['nodes'][valid_cells,:].ravel()\n valid_nodes=valid_nodes[ valid_nodes>=0 ]\n used[ valid_nodes ]=True\n\n valid_edges=~self.edges['deleted']\n valid_nodes=self.edges['nodes'][valid_edges,:].ravel()\n used[ valid_nodes ]=True\n \n self.log.info(\"%d nodes found to be orphans\"%np.sum(~used))\n\n for n in np.nonzero(~used)[0]:\n self.delete_node(n)",
"def destroyNodes(self):\r\n for nt in self.listNodes.keys(): \t# for all kind of nodes...\r\n for node in self.listNodes[nt]: \t# for all nodes of type <nt>\r\n if node.graphObject_: node.graphObject_.destroy()",
"def clean_database(databasePathname):\n print '# loading database ' + databasePathname\n try:\n db = gdbm.open(databasePathname, 'w')\n except:\n print \"# \" + databasePathname + \" could not be loaded\"\n sys.exit(-1)\n\n # even though gdbm supports memory efficient iteration over\n # all keys, I want to order my traversal across similar\n # paths to leverage caching of directory files:\n allKeys=db.keys()\n print '# finished loaded keys from ' + databasePathname\n allKeys.sort()\n print '# finished sorting keys from ' + databasePathname\n print '# deleting dead nodes'\n count=0\n for currKey in allKeys:\n try:\n os.stat(currKey)\n sys.stdout.write('.')\n except OSError:\n del db[currKey]\n sys.stdout.write('*')\n count=count+1\n sys.stdout.flush()\n print \"\\n# reorganizing \" + databasePathname\n db.reorganize()\n db.sync()\n db.close()\n print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'",
"def clean(self):\n for nodeId in list(self.nodes.keys()):\n if not self.nodes[nodeId].safe:\n del self.nodes[nodeId]",
"def destroy_all(self):\n self.log.info(\"Destroying the %s cluster\" % self.cluster_name)\n for n in self.all_nodes:\n n.destroy()\n remove(self.save_file)",
"def delete(self, nodes):\n # Check indices.\n N = len(self)\n if not isinstance(nodes, (set, list, tuple)):\n nodes = [nodes]\n if not all(0 < node <= N for node in nodes):\n raise IndexError()\n\n # Reparent orphaned nodes.\n # Lift the arc until the parent is non-deleted node.\n # If all parents are deleted, we will hit the root eventually.\n deleted = set(nodes)\n alive_heads = [None] * N\n for node in range(1, N + 1):\n head = self.heads(node)\n while head in deleted:\n head = self.heads(head)\n alive_heads[node - 1] = head\n\n # Remap.\n new_nodes = {0: 0}\n new_node = 1\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n new_nodes[node] = new_node\n new_node += 1\n\n # Gather non-deleted stuff.\n forms = []\n lemmas = []\n cpostags = []\n postags = []\n feats = []\n heads = []\n deprels = []\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n forms.append(self.forms(node))\n lemmas.append(self.lemmas(node))\n cpostags.append(self.cpostags(node))\n postags.append(self.postags(node))\n feats.append(self.feats(node))\n heads.append(new_nodes[alive_heads[node - 1]])\n deprels.append(self.deprels(node))\n \n # Construct new tree.\n self.__init__(forms, lemmas, cpostags, postags, feats, heads, deprels)",
"def delete_all_entities(self):\n self._delete_all_acls()\n self._delete_all_containers()\n self._delete_all_orders()\n self._delete_all_secrets()",
"def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()",
"def delete_all_dags(db: Redis[bytes]) -> None:\n for dag in db.smembers(DAG_INDEX):\n db.delete(join(DAG_OPERATIONS, dag.decode())) # type:ignore\n db.delete(join(DAG_STATUS, dag.decode())) # type:ignore\n # Remove old index\n db.delete(DAG_INDEX)",
"def deleteAll(tx):\n query = (\n\n \"MATCH(p1:Person)-[a:APP_CONTACT]->(p2:Person)\"\n \"WHERE a.date < date() - duration({Days: 10}) OR (a.date = date() - duration({Days: 10}) AND a.hour < time())\"\n \"DELETE a\"\n\n )\n\n tx.run(query)",
"def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return",
"def deleteAllSteps(self):\n\n self.dbase.deleteAllSteps(self.scene)\n return",
"def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()",
"def remove_stale_files(self) -> None:\n\n for db in self.dbnodes:\n db.remove_stale_dbnode_files()",
"def delete_all():\n if os.path.exists(DATA_DIR):\n shutil.rmtree(DATA_DIR)",
"def _delete_all_containers(self):\n for container_ref in self.created_entities['container']:\n self.barbicanclient.containers.delete(container_ref)",
"def delete(self): # DirObj.delete\n self.deleted=True\n for name, d in self.subdirs.iteritems():\n d.delete()\n for name, f in self.files.iteritems():\n f.delete()",
"def delete_all(self):\n raise NotImplementedError()",
"def DeleteAllItems(self):\r\n\r\n if self._anchor:\r\n self.Delete(self._anchor)",
"def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())",
"def delete_all(self):\n # delete everything\n shutil.rmtree(self.location)",
"def deleteAllNeedlesFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_'+str(self.round)+'*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_'+str(self.round)+'*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)"
] | [
"0.75391054",
"0.7246509",
"0.6745943",
"0.6743173",
"0.66865116",
"0.66524655",
"0.6503817",
"0.64772564",
"0.6394675",
"0.63327867",
"0.6293654",
"0.62701166",
"0.6248863",
"0.6222633",
"0.6217673",
"0.6183407",
"0.61382097",
"0.6128062",
"0.61192054",
"0.6104992",
"0.60861146",
"0.6066739",
"0.60219026",
"0.60211426",
"0.60163176",
"0.6010808",
"0.6009833",
"0.60089463",
"0.59942514",
"0.59707975"
] | 0.7323777 | 1 |
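A short usage sketch for delete_trash above, assuming the legacy AiiDA API used throughout these snippets; the pks are invented, and set_extra is the standard way to attach a single extra to a node.

from aiida.orm import load_node

# flag the nodes to be discarded; delete_trash() will also remove their children
for pk in [2001, 2002]:
    load_node(pk).set_extra('trash', True)

delete_trash()   # queries for extras.trash == True and deletes the flagged nodes and all their children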
Returns a list of node uuids (or pks) for a given group, where the group can be given as a name, pk, uuid or Group object. | def get_nodes_from_group(group, return_format='uuid'):
from aiida.orm import Group
from aiida.common.exceptions import NotExistent
nodes = []
g_nodes = []
try:
group_pk = int(group)
except ValueError:
group_pk = None
group_name = group
if group_pk is not None:
try:
str_group = Group(dbgroup=group_pk)
except NotExistent:
str_group = None
message = ('You have to provide a valid pk for a Group '
'or a Group name. Reference key: "group".'
'given pk= {} is not a valid group'
'(or is your group name integer?)'.format(group_pk))
print(message)
elif group_name is not None:
try:
str_group = Group.get_from_string(group_name)
except NotExistent:
str_group = None
message = ('You have to provide a valid pk for a Group or a Group name.'
'given group name= {} is not a valid group'
'(or is your group name integer?)'.format(group_name))
print(message)
elif isinstance(group, Group):
str_group = group
else:
str_group = None
        print('Could not handle the given input; please provide a Group, a pk, or a group name.')
return nodes
    if str_group is None:
        # lookup failed above; an error message was already printed
        return nodes
    g_nodes = str_group.nodes
for node in g_nodes:
if return_format == 'uuid':
nodes.append(node.uuid)
elif return_format == 'pk':
nodes.append(node.pk)
return nodes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list",
"def GetGroupMembers(self, group):\n return []",
"def _get_group_uuid(self, nova, name):\n hints = {}\n try:\n groups = nova.server_groups.list()\n for group in groups:\n gname = getattr(group, 'name', '')\n if name == gname:\n hints['group'] = getattr(group, 'id', '')\n except Exception as e:\n LOG.exception(e)\n finally:\n LOG.info(\"%s:%s() %s: %s\", self.__class__.__name__,\n sys._getframe().f_code.co_name, name, hints)\n return hints",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret",
"def group_nodes(self, group, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'map', group)",
"def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j",
"def _get_nodes_by_instance(self, instance_uuid):\n try:\n node = pecan.request.dbapi.get_node_by_instance(instance_uuid)\n return [node]\n except exception.InstanceNotFound:\n return []",
"def _get_node_group(self, node_name):\n\n pass",
"def getNodeLVMGroups(self,node):\n data = self.connect('get','nodes/%s/scan/lvm' % (node),None)\n return data",
"def get_uuids_in_node(self, node, project_id):\n program, project = project_id.split(\"-\", 1)\n\n try:\n res = self.paginate_query(node, project_id)\n uuids = [x[\"id\"] for x in res[\"data\"][node]]\n except:\n raise Gen3Error(\n \"Failed to get UUIDs in node '\"\n + node\n + \"' of project '\"\n + project_id\n + \"'.\"\n )\n\n return uuids",
"def get_uuids(things):\n return [thing.uuid for thing in things]",
"def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return objects[self._total[section][groupid]]\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n #self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return obj",
"def _find_groups_for_user(email):\n return [g['name'] for g in groups.find({\"users\":email})]",
"def test_get_device_group_by_id(self):\n pass",
"def group_members(group):\n\n group = ldapi.lookup(ld, 'cn', group, cfg['ldap_groups_base'])\n\n if group and 'uniqueMember' in group:\n r = re.compile('^uid=([^,]*)')\n return map(lambda x: r.match(x).group(1), group['uniqueMember'])\n return []",
"def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist",
"def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]",
"def find_group(self,id):\n result = []\n cursor = self._cnx.cursor()\n command = \"SELECT group_id FROM teilnahmen WHERE id={}\".format(id)\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n for (group) in tuples:\n teilnahme = Teilnahme()\n teilnahme.set_le()\n result.append(teilnahme)\n\n self._cnx.commit()\n cursor.close()\n return result",
"def _ListGroupDevices(self, group):\n for run_target in six.itervalues(group.run_targets):\n for d in six.itervalues(run_target.devices):\n yield d",
"def getTGTasksName(all_taskgroup_objects, tg_name):\n tg_id = all_taskgroup_objects.id\n tg_task_obj = TaskgroupTask.objects.filter(\n taskgroup_id=tg_id\n )\n tg_task = TaskgroupTaskSerializers(\n tg_task_obj,\n many=True\n )\n tasks_list = []\n tg_list = []\n try:\n for i in range(len(tg_task.data)):\n tasks = dict(tg_task.data[i].items())\n print(\"#############\", tasks)\n task_obj = Tafv2Task.objects.get(id=tasks['task_id'])\n tasks_list.append({\n \"task_name\": task_obj.script,\n \"task_id\": task_obj.id\n })\n tg_list.append({\n \"tg_name\": tg_name,\n \"tg_id\": tg_id\n })\n\n context = {'tgList': tg_list, 'tasksList': tasks_list}\n print(\"$$$$$$$$$$$$$$\", context)\n return context\n except Exception as e:\n print(e)",
"def get_group_names(self):\r\n return self.groups.keys()",
"def get_group_users(groupname):\n return jsonify(admin.get_group_users(current_app.scoped_session(), groupname))",
"def _groupNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.dhGroups]",
"def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return self.wrap_object(objects[self._total[section][groupid]],\n section)\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return self.wrap_object(obj, section)",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes",
"def construct_groups_string(nodes):\n groups = get_groups(nodes)\n if len(groups) <= 1:\n return \"\"\n else:\n result = []\n for color in groups:\n # +1 because .tsp nodes are indexed with 1\n group = [node.nid + 1 for node in nodes if node.color == color]\n result.append(group)\n return str(result)",
"def get_groups(nodes):\n return list(set([node.color for node in nodes]))",
"def keys(self):\n list_all_dict = self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]"
] | [
"0.6142465",
"0.5999166",
"0.59815145",
"0.58869386",
"0.5741488",
"0.5735394",
"0.5697292",
"0.5640951",
"0.563847",
"0.56142646",
"0.55772024",
"0.5554826",
"0.5521682",
"0.5469941",
"0.5396493",
"0.5393709",
"0.53740793",
"0.5341698",
"0.5340419",
"0.5333693",
"0.53306866",
"0.53110385",
"0.5308188",
"0.53049964",
"0.5273193",
"0.5263094",
"0.5262305",
"0.5256786",
"0.52505016",
"0.52147794"
] | 0.73784983 | 0 |
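Usage sketch for get_nodes_from_group above; 'my_structures' is a made-up group name, and both supported return formats are shown.

# the group can be passed as a pk, a group name, or a Group instance
uuids = get_nodes_from_group('my_structures')                      # default: list of uuids
pks = get_nodes_from_group('my_structures', return_format='pk')    # list of pks instead
print('group contains {} nodes'.format(len(uuids)))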
This function returns the default anchors given the image shape and the number of anchors per grid point. The grid has width and height equal to those of the final layer's output. | def set_anchors(mc):
H, W, C = _get_output_shape(mc)
B = mc.ANCHOR_PER_GRID
X = np.array(mc.INITIAL_ANCHOR_SHAPES)
X[:,0] *= mc.IMAGE_WIDTH
X[:,1] *= mc.IMAGE_HEIGHT
    # anchor widths and heights, tiled over every grid cell
    anchor_shapes = np.reshape(
[X] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_grid_anchors(grid_sizes, strides, cell_anchors):\n anchors = []\n assert cell_anchors is not None\n\n for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):\n grid_height, grid_width = size\n stride_height, stride_width = stride\n\n # For output anchor, compute [x_center, y_center, x_center, y_center]\n shifts_x = np.arange(0, grid_width) * stride_width\n shifts_y = np.arange(0, grid_height) * stride_height\n shift_x, shift_y = np.meshgrid(shifts_y, shifts_x)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n shifts = np.stack((shift_x, shift_y, shift_x, shift_y), axis=1)\n\n # For every (base anchor, output anchor) pair,\n # offset each zero-centered base anchor by the center of the output anchor.\n anchors.append(\n (shifts.reshape((-1, 1, 4)) + base_anchors.reshape((1, -1, 4))).reshape(-1, 4)\n )\n\n return anchors",
"def create_cell_anchors():\n k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL\n scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE\n aspect_ratios = cfg.RETINANET.ASPECT_RATIOS\n anchor_scale = cfg.RETINANET.ANCHOR_SCALE\n A = scales_per_octave * len(aspect_ratios)\n anchors = {}\n for lvl in range(k_min, k_max + 1):\n # create cell anchors array\n stride = 2. ** lvl\n cell_anchors = np.zeros((A, 4))\n a = 0\n for octave in range(scales_per_octave):\n octave_scale = 2 ** (octave / float(scales_per_octave))\n for aspect in aspect_ratios:\n anchor_sizes = (stride * octave_scale * anchor_scale, )\n anchor_aspect_ratios = (aspect, )\n cell_anchors[a, :] = generate_anchors(\n stride=stride, sizes=anchor_sizes,\n aspect_ratios=anchor_aspect_ratios)\n a += 1\n anchors[lvl] = cell_anchors\n return anchors",
"def _get_anchor_grid(self, width, height, batch_size):\n anchors = tf.cast(self._anchors, dtype = self.dtype)\n anchors = tf.reshape(anchors, [1, -1])\n anchors = tf.repeat(anchors, width*height, axis = 0)\n anchors = tf.reshape(anchors, [1, width, height, self._num, -1])\n anchors = tf.repeat(anchors, batch_size, axis = 0)\n return anchors",
"def get_all_anchors(stride=None, sizes=None):\n if stride is None:\n stride = cfg.ANCHOR.ANCHOR_STRIDE\n if sizes is None:\n sizes = cfg.ANCHOR.ANCHOR_SIZES\n # Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors\n # are centered on stride / 2, have (approximate) sqrt areas of the specified\n # sizes, and aspect ratios as given.\n cell_anchors = generate_anchors(\n stride,\n scales=np.array(sizes, dtype=np.float) / stride,\n ratios=np.array(cfg.ANCHOR.ANCHOR_RATIOS, dtype=np.float))\n # anchors are intbox here.\n # anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)\n\n max_size = cfg.DATA.MAX_SIZE\n field_size = int(np.ceil(max_size / stride))\n shifts = np.arange(0, field_size) * stride\n shift_x, shift_y = np.meshgrid(shifts, shifts)\n shift_x = shift_x.flatten()\n shift_y = shift_y.flatten()\n shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()\n # Kx4, K = field_size * field_size\n K = shifts.shape[0]\n\n A = cell_anchors.shape[0]\n field_of_anchors = (\n cell_anchors.reshape((1, A, 4)) +\n shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))\n # FSxFSxAx4\n # Many rounding happens inside the anchor code anyway\n # assert np.all(field_of_anchors == field_of_anchors.astype('int32'))\n field_of_anchors = field_of_anchors.astype('float32')\n field_of_anchors[:, :, :, [2, 3]] += 1\n return field_of_anchors",
"def get_anchors(self, image_shape):\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]",
"def anchors(self, img_shape, dtype=np.float32):\n return np_methods.ssd_anchors_all_layers(img_shape,\n self.params.feat_shapes,\n self.params.anchor_sizes,\n self.params.anchor_ratios,\n self.params.anchor_steps,\n self.params.anchor_offset,\n dtype)",
"def generate_anchors(self):\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size * 1. / r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]\n count += 1",
"def get_anchor_points(self):\n rows, cols = np.where(self.overlap_mask)\n self.anchor_points = tuple(zip(rows, cols))[:: self.sampling_int]\n print(\"# of anchors: {}\".format(len(self.anchor_points)))",
"def _mkanchors(ws, hs, x_ref, y_ref):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n\n anchors = np.hstack(\n (\n x_ref - 0.5 * (ws - 1),\n y_ref - 0.5 * (hs - 1),\n x_ref + 0.5 * (ws - 1),\n y_ref + 0.5 * (hs - 1)\n )\n )\n return anchors",
"def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))\n return anchors",
"def _mkanchors(ws, hs, x_ctr, y_ctr):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors",
"def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack(\n (\n x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1),\n )\n )\n return anchors",
"def _generate_anchors(point, sizes, aspect_ratios, layout, beta, include_depth):\n\n distance = point[2]\n base_size = sizes[0]\n scales = sizes[1:] / base_size\n # beta = 8\n scales = (beta/distance)*scales\n\n center = (point[0], point[1])\n anchor = np.array([center[0] - base_size/2.0, center[1] - base_size/2.0,\n center[0] + base_size/2.0, center[1] + base_size/2.0],\n dtype=np.float)\n\n anchors = _ratio_enum(anchor, aspect_ratios)\n anchors = np.vstack(\n [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]\n )\n\n all_anchors = np.empty((0,4))\n for l in layout:\n new_anchors = _shift_anchors(anchors, l)\n all_anchors = np.vstack((all_anchors, new_anchors))\n\n if int(include_depth)==1:\n # Add the distance as the 5th element to all anchors\n new_shape = (all_anchors.shape[0], all_anchors.shape[1]+1)\n new_anchors = np.ones(new_shape) * distance\n new_anchors[:,:-1] = all_anchors\n all_anchors = new_anchors\n\n return all_anchors",
"def make_anchors(self, img_shape, dtype=np.float32):\n return anchor_utils.ssd_anchors_all_layers(img_shape,\n self.params.feat_shapes,\n self.params.anchor_sizes,\n self.params.anchor_ratios,\n self.params.anchor_steps,\n self.params.anchor_offset,\n dtype)",
"def adjust_regular_roi_anchors(bounds: QRectF, anchors: list):\n for point in anchors:\n off = point.boundingRect().width() / 2\n if point.position == AnchorPosition.LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.TOP:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off)\n elif point.position == AnchorPosition.BOTTOM:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_LEFT:\n point.setPos(bounds.left() - off, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_RIGHT:\n point.setPos(bounds.right() - off, bounds.bottom() - off)",
"def generate_anchors(base_size=16, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n anchors = generate_base_anchors(base_size=base_size, ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))\n A = anchors.shape[0]\n shift_x = np.arange(0, IM_SCALE // feat_stride) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_x)\n shifts = np.stack([shift_x, shift_y, shift_x, shift_y], -1)\n all_anchors = shifts[:, :, None] + anchors[None, None]\n return all_anchors",
"def center_image_grid_anchors(image_grid):\n for image in image_grid:\n center_image_anchor(image)",
"def generate_anchors_info():\n original_height, original_width = 512, 640\n input_anchor = Anchor(\n min_level=2,\n max_level=6,\n num_scales=1,\n aspect_ratios=[1.0, 2.0, 0.5],\n anchor_size=8,\n image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))\n anchor_boxes = input_anchor.multilevel_boxes\n for key in anchor_boxes:\n anchor_boxes[key] = anchor_boxes[key].numpy()\n\n scale = min(_IMAGE_SIZE.value / original_height,\n _IMAGE_SIZE.value / original_width)\n image_info = np.array([[[original_height, original_width],\n [_IMAGE_SIZE.value, _IMAGE_SIZE.value],\n [scale, scale], [0, 0]]])\n\n return anchor_boxes, image_info",
"def anchors(self):\n dims = self.dims\n anchors = []\n for peak in self:\n possible_anchors = []\n for combination in combinations(range(dims), 2):\n spins = [peak[i] for i in combination]\n if any(s.res_num is None or s.atom is None for s in spins):\n continue\n res_nums = [spin.res_num for spin in spins]\n atoms = [spin.atom for spin in spins]\n elements = [atom[0] for atom in atoms]\n positions = [atom[1:] for atom in atoms]\n same_res_num = res_nums[0] == res_nums[1]\n valid_pairs = [set(('H', 'N')), set(('H', 'C'))]\n is_proton_heavy_pair = set(elements) in valid_pairs\n same_position = all(c[0] == c[1] for c in zip(*positions))\n if same_res_num and is_proton_heavy_pair and same_position:\n if '' in positions and set(elements) != set(('H', 'N')):\n # One of the atom names must have been 'H', 'N' or 'C'\n # Of these, only the amide proton anchor is valid\n continue\n if elements[0] == 'H':\n possible_anchors.append(combination)\n else:\n possible_anchors.append(combination[::-1])\n if len(possible_anchors) > 1:\n pa_sets = [set(pa) for pa in possible_anchors]\n overlap = set.intersection(*pa_sets)\n if overlap:\n # Ambiguous, overlapping anchors\n continue\n for poss_anc in possible_anchors:\n if poss_anc not in anchors:\n anchors.append(poss_anc)\n anchors = tuple(anchors)\n return anchors",
"def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):\n A = num_anchors\n total_anchors = all_anchors.shape[0]\n K = total_anchors / num_anchors\n\n # allow boxes to sit over the edge by a small amount\n _allowed_border = 0\n\n # map of shape (..., H, W)\n height, width = rpn_cls_score.shape[1:3]\n\n # only keep anchors inside the image\n inds_inside = np.where(\n (all_anchors[:, 0] >= -_allowed_border) &\n (all_anchors[:, 1] >= -_allowed_border) &\n (all_anchors[:, 2] < im_info[1] + _allowed_border) & # width\n (all_anchors[:, 3] < im_info[0] + _allowed_border) # height\n )[0]\n\n # keep only inside anchors\n anchors = all_anchors[inds_inside, :]\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.empty((len(inds_inside),), dtype=np.float32)\n labels.fill(-1)\n\n # overlaps between the anchors and the gt boxes\n # overlaps (ex, gt)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(anchors, dtype=np.float),\n np.ascontiguousarray(gt_boxes, dtype=np.float))\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps[gt_argmax_overlaps,\n np.arange(overlaps.shape[1])]\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\n\n if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels first so that positive labels can clobber them\n # first set the negatives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # fg label: for each gt, anchor with highest overlap\n labels[gt_argmax_overlaps] = 1\n\n # fg label: above threshold IOU\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1\n\n if cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels last so that negative labels can clobber positives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # subsample positive labels if we have too many\n num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)\n fg_inds = np.where(labels == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n\n # subsample negative labels if we have too many\n num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)\n bg_inds = np.where(labels == 0)[0]\n if len(bg_inds) > num_bg:\n disable_inds = npr.choice(\n bg_inds, size=(len(bg_inds) - num_bg), replace=False)\n labels[disable_inds] = -1\n\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\n bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])\n\n bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n # only the positive ones have regression targets\n bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)\n\n bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = np.sum(labels >= 0)\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n negative_weights = np.ones((1, 4)) * 1.0 / num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /\n np.sum(labels == 1))\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /\n np.sum(labels == 0))\n bbox_outside_weights[labels == 1, :] = positive_weights\n bbox_outside_weights[labels == 0, :] = negative_weights\n\n 
# map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)\n\n # labels\n labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)\n labels = labels.reshape((1, 1, A * height, width))\n rpn_labels = labels\n\n # bbox_targets\n bbox_targets = bbox_targets \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_targets = bbox_targets\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_inside_weights = bbox_inside_weights\n\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_outside_weights = bbox_outside_weights\n return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights",
"def get_anchors(self, featmap_sizes, img_metas, device='cuda'):\n num_imgs = len(img_metas)\n\n # since feature map sizes of all images are the same, we only compute\n # anchors for one time\n multi_level_anchors = self.anchor_generator.grid_anchors(\n featmap_sizes, device)\n anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n # for each image, we compute valid flags of multi level anchors\n valid_flag_list = []\n for img_id, img_meta in enumerate(img_metas):\n multi_level_flags = self.anchor_generator.valid_flags(\n featmap_sizes, img_meta['pad_shape'], device)\n valid_flag_list.append(multi_level_flags)\n\n return anchor_list, valid_flag_list",
"def _mkanchors(ws, ctr):\n ws = ws[:, np.newaxis]\n anchors = np.hstack(\n (\n ctr - 0.5 * ws,\n ctr + 0.5 * ws,\n )\n )\n return anchors",
"def rpn_anchor_boxes(image_size, *args, **kwargs):\n anchor_boxes = generate_anchor_boxes(image_size, *args, **kwargs)\n valid_ab_indices = valid_anchor_boxes(anchor_boxes, image_size)\n return anchor_boxes, valid_ab_indices",
"def generate_anchors(base_size, ratios, scales, rotations):\n num_anchors = len(ratios) * len(scales) * len(rotations)\n # initialize output anchors\n anchors = np.zeros((num_anchors, 5))\n # scale base_size\n anchors[:, 2:4] = base_size * np.tile(scales, (2, len(ratios) * len(rotations))).T\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales) * len(rotations)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales) * len(rotations))\n # add rotations\n anchors[:, 4] = np.tile(np.repeat(rotations, len(scales)), (1, len(ratios))).T[:, 0]\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0:3:2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1:4:2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n return anchors",
"def generate_anchor_boxes(image_size, feature_map_size, sizes, scales):\n image_height, image_width = image_size\n fm_height, fm_width = feature_map_size\n height_stride = int(image_height / fm_height)\n width_stride = int(image_width / fm_width)\n\n # Compose horizontal and vertical positions into grid and reshape result into (-1, 2)\n y_centers = np.arange(0, image_height, height_stride)\n x_centers = np.arange(0, image_width, width_stride)\n centers = np.dstack(np.meshgrid(y_centers, x_centers)).reshape((-1, 2))\n\n # Creates anchor boxes pyramid. Somewhat vectorized version of itertools.product\n r_scales = np.repeat([scales], len(sizes), axis=0).ravel()\n r_sides = np.repeat([sizes], len(scales), axis=1).ravel()\n ab_pyramid = np.transpose([r_sides / (r_scales ** .5),\n r_sides * (r_scales ** .5)]).astype(int)\n\n # Creates combinations of all anchor boxes centers and sides\n r_centers = np.repeat(centers, len(ab_pyramid), axis=0)\n r_ab_pyramid = np.repeat([ab_pyramid], len(centers), axis=0).reshape((-1, 2))\n return np.hstack((r_centers, r_ab_pyramid))",
"def __create_anchors(self, sizes, aspects):\n k = len(sizes) * len(aspects)\n img_anchors = []\n for i in sizes:\n for j in aspects:\n img_anchors.append(\n [0, 0, 2 * i * j[0] / (j[0] + j[1]), 2 * i * j[1] / (j[0] + j[1])])\n\n self.anchors = np.asarray(img_anchors)",
"def generate_all_anchors(self):\n self.feature_sizes = [int(np.round(self.resolution/stride)) for stride in self.strides]\n \n #generate all anchors for each level of the FPN\n all_anchors = [self.generate_feature_level_base_anchors(size=size) for size in self.sizes]\n all_anchors = [self.shift_and_duplicate(layer_anchors, feature_size, stride) for layer_anchors, feature_size, stride in zip(all_anchors, self.feature_sizes, self.strides)]\n all_anchors = tf.concat(all_anchors, axis=0)\n\n return all_anchors",
"def generate_anchors(scales=(32,), aspect_ratios=(0.5, 1, 2), dtype=np.float32):\n scales = np.array(scales)\n aspect_ratios = np.array(aspect_ratios, dtype=dtype)\n h_ratios = np.sqrt(aspect_ratios)\n w_ratios = 1 / h_ratios\n\n ws = (w_ratios[:, None] * scales[None, :]).reshape(-1)\n hs = (h_ratios[:, None] * scales[None, :]).reshape(-1)\n\n base_anchors = np.stack([-ws, -hs, ws, hs], axis=1) / 2\n return base_anchors",
"def generate_anchors(base_size=16, ratios=None, scales=None):\n\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))\n\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors",
"def generate_anchors(base_size=16, ratios=None, scales=None):\n\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))\n\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors"
] | [
"0.71884936",
"0.7036838",
"0.70205647",
"0.692611",
"0.6890038",
"0.6888356",
"0.68680495",
"0.6852056",
"0.68242455",
"0.6721483",
"0.6713985",
"0.6680048",
"0.6649211",
"0.6587107",
"0.6582483",
"0.6504692",
"0.6504201",
"0.65007716",
"0.64910555",
"0.6475948",
"0.64250964",
"0.6394033",
"0.6307537",
"0.6254617",
"0.6244812",
"0.62378734",
"0.62375695",
"0.6226987",
"0.6209553",
"0.6209553"
] | 0.73078704 | 0 |
Returns a list of created posts for the given author | def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT):
posts = []
for i in range(num):
posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility))
return posts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_queryset(self):\r\n\r\n user = get_object_or_404(User, username=self.kwargs.get('username'))\r\n return Post.objects.filter(author=user).order_by('-date_posted')",
"def get_queryset(self):\n id = self.kwargs['pk']\n target_author=get_object_or_404(Author, pk = id)\n return Post.objects.filter(author=target_author)",
"def author_posts(request, author_id):\n id = int(author_id)\n user = myUser.objects.get(user_id=id)\n if user.is_admin:\n posts = Post.objects.select_related('author').order_by('-modified')\n else:\n posts = Post.objects.select_related('author').filter(author_id=id).order_by('-modified')\n\n return render(request, 'posts/authors.html',\n {'posts': posts})",
"def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')",
"def get_queryset(self):\n author = self.kwargs['author']\n target_author = get_object_or_404(Blog, author=author)\n return Blog.objects.filter(author=target_author)",
"def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return posts",
"def get_quotes_for_author(self, author: str) -> List[Quote]:\n params = (f'%{author}%',)\n query = '''\n SELECT *\n FROM quotes\n WHERE author LIKE ?\n ORDER BY created_at DESC\n '''\n\n ret = self.__execute_query(query, params)\n\n return self.__build_quotes_from_query_result(ret.fetchall())",
"def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def get_posts(self):\n return Post.select().where (Post.user == self)",
"def getPosts():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name IN (SELECT following FROM followers WHERE user = ?) OR name = ?\", (name, name))\n posts = cur.fetchall()\n return posts",
"def get_queryset(self):\n user: User = self.request.user\n following_users = user.profile.following.all()\n return Post.objects.filter(author__in=following_users).order_by('created')",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList",
"def get_posts_for_user(account_pk):\n where = \"WHERE account_pk = ?\"\n values = (account_pk, )\n orders = \"ORDER BY time DESC\"\n return Post.select_many(where, orders, values)",
"def getMyPosts():\n \n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name = ?\", [name])\n posts = cur.fetchall()\n return posts",
"def get_posts(self):\n return self.blog_posts.all()",
"def get_all_posts(self):\n cur = self.conn.cursor()\n\n query = 'SELECT blog.blog_id as id, blog.title as title, ' \\\n 'blog.subtitle as subtitle, ' \\\n 'blog.content as content, blog.date as date, ' \\\n 'author.name as author ' \\\n 'FROM blog, author ' \\\n 'WHERE blog.author_id = author.author_id ' \\\n 'ORDER BY blog_id DESC '\n\n posts = []\n cur.execute(query)\n\n for row in cur.fetchall():\n posts.append(dict(row))\n\n return posts",
"def list_posts(request):\n if request.method == 'POST':\n category = request.POST.get('category', False)\n posts = Post.objects.select_related('author')\\\n .filter(category=category)\\\n .order_by('-modified')\n # import pdb; pdb.set_trace()\n return render(request, 'posts/index.html',\n {'posts': posts})\n\n posts = Post.objects.select_related('author').order_by('-modified')\n likes = Likes.objects.select_related('post')\n\n return render(request, 'posts/index.html',\n {'posts': posts})",
"def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list",
"def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects",
"def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list",
"def feed(request):\n followers = request.user.profile.followers.values_list('pk', flat=True)\n posts = Post.objects.filter(author_id__in=followers)\n\n return render(request,\n 'posts/feed.html',\n {'posts': posts})",
"def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp",
"def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')",
"def get_posts(self, published=False) -> Type[QuerySet]:\n categories = self.get_descendant_categories()\n posts = Post.objects.filter(categories__in=categories)\n if published:\n posts = posts.filter(published__lte=timezone.now())\n return posts",
"def users_posts():\n\n user_id = session.get('user_id')\n posts = Post.query.outerjoin(Comment, db.and_(Comment.post_id == Post.post_id, \n Comment.active == True)).filter(Post.user_id == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n post_list = []\n for post in posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n\n return resp",
"def task_fetch_posts(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx'):\n\n # Create query instances for posts\n post_query = Query(PostParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)",
"def published_posts(self) -> Type[QuerySet]:\n return Post.objects.filter(published__lt=timezone.now()).order_by('-published')",
"def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return"
] | [
"0.6751652",
"0.6719958",
"0.6679745",
"0.64851755",
"0.6355382",
"0.63139474",
"0.6307772",
"0.62711054",
"0.6250475",
"0.62041193",
"0.61662126",
"0.6099204",
"0.6088763",
"0.60696316",
"0.6060949",
"0.59918046",
"0.59400725",
"0.58352584",
"0.58273214",
"0.58035225",
"0.5796463",
"0.57482696",
"0.5721598",
"0.5719084",
"0.570795",
"0.5680417",
"0.56639403",
"0.56167555",
"0.561523",
"0.5614806"
] | 0.6749558 | 1 |
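A hedged sketch of how the create_multiple_posts fixture in the record above might be exercised in a Django test. The import paths, the Author factory call and the TEXT/ACL_DEFAULT constants are assumptions for illustration, not taken from the record:

from django.test import TestCase

# Assumed locations; the record does not say where these live.
from posts.models import Author, Post
from posts.testutils import create_multiple_posts, TEXT, ACL_DEFAULT


class CreateMultiplePostsSketch(TestCase):
    def test_one_post_per_requested_item(self):
        author = Author.objects.create(displayname="alice")      # assumed factory call
        posts = create_multiple_posts(author, 3)

        self.assertEqual(len(posts), 3)
        self.assertEqual(Post.objects.filter(author=author).count(), 3)
        for post in posts:
            self.assertEqual(post.content, TEXT)
            self.assertEqual(post.visibility, ACL_DEFAULT)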
Test to ensure that all authors added to relationship are in the returned data Called after a retrieve relationship test has passed | def authors_in_relation(context, data, authors):
    guids = [a.id for a in authors]
    guids = map(lambda x: str(x).replace('-', ''), guids)
    for guid in guids:
        context.assertTrue(unicode(guid) in data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_retrieve_authors(self):\n sample_author()\n sample_author()\n\n res = self.client.get(reverse('authors'))\n authors = Author.objects.all()\n serializer = AuthorSerializer(authors, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_known_related_objects_identity_preservation(self):\n self.assertIs(self.aldous, self.brave_new_world.author)",
"def test_retrieve_books(self):\n book = sample_book(publisher=self.publisher)\n book.author.add(sample_author())\n # book.publisher.add(sample_publisher())\n\n res = self.client.get(reverse('books'))\n books = Book.objects.all()\n serializer = BookSerializer(books, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_get_authors_from_data(self):\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\n \"name\": \"George Elliott\",\n \"personal_name\": \"George Elliott\",\n \"last_modified\": {\n \"type\": \"/type/datetime\",\n \"value\": \"2008-08-31 10:09:33.413686\",\n },\n \"remote_ids\": {\n \"isni\": \"000111\",\n },\n \"key\": \"/authors/OL453734A\",\n \"type\": {\"key\": \"/type/author\"},\n \"id\": 1259965,\n \"revision\": 2,\n },\n status=200,\n )\n results = self.connector.get_authors_from_data(self.work_data)\n result = list(results)[0]\n self.assertIsInstance(result, models.Author)\n self.assertEqual(result.name, \"George Elliott\")\n self.assertEqual(result.openlibrary_key, \"OL453734A\")\n self.assertEqual(result.isni, \"000111\")",
"def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)",
"def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]",
"def test_create_authors(self):\n payload = {\n 'first_name': 'testname1',\n 'last_name': 'testname2',\n 'nickname': 'testnick1'\n }\n\n res = self.client.post(reverse('authors'), payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n author = Author.objects.get(id=res.data['id'])\n\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(author, key))",
"def relationships(self):",
"def test_authors():\n assert(hasattr(tekel, '__authors__'))",
"def test_get_relationship_templates(self):\n pass",
"def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors",
"def test_get_related_nodes(self):\n pass",
"def test_retrieve_l_organizations(self):\n pass",
"def test_get_specific_authors_profile(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(reverse(\"profiles:profile\", kwargs={\n 'username':self.register_data['user']['username'],\n }), format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]",
"def test_author_sorted_articles(self):\n\n self.make_test('articles', ArticleListSerializer, 'author:articles')",
"def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def test_book_related(self):\n client = APIClient()\n client.login(username=self.students[0].username, password=\"salam*123\")\n response = client.get(\"/books/4/related/\")\n json = response.json()\n self.assertEqual(json[\"count\"], 2)\n self.assertEqual(json[\"results\"][0][\"id\"], 5)\n self.assertEqual(json[\"results\"][1][\"id\"], 2)",
"def test_author_list_equality_with_valid_authentication(self) -> None:\n\n # Set the Authorization header to the appropriate\n # format as the rest_framework expects using utils.\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(\n self.super_author.get_key()\n ))\n\n response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(data, self.serialized_data, msg=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)",
"def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)",
"def test_add_followers(self):\n pass",
"def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)",
"def test_retrieve_publishers(self):\n sample_publisher()\n sample_publisher()\n\n res = self.client.get(reverse('publishers'))\n publishers = Publisher.objects.all()\n serializer = PublisherSerializer(publishers, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_create_edition_from_data(self):\n work = models.Work.objects.create(title=\"Hello\")\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\"hi\": \"there\"},\n status=200,\n )\n with patch(\n \"bookwyrm.connectors.openlibrary.Connector.get_authors_from_data\"\n ) as mock:\n mock.return_value = []\n result = self.connector.create_edition_from_data(work, self.edition_data)\n self.assertEqual(result.parent_work, work)\n self.assertEqual(result.title, \"Sabriel\")\n self.assertEqual(result.isbn_10, \"0060273224\")\n self.assertEqual(result.description, self.edition_data[\"description\"][\"value\"])\n self.assertEqual(result.languages[0], \"English\")\n self.assertEqual(result.publishers[0], \"Harper Trophy\")\n self.assertEqual(result.pages, 491)\n self.assertEqual(result.subjects[0], \"Fantasy.\")\n self.assertEqual(result.physical_format, \"Hardcover\")",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list"
] | [
"0.719106",
"0.6605452",
"0.65803057",
"0.64502394",
"0.6409618",
"0.6390426",
"0.62981063",
"0.6223942",
"0.6172895",
"0.6143107",
"0.6092482",
"0.60106426",
"0.5917884",
"0.587733",
"0.5856236",
"0.5841362",
"0.58273274",
"0.5820912",
"0.5778369",
"0.5765315",
"0.574128",
"0.5734344",
"0.57250065",
"0.57065344",
"0.56937903",
"0.56928355",
"0.5686367",
"0.56197584",
"0.55989265",
"0.557787"
] | 0.7145637 | 1 |
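The authors_in_relation helper above normalises each author's UUID by stripping hyphens before substring-matching it against the serialized response body (note the Python 2 unicode/map idioms in the record). That normalisation is equivalent to uuid.hex, as this small standalone check shows:

import uuid

guid = uuid.uuid4()
# str(guid) is the hyphenated form; stripping '-' gives the same 32-char hex string as guid.hex
assert str(guid).replace('-', '') == guid.hex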
Create Friends and Friends of Friends and associated posts | def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT):
    for friendor in friendors:
        friend.add_friend(friendor)
        friendor.add_friend(friend)
        # FriendRelationship.objects.create(friendor = friendor, friend = friend)
        if create_post:
            Post.objects.create(content = TEXT, author = friendor, visibility = visibility) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_friend(request, profile_pk, friend_pk):\n\n profile_object = Profile.objects.get(pk=profile_pk)\n friend_object = profile_object.get_friend_suggestions().get(pk=friend_pk)\n \n profile_object.friends.add(friend_object)\n profile_object.save()\n\n return redirect(reverse('show_profile_page', kwargs={'pk': profile_pk}))",
"def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend",
"def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})",
"def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )",
"def share_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n # self._posts.append(post)\n self.write_post(post.tags, friend)\n friend.update_relation(self, SHARE_POST)\n friend.append_share(post, user=self)\n # self.update_relation(friend, SHARE_POST)\n break",
"def post(owner_id=None, friends_only=None, from_group=None, message=None,\\\n attachments=None, services=None, signed=None, publish_date=None,\\\n lat=None, long=None, place_id=None, post_id=None, guid=None, mark_as_ads=None):\n params = {\n 'owner_id': owner_id,\n 'friends_only': friends_only,\n 'from_group': from_group,\n 'message': message,\n 'attachments': attachments,\n 'services': services,\n 'signed': signed,\n 'publish_date': publish_date,\n 'lat': lat,\n 'long': long,\n 'place_id': place_id,\n 'post_id': post_id,\n 'guid': guid,\n 'mark_as_ads': mark_as_ads\n }\n result = call('wall.post', **params)\n return parse_response(result)",
"def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()",
"def post(self):\n\t\tdb = getattr(g, 'db', None)\n\t\tobj = request.get_json()\n\n\t\tif ('username' not in obj) or ('session' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telif not authenticate(obj['username'],obj['session']):\n\t\t\treturn {'status':'AUTH_FAIL'}\n\t\telif ('action' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telse:\n\t\t\taction = obj['action']\n\t\t\tif action == 'ADD' and 'friend' in obj:\n\t\t\t\tqry = \"INSERT INTO friends VALUES ((SELECT id FROM profiles WHERE username = %s),\\\n\t\t\t\t\t(SELECT id FROM profiles WHERE username = %s));\"\n\t\t\t\twith db as cur:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlines = cur.execute(qry, (obj['username'],obj['friend']))\n\n\t\t\t\t\t\tif lines > 0:\n\t\t\t\t\t\t\treturn {'status':'FRIEND_ADDED'}\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\t\t\t\t\texcept sql.IntegrityError:\n\t\t\t\t\t\treturn {'status':'DUPLICATE_USER'}\n\t\t\t\t\texcept sql.OperationalError:\n\t\t\t\t\t\treturn {'status':'NO_SUCH_USER'}\n\n\t\t\telif action == 'GET':\n\t\t\t\t\"\"\" Retrieve all friends belonging to user. \"\"\"\n\t\t\t\tfriends = [] #accepted, both ends\n\t\t\t\tpending = [] #pending answer from friend\n\n\t\t\t\t# retrieve canonical friends\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT friend FROM friends WHERE target = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tfor friend in cur.fetchall():\n\t\t\t\t\t\tfriends += friend\n\n\t\t\t\t# retrieve pending requests\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT target FROM friends WHERE friend = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tprint \"friends:\"+str(friends)\n\t\t\t\t\tfor req in cur.fetchall():\n\t\t\t\t\t\tif not req[0] in friends:\n\t\t\t\t\t\t\tpending += req\n\n\t\t\t\tif not (len(friends)<=0 and len(pending)<=0):\n\t\t\t\t\treturn {'status':'QUERY_OK', 'friends':friends, 'pending':pending}\n\t\t\t\telse:\n\t\t\t\t\treturn {'status':'NO_FRIENDS'}\n\n\t\t\telif action == 'DELETE' and 'friend' in obj:\n\t\t\t\tqry = \"DELETE FROM friends WHERE target = (SELECT id FROM profiles WHERE username = %s)\\\n\t\t\t\t\tand friend = (SELECT id FROM profiles WHERE username = %s);\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'], obj['friend']))\n\t\t\t\t\tif lines>0:\n\t\t\t\t\t\treturn {'status':'FRIEND_DELETED'}\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\n\t\t\telse:\n\t\t\t\treturn {'status':'INVALID_ACTION'}",
"def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=second_user.gameplanuser)\n new_friendship.save()",
"def post(self, request, *args, **kwargs):\n frompath = urlparse(request.DATA.get('from_person')).path\n topath = urlparse(request.DATA.get('to_person')).path\n\n #print(request.DATA)\n if type(frompath) is str and type(topath) is str:\n frompath_elements = frompath.split('/')\n topath_elements = topath.split('/')\n else:\n return Response({'error: invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n fromPerson = get_object_or_404(Person, username=frompath_elements[-2])\n toPerson = get_object_or_404(Person, username=topath_elements[-2])\n count = Relationship.objects.filter(from_person=fromPerson, to_person=toPerson).count()\n\n #Reject a request to create Relationship with self\n if request.user.person.username == toPerson.username or count > 0:\n return Response({'error: Relationship with self not permitted'}, status=status.HTTP_400_BAD_REQUEST)\n\n if request.user.person.username == fromPerson.username or request.user.is_staff:\n return self.create(request, *args, **kwargs)\n return Response({'error': 'from_user does not match authenticated User'}, status=status.HTTP_400_BAD_REQUEST)",
"def create_friend_request():\n if request.method == \"GET\":\n friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]\n return jsonify({'success': True, 'friend_requests': friend_requests})\n\n if request.method == \"POST\":\n # Get recieving user id from request\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n\n if 'recieving_user_id' not in json:\n raise CustomError(400, message=\"Must include recieving_user_id\")\n\n recieving_user_id = json['recieving_user_id']\n\n # Get the user object\n recieving_user = User.query.get(recieving_user_id)\n if recieving_user is None:\n raise CustomError(\n 404,\n message='User with id: {} was not found.'.format(\n recieving_user_id)\n )\n\n # Check friendship does not already exist\n friendship_exists = Friendship.query.filter(\n (Friendship.actioning_user_id == g.user.id) |\n (Friendship.recieving_user_id == g.user.id),\n (Friendship.actioning_user_id == recieving_user_id) |\n (Friendship.recieving_user_id == recieving_user_id)\n ).first()\n\n if friendship_exists:\n raise CustomError(\n 409,\n message=\"There is either a pending friend request between the\"\n \"two users or the two users are already friends.\"\n )\n\n # Insert friend request\n friend_request = Friendship(g.user, recieving_user)\n db.session.add(friend_request)\n db.session.commit()\n\n return jsonify({'success': True}), 201",
"def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))",
"def post(self, request, *args, **kwargs):\n\n user_wall_post_comment = self.get_object()\n user_wall_post_comment.likes.add(self.request.user)\n return Response(status=201)",
"def react_to_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n attitude = self._interests[random.choice(post.tags)]\n self.update_positive_and_negative_actions(friend, attitude)\n reaction = Reaction(attitude, self.unique_id)\n post.add_reaction(reaction)\n friend.update_relation(self, REACT)\n friend.append_reaction(post, reaction)\n # self.update_relation(friend, REACT)\n break",
"def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()",
"def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)",
"def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')",
"def add_direct(request):\n friend = request.POST['friend'].strip()\n\n if userauth_models.User.objects.filter(username=friend).exists():\n friendUser = userauth_models.User.objects.get(username=friend)\n elif userauth_models.User.objects.filter(phone_number=friend):\n friendUser = userauth_models.User.objects.get(phone_number=friend)\n elif userauth_models.User.objects.filter(email=friend):\n friendUser = userauth_models.User.objects.get(email=friend)\n else:\n return HttpResponse(status=403) #no friend :(\n\n threadName = request.user.username + friendUser.username\n\n if models.MessageThread.objects.filter(title=threadName).exists():\n thread = models.MessageThread.objects.get(title=threadName)\n elif models.MessageThread.objects.filter(title=(friendUser.username + \\\n request.user.username)).exists():\n thread = models.MessageThread.objects.get(title=(friendUser.username \\\n + request.user.username))\n else:\n thread = models.MessageThread(title=threadName, psk=threadName, \\\n admin=request.user.username, friend1 = friendUser.username, is_direct=True)\n #thread = models.MessageThread(title=threadName, psk=threadName)\n thread.save()\n\n if not request.user in thread.clients.all():\n thread.clients.add(request.user)\n #thread.clients.add(friendUser)\n channel_layer = get_channel_layer()\n if 'channel_name' in request.session:\n async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n \n #if not friendUser in thread.clients.all():\n # thread.clients.add(friendUser)\n # channel_layer = get_channel_layer()\n\n # if 'channel_name' in request.session:\n # async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n\n thread_data = serializers.MessageThreadSerializer(thread).data\n\n return HttpResponse(status=200)",
"def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def post(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n args = post_parser.parse_args()\n\n # check circles\n circles = []\n for circle_id in args['circle_ids']:\n found_circle = find_circle(user, circle_id)\n if not found_circle:\n return {'msg': f'Circle {circle_id} is not found'}, 404\n circles.append(found_circle)\n\n # check reshare\n reshared_from = args['reshared_from']\n reshared_from_post = None\n if reshared_from:\n reshared_from_post = dangerously_get_post(reshared_from)\n if not reshared_from_post:\n return {\"msg\": f\"Post {reshared_from} is not found\"}, 404\n\n # check media\n media_object_names = args['media_object_names']\n if reshared_from and media_object_names:\n return {'msg': \"Reshared post is not allowed to have media\"}, 400\n\n post = create_post(\n user,\n content=args['content'],\n is_public=args['is_public'],\n circles=circles,\n reshareable=args['reshareable'],\n reshared_from=reshared_from_post,\n media_list=check_media_object_names(media_object_names, MaxPostMediaCount),\n mentioned_users=check_mentioned_user_ids(args['mentioned_user_ids']),\n is_update_avatar=False\n )\n if not post:\n return {\"msg\": f\"Not allowed to reshare post {reshared_from}\"}, 403\n return post, 201",
"def add_to_following(sender, instance, created, **kwargs):\r\n sender_= instance.sender\r\n receiver_ = instance.receiver\r\n if instance.status == 'accepted':\r\n sender_.following.add(receiver_.user)",
"def test_addFriend(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/addFriend/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(u in f.friends.all(), True)",
"def like(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.add(self.request.user)\n to_user = user_wall_post.owner\n from_user = request.user\n\n UserNotification.create_post_friend_liked_notification(from_user, to_user, 'Right', id=pk)\n return Response(status=201)",
"def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)",
"def post(self, request, format=None):\n serializer = PostSerializer(data=request.data)\n notification = NotificationViewList()\n if serializer.is_valid():\n\n if self.request.user.is_authenticated():\n try:\n target = User.objects.get(id=request.data['target_id'])\n serializer.save(\n user=User.objects.get(id=self.request.user.id),\n target_name=(target.first_name+' '+target.last_name)\n )\n except User.DoesNotExist:\n serializer.save(\n user=User.objects.get(id=self.request.user.id),\n target_name=''\n )\n data = {}\n data['type'] = 'user'\n if request.data['target_id'] is not None:\n data['user_id'] = request.data['target_id']\n data['firstname'] = User.objects.get(\n id=request.data['target_id']\n ).first_name\n data['lastname'] = User.objects.get(\n id=request.data['target_id']\n ).last_name\n else:\n data['user_id'] = None\n json_data = json.dumps(data)\n if request.data['target_id'] is not None:\n notification.add(\n request.user,\n request.data,\n User.objects.filter(\n id__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')),\n ContentType.objects.get(model='post'),\n JSONRenderer().render(serializer.data).decode('utf-8'),\n json_data\n )\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED\n )\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")",
"def create(self, validated_data):\n \"\"\" Create post with a location \"\"\"\n location_data = validated_data.pop('location')\n\n # create a new one or get a old for reference\n this_location = Location.objects.get_or_create(\n **location_data\n )\n\n # pop the photo url's data\n photo_data = validated_data.pop('photo')\n\n # must pop the tags data before it would used to create a post \n tags_data = validated_data.pop('tag')\n # create a instance of this post\n this_post = Post.objects.create(\n location = this_location[0],\n **validated_data)\n\n \"\"\"Associate tag's informatiion to post\"\"\"\n for tag in tags_data:\n this_tag = Tag.objects.get_or_create(name = tag.get('name'))\n print(tag.get('name'))\n print(this_tag)\n # attach this tag to this photos_datapost \n this_post.tag.add(this_tag[0])\n\n \"\"\"Associate the photo url \"\"\"\n for photo in photo_data:\n this_post.photo.create(name = photo.get('name'))\n # return the created post \n this_post.save()\n return this_post",
"def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp",
"def write_comment_to_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n attitude = self._interests[random.choice(post.tags)]\n self.update_positive_and_negative_actions(friend, attitude)\n comment = Comment(attitude, self.unique_id)\n post.add_comment(comment)\n friend.update_relation(self, WRITE_COMMENT)\n friend.append_comment(post, comment)\n # self.update_relation(friend, WRITE_COMMENT)\n break",
"def posts_create(request):\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('feed')\n\n else:\n form = PostForm()\n\n return render(\n request=request,\n template_name='posts/new.html',\n context={\n 'form': form,\n 'user': request.user,\n 'perfil': request.user.perfil\n }\n )"
] | [
"0.6206124",
"0.60353947",
"0.5980641",
"0.5965954",
"0.59487104",
"0.56830245",
"0.56672275",
"0.56354594",
"0.56294787",
"0.55971295",
"0.55717593",
"0.5568948",
"0.5557959",
"0.5542263",
"0.54640216",
"0.54584336",
"0.54564565",
"0.54361814",
"0.5435222",
"0.54146063",
"0.53731936",
"0.5340057",
"0.5336341",
"0.53200805",
"0.5292797",
"0.5281593",
"0.5280947",
"0.5277363",
"0.52401006",
"0.52210206"
] | 0.7941469 | 0 |
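A hedged usage sketch for the create_friends fixture in the record above: create one author plus two "friendors", wire them up, and check the mutual friendship and the per-friendor posts. The Author factory, the friends relation name and the import paths are assumptions:

from django.test import TestCase

from posts.models import Author, Post               # assumed app layout
from posts.testutils import create_friends, TEXT    # assumed module


class CreateFriendsSketch(TestCase):
    def test_friendship_is_mutual_and_posts_are_created(self):
        friend = Author.objects.create(displayname="alice")     # assumed factory calls
        friendors = [Author.objects.create(displayname="bob"),
                     Author.objects.create(displayname="carol")]

        create_friends(friend, friendors, create_post=True)

        for friendor in friendors:
            self.assertIn(friendor, friend.friends.all())       # assumes a `friends` relation
            self.assertTrue(Post.objects.filter(author=friendor, content=TEXT).exists())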
Takes post author, comment author and creates a post and associated comment | def create_post_with_comment(pauthor, cauthor, visibility, ptext, ctext):
    post = Post.objects.create(content = ptext, author = pauthor, visibility=visibility)
    comment = Comment.objects.create(comment = ctext, post = post, author = cauthor)
    return (post, comment) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_comment(post, author, content):\n return Comment.objects.create(post=post, author=author, content=content)",
"def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def post(self, request, pk):\n\n post = Blog.objects.get(pk=int(pk))\n user_id = self.request.session.get('USER_ID')\n\n try:\n user = User.objects.get(pk=user_id)\n except:\n pass\n body = self.request.POST.get('body')\n\n if user_id is None:\n messages.add_message(request, messages.ERROR, \"Please login to add comments.\")\n return HttpResponseRedirect(self)\n\n comments = Comment.objects.create(post=post, author=user, body=body)\n\n d = model_to_dict(post)\n messages.add_message(request, messages.SUCCESS, \"Comment added successfully.\")\n return self.render_to_response(d)",
"def createcomment(request, pk):\n issue = get_object_or_404(Issue, pk=pk)\n if request.method == \"POST\":\n form = CommentCreationForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.issue = issue\n comment.author = request.user\n comment.created_at = timezone.now()\n comment.save()\n return redirect('office:issue', pk=pk)\n else:\n form = CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})",
"def post(self):\n post_id = int(self.request.get('post_id'))\n post = Posts.get_by_id(post_id)\n comment = self.request.get('comment')\n submitter_id = self.get_active_user().key().id()\n\n if submitter_id:\n comment = Comments(post_id=post_id, content=comment,\n submitter_id=submitter_id)\n comment.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)",
"def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def post(self, request, *args, **kwargs):\n serializer = CommentSerializer(data=request.data)\n post_pk = self.kwargs['post_pk']\n post = Post.objects.get(pk=post_pk)\n if serializer.is_valid():\n serializer.save(author=request.user, post=post)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def createComment(owner_id=None, post_id=None, from_group=None, message=None,\\\n reply_to_comment=None, attachments=None, sticker_id=None,\\\n guid=None):\n params = {\n 'owner_id': owner_id,\n 'post_id': post_id,\n 'from_group': from_group,\n 'message': message,\n 'reply_to_comment': reply_to_comment,\n 'attachments': attachments,\n 'sticker_id': sticker_id,\n 'guid': guid\n }\n result = call('wall.createComment', **params)\n return parse_response(result)",
"async def create_reply(*, comment: models.Comment = Depends(resolve_comment), created_comment: CreateComment,\n current_user: models.User = Depends(resolve_current_user), db: Session = Depends(get_db)):\n return crud.create_comment(db, author_id=current_user.id, parent_resub_id=comment.parent_resub_id,\n parent_post_id=comment.parent_post_id, parent_comment_id=comment.id,\n content=created_comment.content)",
"def create_post(request):\n if request.method == 'POST':\n title = request.POST['title']\n content = request.POST['content']\n user_id = request.POST['author_id']\n category = request.POST['category']\n\n slug = \"-\".join(list(map(lambda word: word.lower(), title.split())))\n author = User.objects.get(id=int(user_id))\n\n # save info in models\n post = Post()\n post.author = author\n post.category = category\n post.title = title\n post.content = content\n post.slug = slug\n post.save()\n return redirect('post')\n\n return render(request, 'posts/create_post.html')",
"def process_comment(request, comment, post):\n\n if request.user.is_authenticated:\n # We already set auth user's name and email in the form's inital vals.\n comment.author = request.user\n\n # Is this a threaded comment?\n if request.POST.get(\"parent_id\"):\n comment.parent = Comment.objects.get(id=request.POST.get(\"parent_id\"))\n\n # If commenter is logged in, override name and email with stored values from User object\n if request.user.is_authenticated:\n comment.name = request.user.get_full_name()\n comment.email = request.user.email\n\n # Set required relationship to Post object\n comment.post = post\n\n # Get commenter's IP and User-Agent string\n # ip = get_ip(request)\n # if ip is not None:\n # comment.ip_address = ip\n comment.user_agent = request.META.get(\"HTTP_USER_AGENT\", \"\")\n\n # Run spam check\n comment.spam = spam_check(comment)\n\n # Strip disallowed HTML tags. See tangerine docs to customize.\n comment.body = sanitize_comment(comment.body)\n\n # Call comment approval workflow\n comment.approved = get_comment_approval(comment.email, request.user.is_authenticated)\n if comment.approved:\n messages.add_message(request, messages.SUCCESS, \"Your comment has been posted.\")\n else:\n messages.add_message(request, messages.INFO, \"Your comment has been held for moderation.\")\n\n comment.save()\n\n # Alert post author that comment needs moderation, or that it's been auto-published:\n send_comment_moderation_email(comment)",
"def create_comment_immediately_below_post():\n post = create_a_post()\n comment = Comment.create(post=post, body=\"I'm a comment right below a post\")\n comment.save()\n return comment",
"def create_post(category, author, name, content, status):\n return Post.objects.create(category=category, author=author, name=name, content=content, status=status)",
"def post(self, post_id):\n comment_content = self.request.get(\"comment_content\")\n Post.add_comment(int(post_id), int(\n self.user.get_id()), comment_content)\n self.redirect(\"/blog/\" + post_id + \"/comments\")",
"def test_add_comment(self):\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)",
"def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return",
"def create_a_post():\n subj = create_subject()\n post = Post.create(subject=subj, title=\"A great title\", body=\"Just a great day!\")\n post.save()\n return post",
"def createPost(content):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO posts (name, content) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, content))\n finish(con)",
"def setUp(self):\n self.comment = Comment()\n self.comment.comment_description = 'This is a test comment'\n self.post = create_post()\n self.post.user = self.create_user()\n self.post.save()\n self.comment.post = self.post\n self.comment.user = self.user\n self.comment.save()",
"def test_comment_creation(self):\n response = self.client.post(reverse('posts:comment_create'),\n data={\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description': 'This is a '\n 'test_comment'\n }, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data, {\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description': 'This is a '\n 'test_comment'\n })",
"def new_comment(self, post_id, comment):\n # *don't* pass in username and password. if you do, that wordpress user's\n # name and url override the ones we provide in the xmlrpc call.\n #\n # also, use '' instead of None, even though we use allow_none=True. it\n # converts None to <nil />, which wordpress's xmlrpc server interprets as\n # \"no parameter\" instead of \"blank parameter.\"\n #\n # note that this requires anonymous commenting to be turned on in wordpress\n # via the xmlrpc_allow_anonymous_comments filter.\n return self.proxy.wp.newComment(self.blog_id, '', '', post_id, comment)",
"def _add_comment(self, comment, post_id, page_id, parent_comment=None):\n user_id = self._get_or_create_user(comment['from'])\n message = self._clean_message(comment)\n if len(message) > 0:\n columns = '(user, post, page, fb_id, created_time, message, like_count, comment_count'\n values = (user_id, post_id, page_id, comment['id'], comment['created_time'],\n message, comment['like_count'], comment['comment_count'])\n values_placeholder = '(%s,%s,%s,%s,%s,%s,%s,%s'\n if parent_comment is None:\n columns = columns + ')'\n values_placeholder = values_placeholder + ')'\n else:\n columns = columns + ',parent_comment)'\n values = values + (parent_comment,)\n values_placeholder = values_placeholder + ',%s)'\n return self._insert_if_possible('INSERT INTO comment {} VALUES {}'.format(columns, values_placeholder),\n values)\n else:\n return False",
"def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict",
"def create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, body, author_id)'\n ' VALUES (?, ?, ?)',\n (title, body, g.user['id'])\n )\n db.commit()\n return redirect(url_for('blog.index'))\n\n return render_template('blog/create.html')",
"def addPost(self,text,id,url,date):\n self.topComments.append(Post(text,id,url,date))\n return None",
"def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)",
"def add_comment_to_post(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = CommentForm()\n return render(request, 'blog/comment_form.html', {'form': form})",
"def create(self, comment):\r\n url = self.get_url()\r\n\r\n # when creating commits they don't get wrapped in {\"body\": <comment>}\r\n return http.Request('POST', url, params=comment), parsers.parse_json",
"def create_comment(session: Session) -> Comment:\n try:\n regular_user = (\n session.query(User).filter(User.username == \"obnoxioustroll69\").first()\n )\n post = session.query(Post).filter(Post.id == 1).first()\n comment = Comment(\n user_id=regular_user.id,\n post_id=post.id,\n body=\"This post about SQLAlchemy is awful. You didn't even bother to explain how to install Python, which is where I (and so many others) got stuck. Plus, your code doesn't even work!! I cloned your code and it keeps giving me `environment variable` errors... WTF are environment variables?!!?!?\",\n upvotes=2,\n )\n session.add(comment) # Add the Comment\n session.commit() # Commit the change\n LOGGER.success(f\"Created comment {comment} posted by user {regular_user}\")\n return comment\n except IntegrityError as e:\n LOGGER.error(e.orig)"
] | [
"0.7968736",
"0.69776434",
"0.680873",
"0.6670445",
"0.6624297",
"0.6575736",
"0.6529041",
"0.6456722",
"0.64194477",
"0.6396711",
"0.6375334",
"0.6340401",
"0.63245595",
"0.6278583",
"0.62078166",
"0.6168181",
"0.6167977",
"0.61602026",
"0.61574775",
"0.6155002",
"0.6143866",
"0.6140876",
"0.6121726",
"0.61149126",
"0.61007464",
"0.60991216",
"0.608715",
"0.60754097",
"0.60615396",
"0.6057131"
] | 0.79196894 | 1 |
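Likewise, a minimal sketch for create_post_with_comment from the record above, asserting that the returned comment hangs off the returned post; model locations and the Author factory are again assumptions:

from django.test import TestCase

from posts.models import Author                        # assumed app layout
from posts.testutils import create_post_with_comment   # assumed module


class PostWithCommentSketch(TestCase):
    def test_comment_is_attached_to_post(self):
        pauthor = Author.objects.create(displayname="alice")    # assumed factory calls
        cauthor = Author.objects.create(displayname="bob")

        post, comment = create_post_with_comment(
            pauthor, cauthor, visibility="PUBLIC", ptext="post body", ctext="comment body")

        self.assertEqual(comment.post, post)
        self.assertEqual(post.author, pauthor)
        self.assertEqual(comment.author, cauthor)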
Takes response.data and confirms no repeated guids (No repeated posts) | def assertNoRepeatGuids(context, posts):
    guids = [p['guid'] for p in posts]
    context.assertTrue(len(set(guids)) == len(posts), "Some guids repeated") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_response_reusage_after_replied(self):\n\n post1 = self._create_tweet(\n content=\"I need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n\n resp1 = Response.objects.upsert_from_post(post1)\n\n support = UserProfile.objects.upsert('Twitter', dict(screen_name='@test2'))\n self._create_tweet(\n user_profile=support,\n content=\"We cant help you right now. Sorry.\",\n channel=self.outbound,\n demand_matchables=True,\n in_reply_to=post1)\n\n post2 = self._create_tweet(\n content=\"I still need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertNotEqual(resp1.id, resp2.id)",
"def check_medtag_reports(request):\n\n json_resp = {}\n json_resp['count'] = 0\n medtag_arts = Report.objects.all().exclude(institute = 'PUBMED')\n # for el in pubmed_arts:\n # if el.id_report.startswith('PUBMED'):\n json_resp['count'] = medtag_arts.count()\n return JsonResponse(json_resp,safe=False)",
"def check_for_duplicate_phone_numbers(d):\n\n print('# This function is under maintenance. Please try again later.')\n return d",
"def test_response_reusage(self):\n\n post1 = self._create_db_post(content=\"@test I need a foo.\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n self.assertTrue(self.sc.inbound_channel.is_assigned(post1))\n\n conv1 = self.sc.upsert_conversation(post1)\n post2 = self._create_db_post(content=\"I still need a foo!\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n conv2 = self.sc.upsert_conversation(post2)\n\n resp1 = Response.objects.upsert_from_post(post1)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertEqual(conv1.id, conv2.id)\n self.assertEqual(resp1.id, resp2.id)\n self.assertTrue(resp2.post_date > resp1.post_date)",
"def check_soundcloud_ids_mismatch():\n wiki = pywikibot.Site('en', 'wikipedia')\n category = pywikibot.Category(wiki, CATEGORY)\n pages = pagegenerators.CategorizedPageGenerator(category)\n\n total_pages = 0\n processed = 0\n result = []\n\n for page in pages:\n total_pages += 1\n res = compare_soundcloud_ids(page, wiki)\n\n if res == True:\n # The IDs are the same, nothing to do. The category may contains cached entries\n print('The ID for \"%s\" are the same in both the article and Wikidata.' % page.title())\n processed += 1\n continue\n elif not res:\n print('Skipping %s. It has no SoundCloud ID' % page.title())\n processed += 1\n continue\n\n result.append([res, page.title()])\n\n for ids, title in result:\n # Now we have two IDs (one from article, another from repo).\n # Let us check their associated movie titles in the website\n repoId = ids['repoId']\n wikiId = ids['articleId']\n c_url, response_code1 = check_soundcloud_id(repoId)\n c_url2, response_code2 = check_soundcloud_id(wikiId)\n\n if c_url == c_url2:\n # Both valid\n print('''Both SoundClouds IDs are valid for the title. %s''' % title)\n processed += 1\n elif response_code1 == 404 and response_code1 != response_code2:\n # Handle case\n processed += 1\n elif response_code2 == 404 and response_code2 != response_code1:\n # Handle case\n processed += 1\n else:\n # Handle final case\n pass\n\n print('Finished! Total pages: %s. Processed: %s' %(total_pages, processed))",
"def check_dataset_duplicate_ids(self, dataset):\n ids = [a['_id'] for a in dataset]\n # Construct list of duplicates\n dupe_ids = [a for n, a in enumerate(ids) \n if a in ids[:n]]\n if len(dupe_ids) > 0:\n # Get list of names for the duplicate pandas\n dupe_names = [a['en.name'] for a in dataset \n if a['_id'] in dupe_ids]\n raise IdError(\"ERROR: duplicate ids for en.names: %s\" \n % str(dupe_names))",
"def _process_response(self, resp):\n signals = []\n resp = resp.json()\n fresh_posts = posts = resp['data']\n paging = resp.get(self._paging_field) is not None\n self.logger.debug(\"Facebook response contains %d posts\" % len(posts))\n\n # we shouldn't see empty responses, but we'll protect our necks.\n if len(posts) > 0:\n self.update_freshness(posts)\n fresh_posts = self.find_fresh_posts(posts)\n paging = len(fresh_posts) == self.limit()\n\n # store the timestamp of the oldest fresh post for use in url\n # preparation later.\n if len(fresh_posts) > 0:\n self.prev_stalest = self.created_epoch(fresh_posts[-1])\n\n signals = [FacebookSignal(p) for p in fresh_posts]\n self.logger.debug(\"Found %d fresh posts\" % len(signals))\n\n return signals, paging",
"def check_for_duplicate_subject_identifier(self):\n pass",
"def check_if_duplicate(self, data):\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND location = '{}'\\\n \".format(self.table, data['topic'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup with same topic at the same venue\\\n already exists'\n\n query = \"SELECT * FROM {} WHERE happening_on = '{}' AND location = '{}'\\\n \".format(self.table, data['happening_on'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date at the same venue \\\n already exists'\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND happening_on = '{}'\\\n \".format(self.table, data['topic'], data['happening_on'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date with same topic \\\n already exists'\n\n return False, None",
"def is_unique(self, id, items):\r\n copies = 0\r\n for i in items:\r\n if type(i) is dict:\r\n if i['id'] == id:\r\n copies = copies + 1\r\n else:\r\n if i.id == id:\r\n copies = copies + 1\r\n if copies >= 2:\r\n return False\r\n else:\r\n return True",
"def _raise_if_duplicates(counts: Dict[str, int]) -> None:\n duplicates: List[str] = []\n for nickname, count in counts.items():\n if count > 1:\n duplicates.append(nickname)\n if len(duplicates) > 0:\n # TODO This is not always nickname\n raise ValueError(f'\\'nickname\\' not unique {duplicates}')",
"def check_repost_exists(type, id):\n \n try:\n soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))\n return True\n except HTTPError as e:\n if e.response.status_code == 404:\n db.mark_as_deleted(type, id)\n return False\n else:\n raise",
"def similar_random(id):\n result_ids = get_similar(tree, ids, embeds, id, 50)\n\n if result_ids == False:\n return 'ID not found', 404\n\n shuffle(result_ids)\n\n result_ids = result_ids[0:10]\n\n out = {\n 'ids': result_ids\n }\n\n return out, 200",
"def test_retrieve_tags_unique(self):\n tag1 = Tag.objects.create(user=self.user, name='Tag1')\n Tag.objects.create(user=self.user, name='Tag2')\n recipe1 = Recipe.objects.create(\n user=self.user,\n title=\"Rec1\",\n time_minutes=20,\n price=Decimal('4.85')\n )\n recipe2 = Recipe.objects.create(\n user=self.user,\n title=\"Rec2\",\n time_minutes=20,\n price=Decimal('4.85')\n )\n\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n\n serializer1 = TagSerializer(tag1)\n\n self.assertIn(serializer1.data, res.data)\n self.assertEqual(len(res.data), 1) # unique results",
"def test_retrieve_tags_assigned_unique(self):\n tag1 = Tag.objects.create(user = self.user,name='Breakfast')\n tag2 = Tag.objects.create(user=self.user,name='Lunch')\n\n recipe1 = Recipe.objects.create(user=self.user,title='Goose Liver on toast',price=5.00,time_minutes=15)\n recipe2 = Recipe.objects.create(user = self.user,title='Egg Benedict',price=5.00,time_minutes=15)\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL,{'assigned_only':1})\n self.assertEqual(len(res.data),1)",
"def test_duplicate_entries(self):",
"def check_missing_unique_link():\n from mspray.apps.main.utils import queryset_iterator\n\n queryset = SprayDay.objects.filter(spraypoint__isnull=True).only(\n \"pk\", \"location_id\"\n )\n for record in queryset_iterator(queryset):\n add_unique_record(record.pk, record.location_id)\n gc.collect()",
"def user_response_to_post(self, request, pk):\n post_objects_count = Post.objects.filter(id=pk, liked_users__id=request.user.id).count()\n post_objects = Post.objects.get(id=pk)\n if post_objects_count !=0:\n post_objects.liked_users.remove(request.user)\n response_msg = \"You disliked the post\"\n else:\n post_objects.liked_users.add(request.user)\n response_msg = \"You have liked the post\"\n return Response({'data': response_msg}, status=status.HTTP_200_OK)",
"def test_retrive_tags_assigned_unique(self):\n tag = Tag.objects.create(user=self.user, name=\"Breakfast\")\n Tag.objects.create(user=self.user, name=\"Lunch\")\n recipe1 = Recipe.objects.create(\n title=\"Pancake\",\n making_time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title=\"Italian Fried Egg\",\n making_time_minutes=5,\n price=10.00,\n user=self.user\n )\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n self.assertEqual(len(res.data), 1)",
"def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400",
"def test_duplicate_questions(self):\n self.is_authenticated()\n self.post_question()\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_report_article_more_than_once(self):\n from rest_framework.test import APIClient\n client = APIClient()\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result['errors'],'You can only report an article once')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def check_unique_ids(request: Request, policy: RequestPolicy, logger: Logger) -> None:\n seen: Dict[str, int] = {}\n for bundle in request.bundles:\n if bundle.id in seen:\n raise KSR_BUNDLE_UNIQUE_Violation(\n f\"More than one bundle with id {bundle.id}\"\n )\n seen[bundle.id] = 1\n\n _num_bundles = len(request.bundles)\n logger.info(f\"KSR-BUNDLE-UNIQUE: All {_num_bundles} bundles have unique ids\")\n return",
"def _get_unique_genres(connection):\n print('---Getting unique genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict",
"def assert_json_failure_response_is_username_collision(self, response):\r\n self.assertEqual(400, response.status_code)\r\n payload = json.loads(response.content)\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('already exists', payload.get('value'))",
"def test_retrieve_ingredients_assigned_unique(self):\n\n ingredient = create_sample_ingredient(user=self.user, name=\"Orange\")\n create_sample_ingredient(user=self.user, name='Chocolate')\n\n recipe1 = create_sample_recipe(\n user=self.user,\n title=\"Orange Juice\",\n time_minutes=10,\n price=6.00\n )\n\n recipe2 = create_sample_recipe(\n user=self.user,\n title=\"Orange Pie\",\n time_minutes=40,\n price=20.00\n )\n\n recipe1.ingredients.add(ingredient)\n recipe2.ingredients.add(ingredient)\n\n response = self.client.get(INGREDIENTS_URL, {\"assigned_only\": 1})\n\n self.assertEqual(len(response.data), 1)\n # we will return 1, because we assigned only 1 id to two recipes\n # also here id is in int",
"def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True",
"def get_onehundred_new_posts():\n one_hundred_new_posts = []\n for post in reddit.subreddit(\"all\").new():\n try:\n one_hundred_new_posts.append(post.id)\n submission = reddit.submission(id=post.id)\n except:\n one_hundred_new_posts.remove(post.id)\n return one_hundred_new_posts",
"def test_retrieve_tags_assigned_unique(self):\n tag = Tag.objects.create(user=self.user, name='Breakfast')\n Tag.objects.create(user=self.user, name='Lunch')\n\n recipe1 = Recipe.objects.create(\n title='Pancakes',\n time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe1.tags.add(tag)\n\n recipe2 = Recipe.objects.create(\n title='Porridge',\n time_minutes=15,\n price=12.00,\n user=self.user\n )\n recipe2.tags.add(tag)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n\n self.assertEqual(len(res.data), 1)",
"def test_retrieve_tags_assigned_unique(self):\n\n tag = Tag.objects.create(user=self.user, name='Breakfast')\n Tag.objects.create(user=self.user, name='Lunch')\n\n recipe1 = Recipe.objects.create(\n title='Pancakes',\n time_minutes=5,\n price=3.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title='Porridge',\n time_minutes=3,\n price=1.00,\n user=self.user\n )\n\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n response = self.client.get(URL_TAGS, {'assigned_only': True})\n\n self.assertEqual(len(response.data), 1)"
] | [
"0.5659422",
"0.55370516",
"0.5480621",
"0.5440656",
"0.5439443",
"0.5430286",
"0.53764886",
"0.53743136",
"0.5368204",
"0.53578305",
"0.53345466",
"0.532668",
"0.530157",
"0.5287817",
"0.5268618",
"0.52629507",
"0.5260671",
"0.5257835",
"0.5253612",
"0.52466136",
"0.52329594",
"0.5224962",
"0.5224153",
"0.5218604",
"0.5210464",
"0.5204218",
"0.51977813",
"0.5195533",
"0.5182614",
"0.51810396"
] | 0.6993569 | 0 |
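
A minimal, runnable sketch of the duplicate-guid check in the record above, assuming each post is a plain dict carrying a 'guid' key; the snake_case helper and the test-class names are illustrative additions, not part of the dataset.

import unittest


def assert_no_repeat_guids(context, posts):
    # Same check as the positive document: de-duplicate the guids and compare counts.
    guids = [p['guid'] for p in posts]
    context.assertTrue(len(set(guids)) == len(posts), "Some guids repeated")


class GuidCheckExample(unittest.TestCase):
    def test_unique_guids_pass(self):
        posts = [{'guid': 'a'}, {'guid': 'b'}, {'guid': 'c'}]
        assert_no_repeat_guids(self, posts)  # should not raise

    def test_repeated_guids_fail(self):
        posts = [{'guid': 'a'}, {'guid': 'a'}]
        with self.assertRaises(AssertionError):
            assert_no_repeat_guids(self, posts)


if __name__ == '__main__':
    unittest.main()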
Compares a list of authors against a list of displaynames | def cross_check(context, authors, poscom):
displaynames = [x['author']['displayname'] for x in poscom]
for author in authors:
if author.user.username not in displaynames:
context.assertFalse(True, "%s not in list" %author.user.username) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def display_authors(self, *args):\n return ', '.join(author.name for author in args[0].authors.all()[:3])",
"def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)",
"def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)",
"def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)",
"def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)",
"def test_author_simplest(self):\n invenio_search = 'author:ellis'\n spires_search = 'find a ellis'\n self._compare_searches(invenio_search, spires_search)",
"def test_exactauthor_simple(self):\n invenio_search = 'exactauthor:\"ellis, j\"'\n spires_search = 'find ea ellis, j'\n self._compare_searches(invenio_search, spires_search)",
"def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors",
"def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)",
"def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def test_super_short_author_name(self):\n spi_search = \"fin a er and cn cms\"\n inv_search = \"author:er collaboration:cms\"\n self._compare_searches(inv_search, spi_search)",
"def test_author_reverse(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a j ellis'\n self._compare_searches(invenio_search, spires_search)",
"def test_author_full_first(self):\n invenio_search = 'author:\"ellis, john*\" or exactauthor:\"ellis, j *\" or exactauthor:\"ellis, j\" or exactauthor:\"ellis, jo\" or exactauthor:\"ellis, joh\" or author:\"ellis, john, *\"'\n spires_search = 'find a ellis, john'\n self._compare_searches(invenio_search, spires_search)",
"def test_author_full_initial(self):\n inv_search = 'author:\"klebanov, ig* r*\" or exactauthor:\"klebanov, i r\"'\n spi_search = \"find a klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)",
"def has_duplicates_authors(L):\r\n # make a copy of t to avoid modifying the parameter\r\n s = L[:]\r\n s.sort()\r\n\r\n # check for adjacent elements that are equal\r\n for i in range(len(s)-1):\r\n if s[i] == s[i+1]:\r\n return True\r\n return False",
"def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)",
"def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())",
"def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)",
"def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]",
"def test_citedby_author(self):\n inv_search = 'citedby:author:doggy'\n spi_search = 'find citedby author doggy'\n self._compare_searches(inv_search, spi_search)",
"def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. '\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors",
"def format_authors(self, style):\n def format_one_author(author, style):\n \"\"\"\n Helper function that does it for one author.\n \"\"\"\n # Check If there's no given name.\n # We should probably try to handle the no family name case, but\n # I'm not sure when we will actually come across an example...\n if \"given\" not in author or author[\"given\"] == []:\n return author[\"family\"]\n # Otherwise...\n family_name = author[\"family\"]\n given_names = author[\"given\"]\n\n # deal with a pathological case, 10.1016/j.jmr.2018.02.009\n ns = given_names.split()\n for i, name in enumerate(ns):\n if i >= 1 and name.startswith('-'):\n this_name = ns.pop(i)\n ns[i - 1] += this_name\n given_names = \" \".join(ns)\n\n if style == \"display\":\n return (\"\".join(n[0] for n in re.split(r\"[\\s-]\", given_names))\n + \" \" + family_name)\n elif style == \"acs\":\n # \"Jean-Baptiste Simon\" -> [[\"Jean\", \"Baptiste\"], [\"Simon\"]]\n split_both = [name.split('-') for name in given_names.split()]\n # [[\"Jean\", \"Baptiste\"], [\"Simon\"]] -> \"J.-B. S\"\n joined_both = \". \".join([\".-\".join(n[0] for n in names)][0]\n for names in split_both)\n return (family_name + \", \" + joined_both + \".\")\n elif style == \"bib\":\n s = family_name + \", \" + given_names\n return s.replace(\". \", \".\\\\ \") # Must use control spaces\n elif style == \"full\":\n return given_names + \" \" + family_name\n # Otherwise, grumble.\n else:\n raise ValueError(f\"Invalid value '{style}' for style.\")\n\n if self.authors is not None:\n return [format_one_author(author, style) for author in self.authors]",
"def test_absorbs_naked_author_search(self):\n invenio_search = \"author:ellis\"\n spi_search = \"author ellis\"\n self._compare_searches(invenio_search, spi_search)",
"def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])",
"def parse_authors():\n import subprocess\n try:\n output = subprocess.check_output(['git', 'shortlog', '-s'],\n universal_newlines=True)\n except Exception as ex:\n print('ex = {!r}'.format(ex))\n return []\n else:\n striped_lines = (l.strip() for l in output.split('\\n'))\n freq_authors = [line.split(None, 1) for line in striped_lines if line]\n freq_authors = sorted((int(f), a) for f, a in freq_authors)[::-1]\n # keep authors with uppercase letters\n authors = [a for f, a in freq_authors if a.lower() != a]\n return authors",
"def print_authors(popular_authors):\n\n print('\\nThe list of authors being listed as per their popularity:\\n')\n for author in popular_authors:\n print(author[0] + '\\t-\\t' + str(author[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')"
] | [
"0.64632857",
"0.63929",
"0.6384049",
"0.6332246",
"0.62227446",
"0.6216081",
"0.6199922",
"0.615091",
"0.6108188",
"0.6102564",
"0.6097298",
"0.6095483",
"0.6072117",
"0.60518503",
"0.59991443",
"0.5962012",
"0.595125",
"0.59264",
"0.5892751",
"0.5881848",
"0.5834737",
"0.5828909",
"0.58197445",
"0.5809561",
"0.57794464",
"0.5775128",
"0.5758521",
"0.5700122",
"0.56798327",
"0.5664223"
] | 0.68802696 | 0 |
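
A minimal, runnable sketch of the display-name cross-check in the record above; SimpleNamespace stands in for the real author/user objects (an assumption about their shape), and context.fail() is used as a more direct equivalent of the original assertFalse(True, msg).

import unittest
from types import SimpleNamespace


def cross_check(context, authors, poscom):
    # Collect the display names from the posts/comments and require every
    # author's username to appear among them.
    displaynames = [x['author']['displayname'] for x in poscom]
    for author in authors:
        if author.user.username not in displaynames:
            context.fail("%s not in list" % author.user.username)


class CrossCheckExample(unittest.TestCase):
    def test_every_author_appears(self):
        authors = [SimpleNamespace(user=SimpleNamespace(username='alice'))]
        posts = [{'author': {'displayname': 'alice'}}]
        cross_check(self, authors, posts)  # passes silently

    def test_missing_author_is_reported(self):
        authors = [SimpleNamespace(user=SimpleNamespace(username='bob'))]
        posts = [{'author': {'displayname': 'alice'}}]
        with self.assertRaises(AssertionError):
            cross_check(self, authors, posts)


if __name__ == '__main__':
    unittest.main()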
Cross checks a list of authors against posts | def assertAuthorsInPosts(context, authors, posts):
cross_check(context, authors, posts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)",
"def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def assertAuthorsInComments(context, authors, comments):\n cross_check(context, authors, comments)",
"def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors",
"def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })",
"def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)",
"def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in",
"def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors",
"def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications",
"def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)",
"def test_absorbs_naked_author_search(self):\n invenio_search = \"author:ellis\"\n spi_search = \"author ellis\"\n self._compare_searches(invenio_search, spi_search)",
"def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")",
"def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)",
"def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)",
"def test_author_list_equality_with_valid_authentication(self) -> None:\n\n # Set the Authorization header to the appropriate\n # format as the rest_framework expects using utils.\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(\n self.super_author.get_key()\n ))\n\n response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(data, self.serialized_data, msg=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_author_reverse(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a j ellis'\n self._compare_searches(invenio_search, spires_search)",
"def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)",
"def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r",
"def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)",
"def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)",
"def test_retrieve_authors(self):\n sample_author()\n sample_author()\n\n res = self.client.get(reverse('authors'))\n authors = Author.objects.all()\n serializer = AuthorSerializer(authors, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def mixed_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": {\"$gt\": 0},\n \"authorsMaleCount\": {\"$gt\": 0},\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query"
] | [
"0.72640485",
"0.6881462",
"0.6769553",
"0.65692633",
"0.6538516",
"0.63877165",
"0.6319493",
"0.62692463",
"0.60791737",
"0.6033017",
"0.5904452",
"0.5835843",
"0.58165914",
"0.579224",
"0.5788046",
"0.57680523",
"0.57557714",
"0.5726369",
"0.5697314",
"0.56669766",
"0.5661073",
"0.5650174",
"0.5646964",
"0.5643317",
"0.5618991",
"0.5617926",
"0.55870587",
"0.55630904",
"0.55617523",
"0.55578864"
] | 0.7797011 | 0 |
Cross checks a list of authors against comments | def assertAuthorsInComments(context, authors, comments):
cross_check(context, authors, comments) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)",
"def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in",
"def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"",
"def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r",
"def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications",
"def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]",
"def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)",
"def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)",
"def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)",
"def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })",
"def test_draft_list_only_display_authors_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor)\n AidFactory(name='Is this just fantasy?')\n\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' in content\n assert 'Is this just fantasy?' not in content",
"def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)",
"def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())",
"def check_comments(self, args):\n\n for submission in args.comments:\n if any(char.isalpha() for char in submission[1]) \\\n or self._illegal_chars.search(submission[1]) != None:\n raise ValueError",
"def test_absorbs_naked_author_search(self):\n invenio_search = \"author:ellis\"\n spi_search = \"author ellis\"\n self._compare_searches(invenio_search, spi_search)",
"def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")",
"def test_citedby_author(self):\n inv_search = 'citedby:author:doggy'\n spi_search = 'find citedby author doggy'\n self._compare_searches(inv_search, spi_search)",
"def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)",
"def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)",
"def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)",
"def find_authors(code):\n url = baseurl(code)\n page = req(url)\n soup = BeautifulSoup(page, 'lxml')\n addr = [t.attrs.get('content', None) \n for t in soup.find_all(\"meta\", {\"name\": \"citation_author_email\"})]\n \n # corresponding authors will have their email under another tag too\n corr = [t.find('a').attrs.get('href', None)\n for t in soup.find_all(None, {\"class\": \"author-corresp-email-link\"})]\n\n addr = [a for a in addr if a is not None]\n corr = [a.replace('mailto:', '') for a in corr if a is not None]\n\n return dict(corr=list(set(corr)), all=list(set(addr)))",
"def test_author_full_initial(self):\n inv_search = 'author:\"klebanov, ig* r*\" or exactauthor:\"klebanov, i r\"'\n spi_search = \"find a klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)",
"def get_coauthors_by_author(cached_list, cached_set, author_name):\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n coauthors = {}\n if author['dblp'].__contains__('coauthors'):\n for author_key in author['dblp']['coauthors']:\n coauthors[author_key] = { 'en': author_key, 'zh': '' }\n\n if author['cdblp'].__contains__('coauthors'):\n for author_key in author['cdblp']['coauthors']:\n if coauthors.__contains__(author_key['full_name']):\n coauthors[author_key['full_name']]['zh'] = author_key['zh']\n else:\n coauthors[author_key['full_name']] = { 'en': author_key['full_name'], 'zh': author_key['zh'] }\n\n return coauthors"
] | [
"0.74669504",
"0.65934074",
"0.6494307",
"0.6315845",
"0.6253867",
"0.62438315",
"0.61274093",
"0.6090594",
"0.5927208",
"0.59244573",
"0.58965456",
"0.5887174",
"0.58157754",
"0.579177",
"0.57523257",
"0.5713481",
"0.5709423",
"0.5708671",
"0.5706471",
"0.56644917",
"0.5663394",
"0.5654191",
"0.5637352",
"0.5629413",
"0.56229705",
"0.56206954",
"0.56068504",
"0.55586964",
"0.552714",
"0.55246806"
] | 0.7966482 | 0 |
Takes a list of cached authors and adds them to the author's follower list | def create_cached_author_followers(author, followers):
for f in followers:
author.followers.add(f) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list",
"def get_coauthors_by_author(cached_list, cached_set, author_name):\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n coauthors = {}\n if author['dblp'].__contains__('coauthors'):\n for author_key in author['dblp']['coauthors']:\n coauthors[author_key] = { 'en': author_key, 'zh': '' }\n\n if author['cdblp'].__contains__('coauthors'):\n for author_key in author['cdblp']['coauthors']:\n if coauthors.__contains__(author_key['full_name']):\n coauthors[author_key['full_name']]['zh'] = author_key['zh']\n else:\n coauthors[author_key['full_name']] = { 'en': author_key['full_name'], 'zh': author_key['zh'] }\n\n return coauthors",
"def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)",
"def author_following(self):\n\t\tpass",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def authors(self, authors):\n\n self._authors = authors",
"def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])",
"def updateAuthors(self,event=None):\r\n self.popAuthors()\r\n self.primeAuthor.updateVals(self.authorList)\r\n self.coAuthor.updateVals(self.authorList)\r\n self.correspond.updateVals(self.authorList)",
"def load_authors(keys: [str]):\n not_in_local_cache = []\n result = []\n for key in keys:\n try:\n result.append(_author_data_cache[key].to_dict())\n del _author_data_cache[key]\n except KeyError:\n not_in_local_cache.append(key)\n \n if len(not_in_local_cache):\n doc_refs = [db.collection(AUTHOR_CACHE_COLLECTION).document(key)\n for key in keys]\n data = db.get_all(doc_refs)\n for datum in data:\n if not datum.exists:\n raise cache_buddy.CacheMiss(datum.id)\n result.append(datum.to_dict())\n return [_decompress_record(r) for r in result]",
"def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)",
"def follow_following_followers(self):\n self.logger.log(\"starting follow_following_followers...\")\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n try:\n try:\n followw = perform_with_ran_delay(self.instagram.get_followers, acc, 150, 15,\n delayed=True)\n accountstofollow = followw[\"accounts\"]\n random.shuffle(accountstofollow)\n if len(accountstofollow) > 10:\n accountstofollow = accountstofollow[:10]\n for ac in accountstofollow:\n if not self.is_user_following(ac.identifier):\n self.add_following(ac.identifier)\n self.logger.log(\"following: {}\".format(ac.username))\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n finally:\n sleep(3)",
"def follow(self, followerId, followeeId):\n if followerId not in self.follow_map:\n self.follow_map[followerId] = set()\n \n self.follow_map[followerId].add(followeeId)",
"def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def get_authors(self):\n return [aer.author for aer in self.authorentryrank_set.all()]",
"def follow(self, followerId: int, followeeId: int) -> None:\n # Time Complexity: O(1)\n if followerId != followeeId:\n if followerId not in self.followees:\n self.followees[followerId] = set()\n\n self.followees[followerId].add(followeeId)",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list",
"def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)",
"def follow(self, follower, followee):\n pass",
"def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors",
"def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)",
"def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications",
"def query_authors(cls):\n authors = from_cache('AuthorsList')\n if not authors:\n authors = SuiAuthor.all().order('name').fetch(400)\n to_cache('AuthorsList', authors)\n return authors",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def __add_publication(self, authors, publication):\n for author in authors:\n\n if author not in self.author_to_publications:\n self.author_to_publications[author] = set()\n self.author_to_publications[author].add(publication)",
"def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)",
"def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors",
"def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]",
"def Authors(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('authors', default)\n return [HEP.AuthorObject(i) for i in tmp]"
] | [
"0.64747363",
"0.6431524",
"0.6423831",
"0.6329321",
"0.6274541",
"0.6242031",
"0.6238875",
"0.62265736",
"0.6178677",
"0.61373085",
"0.6131011",
"0.6087273",
"0.60318464",
"0.59967387",
"0.5986646",
"0.5947148",
"0.5934538",
"0.5911428",
"0.59073174",
"0.590661",
"0.59045154",
"0.59034413",
"0.5875903",
"0.5860391",
"0.5859604",
"0.58438224",
"0.58397263",
"0.58224577",
"0.5816134",
"0.5798356"
] | 0.81595033 | 0 |
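Several of the negative snippets in the record above (for example the follow(self, followerId, followeeId) methods and the followers comparison helper) revolve around maintaining a follower graph. As a hedged illustration of that pattern — the class and method names below are assumptions made for this sketch, not taken from any of the snippets — a minimal follower store might look like this:

from collections import defaultdict

class FollowGraph:
    # Minimal follower store in the spirit of the follow()/followers() snippets above.

    def __init__(self):
        # Map each user id to the set of ids they follow.
        self.followees = defaultdict(set)

    def follow(self, follower_id, followee_id):
        # Ignore self-follows, as the snippets above do.
        if follower_id != followee_id:
            self.followees[follower_id].add(followee_id)

    def follower_counts(self):
        # Invert the relation to count followers per user.
        counts = defaultdict(int)
        for followees in self.followees.values():
            for followee in followees:
                counts[followee] += 1
        return counts

    def most_followed(self):
        # Return the user id with the most followers, or None if the graph is empty.
        counts = self.follower_counts()
        return max(counts, key=counts.get) if counts else None

graph = FollowGraph()
graph.follow(1, 2)
graph.follow(3, 2)
graph.follow(2, 1)
assert graph.most_followed() == 2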
Creates an interior node with the given operator (a token), and left and right operands (other nodes). | def __init__(self, opToken, leftOper, rightOper):
self.operator = opToken
self.leftOperand = leftOper
self.rightOperand = rightOper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))",
"def __init__(self, token, left = None, right = None):\n LinkedBinaryTree.__init__(self) # LinkedBinaryTree initialization\n if not isinstance(token, str):\n raise TypeError('Token must be a string')\n self._add_root(token) # use inherited, nonpublic method\n if left is not None:\n if token not in '+-*x/':\n raise ValueError('token must be valid operator')\n self._attach(self.root(), left, right) # use inherited, nonpublic method",
"def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")",
"def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)",
"def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")",
"def build_expression_tree(token_list: Sequence[tokens.Token]) -> nodes.ExpNode:\r\n\r\n def is_unary_op(op) -> bool:\r\n return op in UNARYOP_TABLE\r\n\r\n def is_open_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenOpenBracket)\r\n\r\n def is_close_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenCloseBracket)\r\n\r\n def is_comma(token) -> bool:\r\n return isinstance(token, tokens.TokenSymbol) and token.symbol == Separators.SEP_COMMA\r\n\r\n def is_higher_or_equal_op_priority(op1, op2, table) -> bool:\r\n oi1 = table.get(op1)\r\n oi2 = table.get(op2)\r\n\r\n p1 = 0 if oi1 is None else oi1.priority\r\n p2 = 0 if oi2 is None else oi2.priority\r\n\r\n return p1 >= p2\r\n\r\n def read_exp_chain(index) -> Tuple[nodes.ExpNode, int]:\r\n token = token_list[index]\r\n if isinstance(token, tokens.TokenSymbol):\r\n if is_open_bracket(token):\r\n node, i = read_exp(index)\r\n elif is_unary_op(token.symbol):\r\n if UNARYOP_TABLE[token.symbol].affix == OperatorAffix.PREFIX:\r\n node, i = read_prefix_unary_exp(index)\r\n else:\r\n raise ParsingException(f\"unary operator '{token.symbol}' is not a prefix operator\", token.pos)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n node, i = read_exp(index)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if isinstance(next_token, tokens.TokenSymbol) and is_unary_op(next_token.symbol):\r\n if UNARYOP_TABLE[next_token.symbol].affix == OperatorAffix.POSTFIX:\r\n node, i = read_postfix_unary_exp(i, node)\r\n else:\r\n return (node, i)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if is_close_bracket(next_token):\r\n return (node, i)\r\n elif isinstance(next_token, tokens.TokenSymbol):\r\n if next_token.symbol == Separators.SEP_COMMA:\r\n return (node, i)\r\n elif next_token.symbol in BINOP_TABLE:\r\n return read_binary_exp(i, node)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{next_token.symbol}'\", next_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", next_token.pos)\r\n else:\r\n return (node, i)\r\n\r\n def read_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n if index >= len(token_list):\r\n raise ParsingException(\"unexpected token\", token_list[-1].pos)\r\n\r\n token = token_list[index]\r\n if is_open_bracket(token):\r\n return read_bracket_exp(index)\r\n elif isinstance(token, tokens.TokenNumber):\r\n return (nodes.NumberNode(token.num, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenName):\r\n if (index + 1) < len(token_list) and is_open_bracket(token_list[index + 1]):\r\n return read_func_call(index)\r\n else:\r\n return (nodes.NameConstantNode(token.name, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenSymbol):\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n raise ParsingException(\"unexpceted token\", token.pos)\r\n\r\n def read_bracket_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n node, i = read_exp_chain(index + 1)\r\n\r\n if i < len(token_list) and is_close_bracket(token_list[i]):\r\n return (node, i + 1)\r\n else:\r\n raise ParsingException(\"unmatch '('\", token_list[index].pos)\r\n\r\n def read_prefix_unary_exp(index) -> Tuple[nodes.UnaryOpNode, int]:\r\n node, i = read_exp(index + 1)\r\n token = token_list[index]\r\n return (nodes.UnaryOpNode(token.symbol, node, pos=token.pos), i)\r\n\r\n def read_postfix_unary_exp(index, child: nodes.ExpNode) -> 
Tuple[nodes.UnaryOpNode, int]:\r\n token = token_list[index]\r\n\r\n if isinstance(child, nodes.UnaryOpNode):\r\n if is_higher_or_equal_op_priority(token.symbol, child.op, UNARYOP_TABLE):\r\n node = nodes.UnaryOpNode(token.symbol, child.child, pos=token.pos)\r\n child.child = node\r\n node = child\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n\r\n return (node, index + 1)\r\n\r\n def read_binary_exp(index, left: nodes.ExpNode) -> Tuple[nodes.BinaryOpNode, int]:\r\n right, i = read_exp_chain(index + 1)\r\n\r\n token = token_list[index]\r\n if isinstance(right, nodes.BinaryOpNode) and not is_open_bracket(token_list[index + 1]):\r\n # check operator priority and rotate the expression tree when necessary.\r\n # when priority of two operators are equal, we also should rotate the tree\r\n # in case these operators don't follow the commutative law.\r\n if is_higher_or_equal_op_priority(token.symbol, right.op, BINOP_TABLE):\r\n node = nodes.BinaryOpNode(token.symbol, left, right.left, pos=token.pos)\r\n right.left = node\r\n node = right\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n\r\n return (node, i)\r\n\r\n def read_func_call(index) -> Tuple[nodes.FuncCallNode, int]:\r\n name_token = token_list[index]\r\n index += 2 # skip '('\r\n\r\n token_count = len(token_list)\r\n\r\n node = None\r\n i = index\r\n args = []\r\n\r\n while i < token_count and not is_close_bracket(token_list[i]):\r\n node, i = read_exp_chain(i)\r\n args.append(node)\r\n if i < token_count and is_comma(token_list[i]):\r\n i += 1\r\n else:\r\n break\r\n\r\n if i < token_count and is_close_bracket(token_list[i]):\r\n func_node = nodes.FuncCallNode(name_token.name, args, pos=name_token.pos)\r\n return (func_node, i + 1)\r\n else:\r\n raise ParsingException(\"unclose func call\", name_token.pos)\r\n\r\n\r\n node, i = read_exp_chain(0)\r\n\r\n if i < len(token_list):\r\n last_token = token_list[i]\r\n if is_close_bracket(last_token):\r\n raise ParsingException(\"unmatch ')'\", last_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", last_token.pos)\r\n else:\r\n return node",
"def __init__(self, operation, left, right):\n self.operation = operation\n self.left = left\n self.right = right",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()",
"def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-x*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()",
"def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand",
"def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator",
"def __create_nested_structure(nested_operator: PatternStructure):\n order = list(range(len(nested_operator.args))) if isinstance(nested_operator, CompositeStructure) else [0]\n operator_type = None\n if isinstance(nested_operator, AndOperator):\n operator_type = OperatorTypes.AND\n elif isinstance(nested_operator, SeqOperator):\n operator_type = OperatorTypes.SEQ\n ret = TreePlanLeafNode(order[0])\n for i in range(1, len(order)):\n ret = TreePlanBinaryNode(operator_type, ret, TreePlanLeafNode(order[i]))\n return ret",
"def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()",
"def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)",
"def _append_operator(self, operator):",
"def make_binary(sv, piece, o, op):\r\n here=piece.rfind(op) # look for last occurrence\r\n there=here+len(op)\r\n t1=piece[:here].strip(Space) # first term (sometimes omitted)\r\n t2=piece[there:].strip(Space) # second term must be present\r\n if not t2: \r\n print(\"\\n\", Err_op_syntax, o) # *** Syntax error in operator ***\r\n print(\" \", piece)\r\n raise ReferenceError\r\n first=tree_build(sv, t1) # process each term RECURSIVE\r\n second=tree_build(sv, t2)\r\n return (o, first, second)",
"def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value",
"def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node",
"def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)",
"def _(self, node: BinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n return f\"( {node.op} {left} {right} )\"",
"def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node",
"def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def _lex_operators(self):\n try:\n val = self._current\n type = Lexer._OPERATORS[self._current]\n self._advance()\n return Token(val, type)\n except KeyError:\n raise ParserError(self._expr,\n \"Encountered invalid token '{t}' at {i}\".format(\n t=self._current, i=self._index))",
"def match_expr(self, precedence: int) -> \"AbstractNode\":\n tkn = self.lexer.tkn\n # This line is solely to satisfy mypy.\n left = AbstractNode()\n if tkn.type == Token.AT:\n self.lexer.next_token()\n address = self.match_expr(PREC_PREFIX)\n left = MemoryNode(address)\n elif tkn.type == Token.INT:\n try:\n left = IntNode(int(tkn.value, base=0))\n except ValueError:\n raise SyntaxError(\"invalid integer literal: {}\".format(tkn))\n else:\n self.lexer.next_token()\n elif tkn.type == Token.MINUS:\n self.lexer.next_token()\n left = PrefixNode(\"-\", self.match_expr(PREC_PREFIX))\n elif tkn.type == Token.REGISTER:\n try:\n left = RegisterNode(register_to_index(tkn.value))\n except HERAError:\n raise SyntaxError(\"{} is not a valid register\".format(tkn.value))\n self.lexer.next_token()\n elif tkn.type == Token.SYMBOL:\n left = SymbolNode(tkn.value)\n self.lexer.next_token()\n elif tkn.type == Token.LPAREN:\n self.lexer.next_token()\n left = self.match_expr(PREC_LOWEST)\n if self.lexer.tkn.type != Token.RPAREN:\n self.unexpected(self.lexer.tkn)\n self.lexer.next_token()\n else:\n self.unexpected(tkn)\n\n infix_tkn = self.lexer.tkn\n while infix_tkn.type in PREC_MAP and precedence < PREC_MAP[infix_tkn.type]:\n infix_precedence = PREC_MAP[infix_tkn.type]\n self.lexer.next_token()\n right = self.match_expr(infix_precedence)\n left = InfixNode(infix_tkn.value, left, right)\n infix_tkn = self.lexer.tkn\n return left",
"def Expression(self, paren=False):\n left = self.Conjunction(paren)\n while self.currtok[1].name == \"OR\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Conjunction()\n left = BinaryExpr(op, left, right, paren)\n return left",
"def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)"
] | [
"0.6768396",
"0.6402284",
"0.6212105",
"0.6038182",
"0.6029708",
"0.59814626",
"0.59771395",
"0.5969799",
"0.57881653",
"0.57876146",
"0.57825124",
"0.57195956",
"0.56935775",
"0.56912225",
"0.56751347",
"0.559061",
"0.5583933",
"0.5532599",
"0.54641896",
"0.5460819",
"0.5419034",
"0.5418368",
"0.5414495",
"0.53841996",
"0.53565824",
"0.5339949",
"0.5322291",
"0.53207654",
"0.5312278",
"0.53110856"
] | 0.66272795 | 1 |
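The record above pairs an interior-node constructor (an operator token plus left and right operand nodes) with expression-tree negatives such as build_expression_tree. A minimal, self-contained sketch of how such nodes compose and evaluate — the LeafNode/InteriorNode names and the evaluate() method are assumptions made for illustration, not the original module's API:

import operator

class LeafNode:
    # A leaf holding a numeric value.
    def __init__(self, value):
        self.value = value

    def evaluate(self):
        return self.value

class InteriorNode:
    # An interior node with an operator token and left/right operand nodes,
    # mirroring the __init__ shown in the document field above.
    _OPS = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}

    def __init__(self, op_token, left_operand, right_operand):
        self.operator = op_token
        self.left_operand = left_operand
        self.right_operand = right_operand

    def evaluate(self):
        # Recursively evaluate both operands, then apply this node's operator.
        return self._OPS[self.operator](self.left_operand.evaluate(),
                                        self.right_operand.evaluate())

# (3 + (4 * 5)) evaluates to 23
tree = InteriorNode("+", LeafNode(3), InteriorNode("*", LeafNode(4), LeafNode(5)))
assert tree.evaluate() == 23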
Returns the expression in prefix form. | def prefix(self):
return str(self.operator) + " " + self.leftOperand.prefix() + " " + self.rightOperand.prefix() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])",
"def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr",
"def base_prefix(self):\n return self.calculation.base_prefix",
"def prefix(self):\n return self[\"prefix\"]",
"def prefix(self):\n return self[\"prefix\"]",
"def prefix(pattern):\r\n return pattern[0:len(pattern)-1]",
"def prefix(pattern):\n return pattern[0:len(pattern)-1]",
"def getPrefix(self):\n return _libsbml.ASTBasePlugin_getPrefix(self)",
"def prefix(self):\n return self._prefix",
"def prefix(self):\n return self._prefix",
"def prefix(self):\n return self._prefix",
"def getPrefix(self):\n raise NotImplementedError",
"def result_prefix(self):\n return self.calculation.result_prefix",
"def get_prefix(self):\n return self.prefix",
"def get_prefix(self):\n return self._prefix",
"def get_prefix(self):\n return self._prefix",
"def getPrefix(self):\n return _libsbml.MultiASTPlugin_getPrefix(self)",
"def getPrefix(self):\n return _libsbml.SBase_getPrefix(self)",
"def getPrefix(self, *args):\n return _libsbml.XMLNamespaces_getPrefix(self, *args)",
"def getPrefix(self):\n return _libsbml.XMLToken_getPrefix(self)",
"def getPrefixedName(self, *args):\n return _libsbml.XMLAttributes_getPrefixedName(self, *args)",
"def getPrefix(self, *args):\n return _libsbml.XMLAttributes_getPrefix(self, *args)",
"def prefix(self):\n return self._path_prefix",
"def set_prefix_expression(self, expression, clear_args = True):\n if expression and type(expression) is not str:\n raise TypeError('expression should be either string or None or False')\n if clear_args:\n self._prefix_kwargs = {}\n self._prefix_expression = expression",
"def _expand_prefix(prefix, configs):\n return subst_vars(prefix, configs)",
"def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")",
"def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")",
"def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix",
"def getPrefixedName(self):\n return _libsbml.XMLTriple_getPrefixedName(self)",
"def getAttrPrefixedName(self, *args):\n return _libsbml.XMLToken_getAttrPrefixedName(self, *args)"
] | [
"0.710734",
"0.6866923",
"0.6425482",
"0.63313454",
"0.63313454",
"0.63225156",
"0.63132906",
"0.6267651",
"0.62378424",
"0.62378424",
"0.62378424",
"0.6218811",
"0.6152476",
"0.6141088",
"0.6069823",
"0.6069823",
"0.6055349",
"0.60520095",
"0.6048974",
"0.60268766",
"0.59823626",
"0.5966694",
"0.5940881",
"0.591457",
"0.5897211",
"0.58804023",
"0.58804023",
"0.5866765",
"0.58618367",
"0.58404773"
] | 0.7265404 | 0 |
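The prefix() method in the record above emits the operator before its two operands. A self-contained sketch of the same serialization over nested ("op", left, right) tuples — the tuple representation is an assumption made for illustration, not the original tree classes:

def prefix(node):
    # Leaves are plain numbers; interior nodes are (operator, left, right) tuples.
    if not isinstance(node, tuple):
        return str(node)
    op, left, right = node
    # Operator first, then the two operands, space separated.
    return op + " " + prefix(left) + " " + prefix(right)

assert prefix(("+", 3, ("*", 4, 5))) == "+ 3 * 4 5"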
Returns the expression in infix form (fully parenthesized). | def infix(self):
return "(" + self.leftOperand.infix() + " " + str(self.operator) + " " + self.rightOperand.infix() + ")" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_infix(string):\n return postfix(infix_to_postfix(string))",
"def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr",
"def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")",
"def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output",
"def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. -3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res",
"def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output",
"def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)",
"def infix_to_postfix(expr):\n ops = Stack()\n postfix = []\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)",
"def infixToRPN(expression):\n stack = Stack()\n RPNList = []\n tokens = expression.split()\n spaces = True\n\n # If no spaces in expression then push each char in a tokens list\n if len(tokens) == 1:\n spaces = False\n tokens = [char for char in expression]\n\n for token in tokens:\n if token in alphabet or token in numbers:\n RPNList.append(token)\n elif token == '(':\n stack.push(token)\n elif token == ')':\n top = stack.pop()\n while top != '(':\n RPNList.append(top)\n top = stack.pop()\n else:\n while (not stack.isEmpty()) and (precedence[stack.peek()] >= precedence[token]):\n RPNList.append(stack.pop())\n stack.push(token)\n\n while not stack.isEmpty():\n RPNList.append(stack.pop())\n\n if spaces:\n return \" \".join(RPNList)\n else:\n return \"\".join(RPNList)",
"def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... (Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output",
"def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", \"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix",
"def print_infix(self):\n if self.is_empty():\n return \"\"\n else:\n if self.is_leaf():\n return str(self.root_value())\n else:\n if self.has_left():\n if self.has_right():\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value()) + \" \" \\\n + str(self.get_right().print_infix())\n else:\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value())\n else:\n return str(self.root_value()) + \" \" + str(self.get_right().print_infix())",
"def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix",
"def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1",
"def parse_infix(input: str) -> Node:\n parsed = ParsedString(input).tokenize()\n ans = parse_e(parsed)\n return ans",
"def infix_to_assembly(formula: str) -> str:\n asm = \"\"\n postfix = infix_to_postfix(formula)\n for value in postfix:\n if value == \"+\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nadd ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"-\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nsub ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"*\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nmul bx\"\n asm += \"\\npush ax\"\n elif value == \"/\":\n asm += \"\\nmov dx, 0h\"\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\ndiv bx\"\n asm += \"\\npush ax\"\n else:\n # asm += \"\\npush 0\" + value + \"h\"\n # the line above is commented out as the emulator has a bug\n # which pushes immediate 0bbh as 0ffbbh to the stack\n asm += \"\\nmov cx, 0\" + value + \"h\"\n asm += \"\\npush cx\"\n return asm",
"def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result",
"def expr(self):\n return self._express",
"def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex",
"def infix_to_postfix(string_input):\n stack_ops = []\n output = []\n value = \"\"\n\n for item in string_input:\n # item = operator\n if item in ops_prec.keys():\n value = value_to_output(value, output)\n\n # pop elements while they have lower precedence\n while (stack_ops\n and stack_ops[-1] in ops_prec.keys()\n and ops_prec[item] <= ops_prec[stack_ops[-1]]):\n output.append(stack_ops.pop())\n # else put item on stack\n stack_ops.append(item)\n\n # subexpression, delay precedence\n elif item == '(':\n value = value_to_output(value, output)\n\n stack_ops.append(item)\n elif item == ')':\n value = value_to_output(value, output)\n\n # flush output until ( is reached on stack\n while (stack_ops and stack_ops[-1] != '('):\n output.append(stack_ops.pop())\n # remove '('\n stack_ops.pop()\n\n # value = operand\n else:\n # concatenation of value for multidigit ones\n value += item\n # output.append(item) # this would be for one digit\n\n # flush stack to output\n value = value_to_output(value, output)\n\n while stack_ops:\n output.append(stack_ops.pop())\n\n return output",
"def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", separator=b\"&\",\n with_keys=True)\n else:\n return self._expand(expression)",
"def infix(self):\n return str(self.data)",
"def infixToPostfix(expr, prec):\n ops = Stack()\n postfix = []\n toks = expr.split()\n for t in toks:\n if t.isdigit():\n postfix.append(t)\n elif t == '(':\n ops.push('(')\n elif t == ')':\n op = ops.pop()\n while op != '(':\n postfix.append(op)\n op = ops.pop()\n else:\n while True:\n if ops.empty() or ops.peek() == '(':\n ops.push(t)\n break\n if prec[t] > prec[ops.peek()]:\n ops.push(t)\n break\n elif prec[t] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(t)\n break\n else:\n postfix.append(ops.pop())\n while not ops.empty():\n postfix.append(ops.pop())\n return postfix",
"def simplify(expression):\n q = []\n for x in expression:\n if x != \")\":\n q.append(x)\n else:\n subexp = \"\"\n while q:\n #print(q)\n c = q.pop()\n if c == \"(\":\n if len(q) and (q[-1] == \"+\" or q[-1] == \"-\"):\n sign = q.pop()\n else:\n sign = \"+\"\n subexp = signExp(subexp, sign)\n q.append(subexp)\n break\n else:\n subexp = c + subexp\n exp = \"\"\n while q:\n c = q.pop()\n exp = c + exp\n \n if len(exp) and exp[0] != \"+\" and exp[0] != \"-\":\n # Again if the first character is not a 'sign' make it a \"+\"\n exp = \"+\" + exp\n \n return exp",
"def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr",
"def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str",
"def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix",
"def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left",
"def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix"
] | [
"0.71687776",
"0.70765674",
"0.70678484",
"0.70356464",
"0.69882065",
"0.6876605",
"0.6813432",
"0.6722101",
"0.6542399",
"0.6502566",
"0.648048",
"0.64614725",
"0.645874",
"0.63886565",
"0.63599867",
"0.632505",
"0.626854",
"0.6246395",
"0.62401587",
"0.62371445",
"0.6197802",
"0.61815476",
"0.60982955",
"0.6090422",
"0.60867715",
"0.60864705",
"0.6082653",
"0.6081127",
"0.60450387",
"0.60127366"
] | 0.81181073 | 0 |
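
The negative snippets in the record above are all variants of the shunting-yard conversion from infix to postfix. As a point of comparison, here is a minimal self-contained sketch of the same idea in Python; the operator table, token format and function names are illustrative assumptions, not taken from any snippet above.

# Minimal shunting-yard sketch (assumed operator set; left-associative binary operators only).
OPS = {'+': (1, lambda a, b: a + b),
       '-': (1, lambda a, b: a - b),
       '*': (2, lambda a, b: a * b),
       '/': (2, lambda a, b: a / b)}

def to_postfix(tokens):
    out, stack = [], []
    for tok in tokens:
        if tok in OPS:
            # pop operators of greater or equal precedence before pushing tok
            while stack and stack[-1] in OPS and OPS[stack[-1]][0] >= OPS[tok][0]:
                out.append(stack.pop())
            stack.append(tok)
        elif tok == '(':
            stack.append(tok)
        elif tok == ')':
            while stack and stack[-1] != '(':
                out.append(stack.pop())
            stack.pop()  # discard the matching '('
        else:
            out.append(float(tok))  # operands go straight to the output
    out.extend(reversed(stack))  # flush remaining operators
    return out

def eval_postfix(postfix):
    stack = []
    for tok in postfix:
        if tok in OPS:
            b, a = stack.pop(), stack.pop()
            stack.append(OPS[tok][1](a, b))
        else:
            stack.append(tok)
    return stack[0]

# Example: eval_postfix(to_postfix("2 + 3 * ( 4 - 1 )".split())) == 11.0
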
Returns all possible velocity dispersions from all particles found in the data set. A particle filter can be passed using "filter", which is a list | def compute_velocity_dispersion(data, types = None, fields = None, filter = None):
types_to_fields = {'x': 'particle_velocity_x',
'y': 'particle_velocity_y',
'z': 'particle_velocity_z',
'r': 'particle_velocity_spherical_radius',
'theta': 'particle_velocity_spherical_theta',
'phi': 'particle_velocity_spherical_phi'}
if types is None and fields is None:
        fields = list(types_to_fields.values())  # wrap in list() so keys[i] indexing below works on Python 3
        keys = list(types_to_fields.keys())
elif fields is None:
fields = [ types_to_fields[x] for x in types ]
keys = types
else:
keys = fields
dispersion = {}
for i,x in enumerate(fields):
if filter is not None:
v = data[x][filter]
else:
v = data[x]
if np.size(v) == 0:
dispersion[keys[i]] = 0.0
else:
dispersion[keys[i]] = vel_dispersion( v.convert_to_units('km/s') )
return dispersion | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_velocities(self):\n\n return np.array([p.velocity for p in self.particles])",
"def particle_forceV(R,N,sigma,epsilon,D):\n F = np.zeros((3,N))\n x = np.zeros(N-1)\n y = np.zeros(N-1)\n z = np.zeros(N-1)\n r = np.zeros(N-1)\n # loop over all particles\n for i in range(N):\n # Distances for x,y,z between particles\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n a = (c*4*(sigma/epsilon)*(12/r**14-6/r**8))\n F[:,i] = -np.sum(a,1)\n return F",
"def particle_filter(particle_set_t, measurement_t):\n global count\n n_samples, dim = particle_set_t.shape # no of particles and dimension of each particle\n\n pred_state = np.zeros((n_samples, dim), dtype=\"float64\") # store the predicted state \n weights = np.zeros(n_samples, dtype=\"float64\") # corresponding weights for resampling\n\n particle_set_t1 = np.zeros((n_samples, dim), dtype=\"float64\") # next iteration of particles\n\n\n # this loop calculates \\bar{X_t}, i.e. the predicted belief.\n for n in range(n_samples):\n # predicted motion step:\n xn_t1 = sample_motion_model(particle_set_t[n]) # 3x1 vector: hypothetical state\n\n # measurement correction step:\n weight_xn_t1 = state_likelihood(measurement_t, xn_t1) # scalar value\n\n pred_state[n] = xn_t1\n weights[n] = weight_xn_t1\n\n \n # It was observed that if all weights are 0, the resampling step breaks. \n # Thus, adding a uniform distribution. This is obviously a very bad idea \\ \n # as the true state can easily be discarded in the resampling step: TODO!\n if np.sum(weights) > 0.0:\n weights = weights/np.sum(weights) # normalize array only when sum in not 0\n else:\n print(\"possile divergence!\")\n weights[:] = 1 / n_samples # if sum is 0 then assign uniform distribution throughout\n\n\n # the resampling step:\n # indices = monte_carlo.residual_resample(weights)\n indices = monte_carlo.stratified_resample(weights)\n count += 1\n print(count)\n\n # new particle set is particles at index locations\n for i, index in enumerate(indices):\n particle_set_t1[i] = pred_state[index]\n\n return particle_set_t1",
"def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]",
"def ParticleFilterParams(fix_params=False):\n\n ## Particle filter parameters\n\n # Q_c will be the time continuous covariance matrix. \n #This should be the errors in the model.\n # in the form [x_cov, y_cov, z_cov, \n # vel_x_cov, vel_y_co, vel_z_cov, \n # mass_cov, \n # sigma_cov, shape_cov, brightness_cov, tau_cov]\n \n\n Q_c = [10., 2., 2., \n 150., 50., 50., \n 5., 0, 0,\n 1e-3, 1e-10, 0., 0.0001]\n\n\n print('Qc values used:', Q_c)\n\n Q_c = np.asarray([i**2 for i in Q_c])\n\n \n # Q_c_frag is used at reinitialisation if the fragmentation option is used\n \n Q_c_frag = [0., 0., 0., \n 0.02, 0.02, 0.02, \n 0.5, 0, 0,\n 2e-3, 5e-9, 0., 0.]\n\n Q_c_frag = [i**2 for i in Q_c_frag]\n\n ## P: starting uncertainty to initialise gaussian spread of particals. \n ## P2: starting uncertainty at reinitialisation if the fragmentation option is used\n ## in the form [x_cov, y_cov, z_cov, % of vel_x_cov, % of vel_y_co, % of vel_z_cov]\n P = [50., 50., 50., 250., 250., 250.]\n P2 = [50., 50., 50., 250., 250., 250.]\n\n ## Initialise state ranges\n\n\n ## shape parameter close to a rounded brick (1.8) (A for a sphere =1.21)\n A_min = 1.21\n A_max = 3.0 \n\n ## luminosity coefficient\n tau_min = 0.0001\n tau_max = 0.1\n\n ## lists of typical meteorite densities for different types. [chond, achond, stony-iron, iron, cometary]\n pm_mean = [3000, 3100, 4500, 7500, 850]\n pm_std = [420, 133, 133, 167, 117 ]\n\n ## to choose density values according to a distribution of meteorite percentages:\n particle_choices = []\n\n # this is created using lines 257-266; uncomment if needs changing.\n random_meteor_type = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4]\n\n #random_meteor_type = []\n #for i in range(80): # 80 % Chondrites\n # random_meteor_type.append(0)\n #for i in range(11): # 11 % Achondrites\n # random_meteor_type.append(1)\n #for i in range(2):\n # random_meteor_type.append(2) # 2 % Stony-Iron\n #for i in range(5):\n # random_meteor_type.append(3) # 5 % iron\n #for i in range(2):\n # random_meteor_type.append(4) # 2 % cometary\n\n ## ablation coefficeint \n #sigma_min = 0.001*1e-6\n #sigma_max = 0.5*1e-6\n\n\n #range_params = [m0_max, A_mean, A_std, pm_mean, pm_std, random_meteor_type, cd_mean, cd_std, sigma_min, sigma_max, K_min, K_max, tau_min, tau_max]\n range_params = [A_min, A_max, pm_mean, pm_std, random_meteor_type, tau_min, tau_max]\n\n if fix_params:\n \tQ_c[-4:] = [0., 0., 0., 0.]\n \tQ_c_frag[-4:] = [0., 0., 0., 0.]\n return Q_c, Q_c_frag, P, range_params",
"def getDifferentialFlowDataForAllEvents(self, particleName=\"pion\", order=2, pT_range=None, where=\"\", orderBy=\"event_id\"):\n pid = self._pid(particleName)\n whereClause = \"pid=%d and n=%d\" % (pid, order)\n if pT_range:\n whereClause += \" and %g<=pT and pT<=%g\" % (pT_range[0], pT_range[1])\n if where:\n whereClause += \" and \" + where\n RawdiffvnData = np.asarray(self.db.selectFromTable(\"diff_vn\", (\"pT\", \"vn_real\", \"vn_imag\"), whereClause=whereClause, orderByClause=orderBy))\n #nevent = self.getNumberOfEvents()\n nevent = self.db.selectFromTable(\"multiplicities\", \"count()\", \"pid = %d\" % pid)[0][0]\n npT = len(RawdiffvnData[:,0])/nevent\n diffvnData = RawdiffvnData.reshape(nevent, npT, 3)\n return diffvnData",
"def init_particle_filter(self, motion_prior, n_p):\n # Define necessary components for the particle filter\n if motion_prior['mode'] == 'PositionDiffusion':\n # Diffusion\n dc_infer = motion_prior['dc']\n d_h = 2 # Dimension of hidden state (i.e. x,y = 2 dims)\n sdev = np.sqrt(dc_infer * self.dt / 2) * np.ones((d_h,))\n ipd = pf.GaussIPD(d_h, self.n_n, sdev * 0.001)\n tpd = pf.GaussTPD(d_h, self.n_n, sdev)\n ip = pf.GaussIP(d_h, sdev * 0.001)\n tp = pf.GaussTP(d_h, sdev)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n\n elif motion_prior['mode'] == 'VelocityDiffusion':\n # FIXME: save these params\n d_h = 4 # Hidden state dim, x,y,vx,vy\n\n v0 = motion_prior['v0'] # Initial Estimate for velocity\n dcv = motion_prior['dcv'] # Velocity Diffusion Constant\n st = np.sqrt(dcv * self.dt)\n adj = np.sqrt(1 - st ** 2 / v0 ** 2)\n\n eps = 0.00001 # Small number since cannot have exact zero\n sigma0 = np.array([eps, eps, v0, v0]) # Initial sigmas\n sigma_t = np.array([eps, eps, st, st]) # Transition sigmas\n\n # Transition matrix\n a = np.array([[1, 0, self.dt, 0],\n [0, 1, 0, self.dt],\n [0, 0, adj, 0],\n [0, 0, 0, adj]])\n\n ipd = pf.GaussIPD(d_h, self.n_n, sigma0)\n tpd = pf.GaussTPD(d_h, self.n_n, sigma_t, A=a)\n ip = pf.GaussIP(d_h, sigma0)\n tp = pf.GaussTP(d_h, sigma_t, A=a)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n # Note trick where PoissonLP takes 0,1 components of the\n # hidden state which is the same for both cases\n\n else:\n raise ValueError(\n 'Unrecognized Motion Prior ' + str(motion_prior))\n\n r = np.zeros((self.n_n, self.n_t)).astype('float32')\n return pf.ParticleFilter(\n ipd, tpd, ip, tp, lp, r.transpose(), n_p)",
"def k_particles(chosen_particle, positions, velocities):\n\n\n # array with all indecies of all k particles for positions\n positions_k = []\n velocities_k = []\n\n # array of new distances considering boundary conditions\n new_distances = []\n\n # check over all particles in positions\n for index in range(N):\n\n distance_x, distance_y = per_boun_distance(chosen_particle, positions[index])\n\n # distance from selected particle to particle with index\n d = np.sqrt(distance_x**2 + distance_y**2)\n\n # append this distance to array of distances\n new_distances.append(d)\n\n # Now we need a sorting algorithm (merge)\n for j in range(k+1):\n low = min(new_distances)\n\n index_k = new_distances.index(low)\n\n # get the index of the particle for velocity\n velocities_k.append(velocities[index_k])\n\n # get the index of the particle for position\n # and add position to all positions within r\n positions_k.append(positions[index_k])\n\n new_distances.pop(index_k)\n\n return velocities_k, positions_k",
"def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V",
"def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)",
"def create_particles(self):\n # xf, yf = create_fluid_with_solid_cube()\n xf, yf = create_fluid()\n uf = np.zeros_like(xf)\n vf = np.zeros_like(xf)\n m = initialize_mass(xf, yf)\n rho = initialize_density_fluid(xf, yf)\n h = np.ones_like(xf) * self.hdx * self.dx\n fluid = get_particle_array_wcsph(x=xf, y=yf, h=h, m=m, rho=rho, u=uf,\n v=vf, name=\"fluid\")\n\n xt, yt = create_boundary(self.dx / 2.)\n ut = np.zeros_like(xt)\n vt = np.zeros_like(xt)\n m = np.ones_like(xt) * 1500 * self.dx * self.dx\n rho = np.ones_like(xt) * 1000\n h = np.ones_like(xt) * self.hdx * self.dx / 2.\n tank = get_particle_array_wcsph(x=xt, y=yt, h=h, m=m, rho=rho, u=ut,\n v=vt, name=\"tank\")\n\n return [fluid, tank]",
"def getTallyParticles(self):\n\n\t\tparticleNames = []\n\n\t\tif self.typeNumber > 0:\n\t\t\tparticleNames.append(particleListShort[self.typeNumber]) \n\t\telse:\n\t\t\tfor i,name in enumerate(self.particleList):\n\t\t\t\ttry:\n\t\t\t\t\tif self.tallyParticles[i] == 1:\n\t\t\t\t\t\tparticleNames.append(self.particleList[i])\n\t\t\t\texcept:\n\t\t\t\t\tpass # For some reasons there can be less than 35 particles listed. Skip in case.\n\t\treturn particleNames",
"def new_plummer_distribution(number_of_particles, \n total_mass = 1.0|nbody_system.mass, \n virial_radius = 1.0|nbody_system.length,\n mass_cutoff = 0.999,\n G = None,\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n particles = new_plummer_spatial_distribution(number_of_particles, total_mass=total_mass, \n virial_radius=virial_radius, **keyword_arguments)\n \n if G is None:\n G = nbody_system.G if generic_unit_system.is_generic_unit(total_mass.unit) else constants.G\n velocity_unit = (G*total_mass/virial_radius).sqrt().unit.base_unit()\n plummer_radius = 0.1875 * numpy.pi * virial_radius\n \n escape_velocity = (1 + particles.position.lengths_squared()/plummer_radius**2)**(-0.25) | velocity_unit\n velocity = escape_velocity * sample_from_velocity_distribution(number_of_particles)\n velocity *= numpy.sqrt((G*total_mass*number_of_particles) / (2*virial_radius*velocity.length_squared()))\n particles.velocity = velocity.reshape((-1,1)) * random_direction(number_of_particles)\n return particles",
"def particle_filter(\n h_obs,\n n_particles = 10000,\n dt = 0.1,\n gen = SEIHR_generator(100000),\n particles = None,\n s_min = None\n):\n \n # Get the list of particles to fit\n if particles is not None:\n n_particles = len(particles)\n t_prev = particles[0].t\n if any([t_ <= t_prev for t_, y in h_obs]):\n warnings.warn(\"Dropping observations with time below current simulation time\")\n h_obs = [(t_, y) for t_, y in h_obs if t_ > t_prev]\n \n particles = [p.copy() for p in particles]\n \n else:\n particles = [\n gen.generate(i) for i in range(n_particles)\n ]\n t_prev = 0\n \n if s_min is None:\n s_min = n_particles*0.9\n \n #Loop over observations\n for t, y in h_obs:\n \n #Using list comprehension; for each particle, step forward to the next evaluation point\n #and update weights based on the eval_value, which need not be atomic\n tmp_particles = [\n x.step(t, i, dt = dt, eval_value = y) for x, i in zip(particles, range(len(particles)))\n ]\n \n #Harvest the weights from all particles, then normalize to add to 1\n pre_weights = [x.w_t for x in tmp_particles]\n weights = np.array(pre_weights)/np.sum(pre_weights)\n s_eff = 1/np.sum(weights**2)\n \n for x, w in zip(tmp_particles, weights):\n x.w_t = w\n \n print(s_eff)\n \n #If effective sample size is below minimum, then perform resampling\n if s_eff < s_min:\n \n particles = np.random.choice(\n tmp_particles,\n p = weights, \n replace = 1, \n size = len(particles)\n )\n \n for x in particles:\n x.w_t = 1/len(particles)\n \n else:\n particles = tmp_particles\n \n return particles",
"def get_velocity(self):\n\n vs = []\n pairs = [(-2, -1), (-3, -1), (-3, -1)]\n\n for i1, i2 in pairs:\n f1 = self.files[i1]\n p1 = Profile(os.path.join(self.name, f1))\n\n f2 = self.files[i2]\n p2 = Profile(os.path.join(self.name, f2))\n\n # we'll do this by looking at 3 different temperature\n # thresholds and averaging\n T_ref = [2.e9, 3.e9, 4.e9]\n\n for T0 in T_ref:\n x1 = p1.find_x_for_T(T0)\n x2 = p2.find_x_for_T(T0)\n vs.append((x1 - x2)/(p1.time - p2.time))\n\n vs = np.array(vs)\n v = np.mean(vs)\n v_sigma = np.std(vs)\n return v, v_sigma",
"def sieve_function(raw_data):\n matchers = []\n return_list = []\n\n matchers.append(D1000TemperatureDataParticle.regex_compiled())\n\n for matcher in matchers:\n for match in matcher.finditer(raw_data):\n return_list.append((match.start(), match.end()))\n\n if not return_list:\n log.debug(\"sieve_function: raw_data=%r, return_list=%s\", raw_data, return_list)\n return return_list",
"def pipes_velocity(p_list):\n for pipes in p_list:\n pipes.centerx -= 4\n return p_list",
"def initializeParticles(self):\n import itertools\n import random\n #create a list of possible ghost permutations, where each of three ghosts can be on any of the legal positions in the boards.\n permutations = list(itertools.product(self.legalIntentions, repeat=self.numAgents))\n \n random.shuffle(permutations)\n p = len(permutations)\n n = self.numParticles\n self.particles = []\n #create the particles\n while n >= p:\n self.particles += permutations\n n -= p\n #add the remainder\n self.particles += permutations[0: n - 1]",
"def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)",
"def get_fitness_vector(self):\r\n vector = list()\r\n \r\n for particle in self.population: \r\n vector.append(particle.current_fitness)\r\n \r\n return vector",
"def turbulence(self, particles, current_step=0):\n\n for i in range(len(particles)):\n if i % 6 == 0:\n mutated = self.mutator.mutate(particles[i])\n particles[i].vector = copy(mutated.vector)",
"def select(self, test):\n survivors = []\n for particle in self.particles:\n # Find the originating particle\n parent = particle\n while parent.origin is not None:\n parent = parent.origin.initial_state[0]\n if test(parent, particle) is True:\n survivors.append(particle)\n return ParticleCollection(survivors)",
"def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],\n mass: dc.float64[N], G: dc.float64):\n # Kinetic Energy:\n # KE = 0.5 * np.sum(np.sum( mass * vel**2 ))\n # KE = 0.5 * np.sum( mass * vel**2 )\n KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)\n\n # Potential Energy:\n\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r for all particle pairwise particle separations\n inv_r = np.sqrt(dx**2 + dy**2 + dz**2)\n # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]\n I = inv_r > 0\n np.divide(1.0, inv_r, out=inv_r, where=I)\n\n # sum over upper triangle, to count each interaction only once\n # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))\n # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))\n tmp = -np.multiply.outer(mass, mass) * inv_r\n PE = 0.0\n for j in range(N):\n for k in range(j + 1, N):\n PE += tmp[j, k]\n PE *= G\n\n return KE, PE",
"def turbulence(self, particles, current_step=0):\n\n for i in range(len(particles)):\n if i % 3 == 0:\n mutated = self.uniform_mutator.mutate(particles[i])\n elif i % 3 == 1:\n mutated = self.non_uniform_mutator.mutate(particles[i], current_step)\n particles[i].vector = copy(mutated.vector)\n return",
"def velocities(self, return_np=False):\n if return_np:\n return self.si_values()[3:]\n return [self.v_x, self.v_y, self.v_z]",
"def generate_particle_distribution(self, max_loop = np.inf, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n F_max = np.max(self.DF.f) ; F_min = np.min(self.DF.f)\n\n n_particles = 0\n loop_counter = 0\n \n if self.optimize:\n relative_potential = self._interpolate_relative_potential\n else:\n relative_potential = self.DF.relative_potential\n \n \n \n # Continue until max number of particles chosen, or until max loop counter\n while ((n_particles < self.N_part) and (loop_counter < max_loop)):\n \n # choose random position, eval potential, choose velocity\n r = self._choose_position()\n \n Psi = relative_potential(r) \n v = self._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n\n # interpolate along DF to find f(E) of chosen particle\n f_E = self.DF.interpolate_f(E)\n\n # random number from 0 to F_max for accept reject\n #F = np.random.rand() * F_max\n \n # HOLY CRAP....Fmax - Fmin ... not Fmin - Fmax\n F = 10.0**( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n \n if F <= f_E: # accept particle\n\n \n # convert position to cartesian using random theta and phi\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n # save particle position\n self.pos[n_particles] = r * np.array([x,y,z])\n \n # repeat for velocity using new random numbers\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n # save particle velocity\n self.vel[n_particles] = v * np.array([vx,vy,vz])\n \n \n n_particles = n_particles + 1\n \n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n \n if (not outfile == None):\n self.write_pd(outfile)\n \n return self.pos, self.vel",
"def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V",
"def filter_sontek(vel_in):\n\n # Identify all samples where the velocity did not change\n test1 = np.abs(np.diff(vel_in, 1, 1)) < 0.00001\n\n # Identify all samples with all zero values\n test2 = np.nansum(np.abs(vel_in), 0) < 0.00001\n test2 = test2[1:] * 4 # using 1: makes the array dimension consistent with test1 as diff results in 1 less.\n\n # Combine criteria\n test_sum = np.sum(test1, 0) + test2\n\n # Develop logical vector of invalid ensembles\n invalid_bool = np.full(test_sum.size, False)\n invalid_bool[test_sum > 3] = True\n # Handle first ensemble\n invalid_bool = np.concatenate((np.array([False]), invalid_bool), 0)\n if np.nansum(vel_in[:, 0]) == 0:\n invalid_bool[0] = True\n\n # Set invalid ensembles to nan\n vel_out = np.copy(vel_in)\n vel_out[:, invalid_bool] = np.nan\n\n return vel_out",
"def getInterpretedComplexDifferentialFlowsForAllEvents(self, particleName=\"pion\", order=2, pTs=np.linspace(0,2.5,10), where=\"\", orderBy=\"event_id\", verbose=False):\n diffVnData = self.getDifferentialFlowDataForAllEvents(particleName=particleName, order=order, where=where, orderBy=orderBy)\n diffVnintepBlock = []\n if verbose: print(\"Looping over {} events... (please be patient)\".format(diffVnData.shape[0]))\n for iev in range(diffVnData.shape[0]):\n diffVnintep = np.interp(pTs, diffVnData[iev,:,0], diffVnData[iev,:,1]) + 1j*np.interp(pTs, diffVnData[iev,:,0], diffVnData[iev,:,2])\n diffVnintepBlock.append(diffVnintep)\n if verbose: print(\"Done. Thanks for waiting.\")\n return np.asarray(diffVnintepBlock)",
"def verlet(self,t,dt,r0,r1):\r\n r2 = np.zeros([2,self.particles.size])\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n r2 = (2*r1 - r0 + np.transpose(fv(r1[0,:],r1[1,:],dx,dy,r2,t/self.dt,True,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2)) * (dt**2))\r\n #The transpose is necessary because I messed up the shapes when I did the fv function.\r\n\r\n #JV: this needs to change if we want to include particles with mass diferent than 1 (in reduced units),\r\n # in other words, diferent particles than the Argon gas\r\n\r\n return r2[0,:],r2[1,:]"
] | [
"0.5962206",
"0.57577115",
"0.5713186",
"0.56626225",
"0.56291974",
"0.5555897",
"0.5501763",
"0.5466105",
"0.54309624",
"0.54021454",
"0.5381622",
"0.53503567",
"0.53398895",
"0.5317946",
"0.52965754",
"0.5289197",
"0.5287057",
"0.52832603",
"0.52564645",
"0.5244272",
"0.52303654",
"0.5228992",
"0.5225467",
"0.52220017",
"0.5197605",
"0.51926786",
"0.51897305",
"0.5189419",
"0.5178475",
"0.51722455"
] | 0.6084097 | 0 |
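
The positive document above depends on a yt-style data container and an external vel_dispersion helper that is not shown in the record. As a rough, hypothetical sketch of the underlying quantity — the dispersion of a single velocity component taken as the standard deviation of the (optionally filtered) velocities — independent of yt:

import numpy as np

def vel_dispersion_1d(v):
    # Velocity dispersion of one component: RMS deviation from the mean.
    v = np.asarray(v, dtype=float)
    if v.size == 0:
        return 0.0  # mirrors the empty-selection branch in the record above
    return float(np.std(v))

# Example with a boolean particle filter, mirroring the "filter" argument above
# (velocities and mask are made-up values):
v = np.array([210.0, 195.0, 205.0, 400.0])   # km/s
young = np.array([True, True, True, False])  # hypothetical particle filter
sigma = vel_dispersion_1d(v[young])
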
This api does not return xml | def xml(self):
raise NotImplementedError('This api does not return xml') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xml(self, request):\n raise Exception(\"Not Implemented\")",
"def content_api_xml(url, request):\n headers = {'content-type': 'application/xml'}\n content = 'xml string'\n return response(status_code=200,\n content=content,\n headers=headers,\n request=request)",
"def make_request_xml(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`xml`\n if self.is_good_enough_xml(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None",
"def _api_call(url: str) -> ET.Element:\n result = requests.get(url)\n if result.status_code != 200:\n raise RequestException(f\"API status code {result.status_code} for URL: {url}\")\n\n # Remove HTML line breaks (which cause confusion in the XML parsing)\n t: str = re.sub(r\"\\s*(<br/>)+\\s*\", r\" \", result.text)\n\n x_tree = ET.fromstring(t)\n return x_tree",
"def api(self) -> str:",
"def parse(self, response):",
"def test_xml_direct(self): \n response = client.result(True, 'xml', 'unittest', test_data = self.test_data)\n root = ET.fromstring(response)\n first_name = root[0][0][0].text\n self.assertEqual(first_name,'John', 'Should print John')\n nationality = '<nationality>' in response\n self.assertFalse(nationality, 'Nationality should not be present')",
"def _xml_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response",
"def render_GET(self, request):\n return etree.tostring(self.xml(request), pretty_print=True)",
"def test():\n r = Response(response=\"This worked!\", status=200,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r",
"def read_xml(self):\n pass",
"def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')",
"def __call_api(self, values):\n # Add auth key to the request dictionary if not supplie\n if 'auth' not in values:\n values['auth'] = self.auth_data['auth']\n\n # Encode the data for a GET request\n data = urllib.parse.urlencode(values)\n\n #print values\n\n # Try to make the request\n xml_string = urllib.request.urlopen(self.xml_rpc + '?' + data).read()\n\n # Parse the XML\n response_data = xmltodict(self.__sanitize(xml_string))\n\n # Ensure that there was XML to parse\n if not response_data:\n return None\n\n # Grab the root element\n response_data = response_data['root'][0]['child']\n\n return response_data",
"def xml():\n response = make_response(render_template(\"sample.xml\"))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response",
"def parse_response(self):\n pass",
"def read(self, return_string=False):\r\n # Get result data from debugger engine and verify length of response\r\n data = self.read_data()\r\n\r\n # Show debug output\r\n debug('[Response data] %s' % data)\r\n\r\n # Return data string\r\n if return_string:\r\n return data\r\n\r\n # Remove special character quoting\r\n data = self.unescape(data)\r\n\r\n # Replace invalid XML characters\r\n data = ILLEGAL_XML_RE.sub('?', data)\r\n\r\n # Create XML document object\r\n document = ET.fromstring(data)\r\n return document",
"def main_response(self, data):",
"def main_response(self, data):",
"def xml(self):\n return self._xml",
"def xml(self):\n return self._xml",
"def get_usercp_xml(self,):\n response = self.session.get('https://ngb.to/usercp.php?type=xml')\n return response.text",
"def parsexml(self):\n raise NotImplementedError",
"def get_data(self):",
"def get(self):\n xml = self._robot.GetCapabilitiesXml()\n self.response.headers['Content-Type'] = 'text/xml'\n self.response.out.write(xml)",
"def request_xml(self):\n xml_filename = pkg_resources.resource_filename(__name__, 'data/request.xml')\n with open(xml_filename, 'r') as xml_file:\n xml = xml_file.read()\n xml = xml.format(username=self.username,\n password=self.password,\n timestamp=time.time(),\n hardware_id=self.hardware_id(),\n advertisement_id=self.advertisement_id(),\n locale=self.locale)\n return xml",
"def retreive_xml(**options):\n get_query = []\n for k, v in options.iteritems():\n get_query.append(k + '=' + v)\n\n url = twitter_api + '&'.join(get_query)\n p = urllib.urlopen(url)\n content = p.read()\n return content",
"def execute(self):\n headers = {\n 'Content-type': 'application/x-www-form-urlencoded',\n 'Accept-Charset': 'utf-8',\n 'User-Agent': USER_AGENT\n }\n request = urllib2.Request(self.url(), headers=headers)\n response = urllib2.urlopen(request)\n \n return etree.parse(response)",
"def get_pmid_xml(api_key,idtype,searchid):\n headers = {'X-ELS-APIKey':api_key}\n url = \"http://api.elsevier.com/content/search/index:SCOPUS?query=\" + idtype + \"(\" + searchid + \")&field=doi\"\n #url = 'http://api.elsevier.com/content/abstract/scopus_id:' + str(pmid)\n print url\n web = requests.get(url, headers=headers)\n try:\n doi = web.json()['search-results']['entry'][0]['prism:doi']\n except: \n print web.json()\n doi = False\n \n try: \n scopus_abstract = web.json()['search-results']['entry'][0]['prism:url']\n web_scopus = requests.get(scopus_abstract, headers=headers)\n xml_text = web_scopus.text\n except: \n scopus_abstract = False\n xml_text = False\n\n output = {'doi':doi, 'scopus_abstract':scopus_abstract, 'xml_text':xml_text}\n return(output)",
"def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()",
"def serve_metadata():\n xml = generate_xml_metadata()\n return Response(xml, mimetype=\"application/gzip\")"
] | [
"0.6906242",
"0.681684",
"0.66007024",
"0.64178306",
"0.6360245",
"0.629127",
"0.61903584",
"0.61852777",
"0.6081651",
"0.6022426",
"0.6001489",
"0.5988445",
"0.59532",
"0.5934724",
"0.5896389",
"0.5798428",
"0.57974744",
"0.57974744",
"0.5797071",
"0.5797071",
"0.576466",
"0.57562596",
"0.57530856",
"0.56981385",
"0.565673",
"0.56447184",
"0.5608147",
"0.5604755",
"0.5597347",
"0.55895483"
] | 0.75670606 | 0 |
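
The document above is the "refuse politely" half of a common response-wrapper pattern: a base class raises NotImplementedError for formats it does not support, and subclasses override only what they can actually return. A generic sketch of that pattern follows; the class and method names are invented for illustration and are not from any real library.

class BaseResponse:
    """Hypothetical base class for API responses."""

    def xml(self):
        raise NotImplementedError('This api does not return xml')

    def json(self):
        raise NotImplementedError('This api does not return json')

class JsonResponse(BaseResponse):
    def __init__(self, payload):
        self._payload = payload

    def json(self):
        return self._payload  # only the JSON accessor is overridden

# JsonResponse({'ok': True}).json() -> {'ok': True}
# JsonResponse({'ok': True}).xml()  -> raises NotImplementedError
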
Returns whether error is NOAUTH | def noauth(self):
try:
            # some endpoints don't return json
return self.json['response'].get('error_id') == 'NOAUTH'
except:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auth_error():\n return unauthorized('Invalid credentials')",
"def unauthorized():\n return HttpError(401)",
"def check_auth_none(self, username):\n return AUTH_FAILED",
"def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])",
"def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])",
"def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403",
"def is_auth_error(error: Exception) -> bool:\n if not isinstance(error, Fault):\n return False\n return (\n any(\n \"NotAuthorized\" in code\n for code in extract_subcodes_as_strings(error.subcodes)\n )\n or \"auth\" in stringify_onvif_error(error).lower()\n )",
"def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def check_auth():",
"def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)",
"def auth_failure():\n return \"Request denied due to failed authorization\", 201, {'Content-Type': 'text/html'}",
"def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)",
"def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)",
"def get_authenticated_denied(self):",
"def unauthorized():\n return {'errors': ['Unauthorized']}, 401",
"def token_auth_error():\n logger.debug(\"Token authentication failed.\")\n return unauthorized(\"Invalid credentials.\")",
"def __check_http_err(self, status_code):\n if status_code == 403:\n raise exceptions.APIAuthenticationError(self.api_key)\n elif status_code == 503:\n raise exceptions.APITimeoutError()\n else:\n return False",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_no_auth(self) -> None:\n channel = self.make_request(\"DELETE\", self.url)\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])",
"def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)",
"def test_is_unauthenticated(self):\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)"
] | [
"0.7120319",
"0.70609397",
"0.7014564",
"0.6881835",
"0.6881835",
"0.6855839",
"0.67338675",
"0.67045474",
"0.66409737",
"0.66409737",
"0.66238505",
"0.6619812",
"0.66152024",
"0.659969",
"0.65859014",
"0.6582713",
"0.6556535",
"0.6549733",
"0.6540464",
"0.6489589",
"0.6488741",
"0.6488231",
"0.64701575",
"0.6464409",
"0.6462344",
"0.6462344",
"0.64472467",
"0.6370041",
"0.63248056",
"0.63104796"
] | 0.81090945 | 0 |
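
The positive document above swallows every exception with a bare except. A slightly more defensive sketch of the same check, catching only the failures that "the endpoint did not return JSON" can realistically produce; the payload shape ({'response': {'error_id': ...}}) is an assumption carried over from the record.

def is_noauth(payload):
    # True only when the parsed body carries error_id == 'NOAUTH'.
    try:
        return payload['response'].get('error_id') == 'NOAUTH'
    except (TypeError, KeyError, AttributeError):
        return False  # no/invalid JSON body means it was not a NOAUTH error

# is_noauth({'response': {'error_id': 'NOAUTH'}})  -> True
# is_noauth(None)                                  -> False
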
Verify that the Maven option can be selected | def test_should_choose_maven(self):
search_str = "//*[text()='Maven Project']"
els = self.driver.find_elements_by_xpath(search_str)
self.assertGreater(len(els), 0, 'Maven project is not found!')
els[0].click() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_install_project(self):\n return True",
"def test_target_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check bennr01:dev\", exitcode=None)\n self.assertIn(\"Target: bennr01:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)",
"def test_project_administrator(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n return False",
"def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")",
"def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)",
"def checkBuildStatus(self):\n pass",
"def test_default_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check dev\", exitcode=None)\n self.assertIn(\"Target: ywangd:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)",
"def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Read only', loginAsUser=True)\n \n #Navigate to Repositories Page\n self.get_RepositoriesPage(\"Firmware\")\n \n self.logout()\n \n #Verify Options",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def mvn(version):\n\tif version is None:\n\t\tmvn_list = get_mvn_list()\n\t\t_err('Available Maven versions: {0}'.format(mvn_list))\n\tget_mvn(version)",
"def test_project_reader(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n if is_project_reader(project):\n return True\n return False",
"def test_default(self):\r\n self.assertEqual(self.option.default, False)",
"def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)",
"def test_installed(self):\n check_output('unity --help', shell=True)",
"def test_version_dropdown(plugin_dialog):\n widget = plugin_dialog.available_list.item(1).widget\n assert widget.version_choice_dropdown.currentText() == \"3\"\n # switch from PyPI source to conda one.\n widget.source_choice_dropdown.setCurrentIndex(1)\n assert widget.version_choice_dropdown.currentText() == \"4.5\"",
"def test_build_tools(self):\n #raise AssertionError(\"%s not implemented\" % sys._getframe().f_code.co_name)\n if self.status: self.status.Warning(\"By default build tools is Xilinx this can be changed in demo/nysa_platform.py\")\n if find_xilinx_path() is None:\n return False\n return True",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def verify_package_status(self):\n pass",
"def verify_package_status(self):\n pass",
"def check_options(options, parser):\n if not options.get('release_environment', None):\n print(\"release environment is required\")\n parser.print_help()\n return os.EX_USAGE\n\n return 0",
"def _in_travis(): # pragma: no cover\n return 'TRAVIS' in os.environ",
"def package_version_check(args, parser):\n if (args.build or args.check) and args.package_version:\n parser.error('--package-version works only with --create')",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')",
"def test_default_select():\n # Arange\n REPO = \"https://foo.bar/foobar\"\n\n # Act\n rm = gcbo.RepoManager(REPO)\n\n # Assert\n assert rm.select() == REPO",
"def test_check_no_download(self):\n output = self.run_command(\"selfupdate --check\", exitcode=0)\n contains_latest_version = (\"Already at latest version\" in output)\n contains_new_version = (\"New version available\" in output)\n assert (contains_latest_version or contains_new_version)\n self.assertNotIn(\"Url: \", output)\n self.assertNotIn(\"Update completed.\", output)\n self.assertNotIn(\"Failed to update. Please try again.\", output)",
"def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())",
"def test_project_writer(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n return False",
"def check(self, context):\r\n return context.config.preset is not None",
"def _cmake_needed():\n if \"NOX_INSTALL_CMAKE\" in os.environ:\n return True\n\n return shutil.which(\"cmake\") is None",
"def check_requirement(self):\n raise NotImplementedError"
] | [
"0.56208634",
"0.5418946",
"0.5402192",
"0.5377991",
"0.53591835",
"0.53054255",
"0.5300273",
"0.52879065",
"0.52866113",
"0.5282876",
"0.52460307",
"0.5236804",
"0.5199385",
"0.51885027",
"0.5179016",
"0.5172662",
"0.5153789",
"0.5144154",
"0.5144154",
"0.51356703",
"0.50819343",
"0.5067656",
"0.5066592",
"0.5057324",
"0.50542575",
"0.50278634",
"0.50239307",
"0.5019979",
"0.50186574",
"0.50186473"
] | 0.7265652 | 0 |
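
The document above uses the Selenium 3 find_elements_by_xpath API, which is deprecated in Selenium 4. A hypothetical Selenium 4 equivalent of the same check, generalised to any option label (it needs a live WebDriver, so the call at the bottom is left commented out):

from selenium.webdriver.common.by import By

def choose_option(driver, label):
    # Find elements whose visible text matches `label` exactly and click the first one.
    xpath = "//*[text()='{}']".format(label)
    elements = driver.find_elements(By.XPATH, xpath)
    assert len(elements) > 0, '{} option is not found!'.format(label)
    elements[0].click()

# choose_option(self.driver, 'Maven Project')
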
Checks that all transformers in self.transformer_list are compatible with methods fit, transform and fit_transform. | def _check_transformers(self):
assert all([hasattr(trf, "fit") for trf in self.transformer_list]), "At least one transformer object is not " \
"compatible with 'fit' method."
assert all([hasattr(trf, "transform") for trf in self.transformer_list]), "At least one classifier object " \
"is not compatible with " \
"'transform' method."
assert all([hasattr(trf, "fit_transform") for trf in self.transformer_list]), "At least one classifier " \
"object is not compatible with " \
"'fit_transform' method." | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_transforms(self):\n if len(self.transforms) > 1:\n for transform in self.transforms:\n if transform.applies is None:\n raise ValueError(\n 'If more than one transform is provided, each '\n 'provided transform must provide an apply field.',\n )",
"def _validate_transforms(self):\n if len(self.transforms) > 1:\n for transform in self.transforms:\n if transform.applies is None:\n raise ValueError(\n 'If more than one transform is provided, each '\n 'provided transform must provide an apply field.',\n )",
"def _ensure_transform(\n self, message: dict, transformers: Optional[List[Callable]] = None\n ) -> None:\n required_transformers = self.__requiredtransformers__\n\n missing_transformers = None\n if required_transformers and not transformers:\n missing_transformers = required_transformers\n\n called = set()\n if transformers:\n for func in transformers:\n if isinstance(func, functools.partial):\n called.add(func.func.__name__)\n else:\n called.add(func.__name__)\n\n func(message=message)\n\n if required_transformers != called:\n missing_transformers = required_transformers.difference(called)\n\n if missing_transformers:\n raise MissingTransformersError(self.__class__.__name__, missing_transformers)",
"def check_if_it_can_fit(object):\n if hasattr(object, \"fit\") and hasattr(object, \"predict\") and hasattr(object, \"get_params\") and hasattr(object,\n \"set_params\"):\n return object\n else:\n raise Exception(\"Pass an estimator that has methods fit predict set_params get_params\")",
"def _check_integrity(self):\n for f in self.list_func:\n if(not(isinstance(f, (pFunc_collec, pFunc_base, pFunc_fromcallable)))):\n raise ValueError('type %s while expecting pFunc_base or collection'\n ' ' % (str(type(f))))\n f._check_integrity()",
"def validate_bettertransformer(self):\n if self.num_heads is None:\n raise ValueError('Number of heads not set for `BetterTransformer` integration.')\n if self.embed_dim is None:\n raise ValueError('Embedding dimension not set for `BetterTransformer` integration.')\n if self.norm2_eps is None or self.norm1_eps is None:\n raise ValueError('`norm2_eps` and `norm1_eps` not set for `BetterTransformer` integration.')\n if self.pos_emb_type is not None and self.pos_emb_type != 'absolute':\n raise ValueError(f'Positional embedding type {self.pos_emb_type} not supported for `BetterTransformer` integration')\n if self.norm1_eps != self.norm2_eps:\n raise ValueError('norm1_eps and norm2_eps must be equal for `BetterTransformer` integration.')\n if self.act_fn in USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS:\n logger.warning(f'Overridding {self.act_fn} activation with gelu. Use the transformed model at your own risk, the output logits could be significantly different.')\n self.act_fn = 'gelu'\n elif self.act_fn not in SUPPORTED_ACTIVATION_FUNCTIONS:\n raise ValueError(f'Activation function {self.act_fn} not supported for `BetterTransformer` integration.')\n self.use_gelu = self.act_fn == 'gelu' or self.act_fn == 'gelu_new'\n if self.num_heads % 2 == 1:\n raise ValueError(f'Number of heads {self.num_heads} is not supported for `BetterTransformer` integration. Number of heads must be even.')",
"def assert_transformation_available(self, data):\n mt = ModelType.from_view_type(data)\n\n for record in self._validators:\n mt_other = ModelType.from_view_type(record.view)\n if not mt.has_transformation(mt_other):\n raise AssertionError(\n 'Could not validate %s using %r because there was no'\n ' transformation from %r to %r' %\n (self.concrete_type, record.validator.__name__,\n mt._view_name, mt_other._view_name)\n )",
"def validate(self):\n for validator in self.exttype_validators:\n validator.validate(self.ext_type)",
"def fit(self, X, y=None, **fitparams):\n \n self.fitted_transformers_ = []\n for transformer in self.list_of_transformers:\n fitted_trans = clone(transformer).fit(X, y=None, **fitparams)\n self.fitted_transformers_.append(fitted_trans)\n return self",
"def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])",
"def _verify_fit(self) -> None:\n if not hasattr(self, 'X_train') or not hasattr(self, 'Y_train'):\n raise ValueError('Training data not set. Call `fit` and pass training data first.')",
"def _is_transformable(self):\n if not self._app.get_paths():\n raise NotTransformable(\"No image to\")\n elif not edit_supported(self._app.get_path()):\n raise NotTransformable(\"Filetype not supported for\")\n # Some operations only make sense if we are allowed to save to file\n elif not settings[\"autosave_images\"].get_value():\n message = \"\"\n if self._app[\"thumbnail\"].toggled:\n message = 'When operating in thumbnail mode ' \\\n '\"autosave_images\" must be enabled for'\n elif self._app[\"mark\"].marked:\n message = 'When images are marked ' \\\n '\"autosave_images\" must be enabled for'\n if message:\n raise NotTransformable(message)",
"def is_sklearn_transformer(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"transformer\"",
"def test_class_methods(self):\n\n x = BaseTransformer()\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")\n\n h.test_object_method(\n obj=x, expected_method=\"columns_set_or_check\", msg=\"columns_set_or_check\"\n )\n\n h.test_object_method(\n obj=x, expected_method=\"columns_check\", msg=\"columns_check\"\n )",
"def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )",
"def _check_initialized(self):\n check_is_fitted(self, 'estimators_')",
"def _check_is_fitted(self):\n # Do not check `b` as some classifiers do not set it\n check_is_fitted(self, 'w')\n super(CClassifierLinear, self)._check_is_fitted()",
"def check_regressor(self):\n\n # Sklearn and Mlxtend stacking regressors, as well as \n # LightGBM, XGBoost, and CatBoost regressor \n # do not adhere to the convention.\n try:\n super().check_regressor\n except:\n print(f'{_MODEL_DICT[self.regressor_choice]} does not adhere to sklearn conventions.')",
"def test_compatibility_with_sklearn(self) -> type(None):\n check_estimator(StackingClassifier)",
"def check_transforms_match(self, transform: Mapping) -> None:\n xform_id = transform.get(TraceKeys.ID, \"\")\n if xform_id == id(self):\n return\n # TraceKeys.NONE to skip the id check\n if xform_id == TraceKeys.NONE:\n return\n xform_name = transform.get(TraceKeys.CLASS_NAME, \"\")\n warning_msg = transform.get(TraceKeys.EXTRA_INFO, {}).get(\"warn\")\n if warning_msg:\n warnings.warn(warning_msg)\n # basic check if multiprocessing uses 'spawn' (objects get recreated so don't have same ID)\n if torch.multiprocessing.get_start_method() in (\"spawn\", None) and xform_name == self.__class__.__name__:\n return\n raise RuntimeError(\n f\"Error {self.__class__.__name__} getting the most recently \"\n f\"applied invertible transform {xform_name} {xform_id} != {id(self)}.\"\n )",
"def validate(self):\n validation_methods = get_validation_methods(Layout)\n\n for method in validation_methods:\n getattr(self, method)()",
"def check_for_fit(cls, method):\n\n @wraps(method)\n def _check_for_fit(self, *args, **kwargs):\n klass = type(self).__name__\n if not self._is_fitted:\n raise PipelineNotYetFittedError(\n f\"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}.\"\n )\n\n return method(self, *args, **kwargs)\n\n return _check_for_fit",
"def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.transform, expected_arguments=[\"self\", \"X\"]\n )",
"def test_scikit_learn_compatibility():\n\n # sklearn tests in:\n # https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/utils/estimator_checks.py\n\n skip_tests = {\n \"check_dtype_object\", # the error message required to pass is too specific and incorrect for us\n \"check_classifiers_one_label\", # TODO: fix this! We should accept 1 category\n \"check_classifiers_regression_target\", # we're more permissive and convert any y values to str\n \"check_supervised_y_no_nan\", # error message too specific\n \"check_supervised_y_2d\", # we ignore useless added dimensions\n \"check_fit2d_predict1d\", # we accept 1d for predict\n \"check_fit2d_1sample\", # TODO: we allow fitting on 1 sample, but this kind of input is likely a bug from the caller, so change this\n \"check_regressors_no_decision_function\", # TODO: fix this!\n }\n for estimator, check_func in check_estimator(\n ExplainableBoostingClassifier(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n ExplainableBoostingRegressor(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n DPExplainableBoostingClassifier(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n DPExplainableBoostingRegressor(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()",
"def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )",
"def fit(self, data):\n if not self._transformers:\n return\n\n transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n final_step[1].fit(transformed_data)",
"def check_sklearn_attributes(sklearn_preprocess:object):\n\t\tcoder_type = str(type(sklearn_preprocess))\n\t\tstringified_coder = str(sklearn_preprocess)\n\n\t\tif (inspect.isclass(sklearn_preprocess)):\n\t\t\traise ValueError(dedent(\"\"\"\n\t\t\tYikes - The encoder you provided is a class name, but it should be a class instance.\\n\n\t\t\tClass (incorrect): `OrdinalEncoder`\n\t\t\tInstance (correct): `OrdinalEncoder()`\n\t\t\t\\n\"\"\"))\n\n\t\tif ('sklearn.preprocessing' not in coder_type):\n\t\t\traise ValueError(dedent(\"\"\"\n\t\t\tYikes - At this point in time, only `sklearn.preprocessing` encoders are supported.\n\t\t\thttps://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing\n\t\t\t\\n\"\"\"))\n\t\telif ('sklearn.preprocessing' in coder_type):\n\t\t\tif (not hasattr(sklearn_preprocess, 'fit')): \n\t\t\t\traise ValueError(dedent(\"\"\"\n\t\t\t\tYikes - The `sklearn.preprocessing` method you provided does not have a `fit` method.\\n\n\t\t\t\tPlease use one of the uppercase methods instead.\n\t\t\t\tFor example: use `RobustScaler` instead of `robust_scale`.\n\t\t\t\t\\n\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'sparse')):\n\t\t\t\tif (sklearn_preprocess.sparse == True):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `sparse==True` attribute of {stringified_coder}.\n\t\t\t\t\tFYI `sparse` is True by default if left blank.\n\t\t\t\t\tThis would have generated 'scipy.sparse.csr.csr_matrix', causing Keras training to fail.\\n\n\t\t\t\t\tPlease try again with False. For example, `OneHotEncoder(sparse=False)`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'encode')):\n\t\t\t\tif (sklearn_preprocess.encode == 'onehot'):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `encode=='onehot'` attribute of {stringified_coder}.\n\t\t\t\t\tFYI `encode` is 'onehot' by default if left blank and it results in 'scipy.sparse.csr.csr_matrix',\n\t\t\t\t\twhich causes Keras training to fail.\\n\n\t\t\t\t\tPlease try again with 'onehot-dense' or 'ordinal'.\n\t\t\t\t\tFor example, `KBinsDiscretizer(encode='onehot-dense')`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'copy')):\n\t\t\t\tif (sklearn_preprocess.copy == True):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `copy==True` attribute of {stringified_coder}.\n\t\t\t\t\tFYI `copy` is True by default if left blank, which consumes memory.\\n\n\t\t\t\t\tPlease try again with 'copy=False'.\n\t\t\t\t\tFor example, `StandardScaler(copy=False)`.\n\t\t\t\t\t\"\"\"))\n\t\t\t\n\t\t\tif (hasattr(sklearn_preprocess, 'sparse_output')):\n\t\t\t\tif (sklearn_preprocess.sparse_output == True):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `sparse_output==True` attribute of {stringified_coder}.\n\t\t\t\t\tPlease try again with 'sparse_output=False'.\n\t\t\t\t\tFor example, `LabelBinarizer(sparse_output=False)`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'order')):\n\t\t\t\tif (sklearn_preprocess.sparse_output == 'F'):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `order=='F'` attribute of {stringified_coder}.\n\t\t\t\t\tPlease try again with 'order='C'.\n\t\t\t\t\tFor example, `PolynomialFeatures(order='C')`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\t\"\"\"\n\t\t\t- Attempting to automatically set this. I was originally validating based on \n\t\t\t whether or not the encoder was categorical. But I realized, if I am going to \n\t\t\t rule them out and in... 
why not automatically set it?\n\t\t\t- Binners like 'KBinsDiscretizer' and 'QuantileTransformer'\n\t\t\t will place unseen observations outside bounds into existing min/max bin.\n\t\t\t- Regarding a custom FunctionTransformer, assuming they wouldn't be numerical\n\t\t\t as opposed to OHE/Ordinal or binarizing.\n\t\t\t\"\"\"\n\t\t\tcategorical_encoders = [\n\t\t\t\t'OneHotEncoder', 'LabelEncoder', 'OrdinalEncoder', \n\t\t\t\t'Binarizer', 'MultiLabelBinarizer'\n\t\t\t]\n\t\t\tonly_fit_train = True\n\t\t\tfor c in categorical_encoders:\n\t\t\t\tif (stringified_coder.startswith(c)):\n\t\t\t\t\tonly_fit_train = False\n\t\t\t\t\tbreak\n\t\t\treturn only_fit_train",
"def can_retransform(self):\r\n return self._can_retransform",
"def _check_inputs(self):\n\n # Check if attributes exists\n if self.attributes is None:\n print(\"attributes is missing; call set_attributes(new_attributes) to fix this! new_attributes should be a\",\n \"populated dataset of independent variables.\")\n return False\n\n # Check if labels exists\n if self.labels is None:\n print(\"labels is missing; call set_labels(new_labels) to fix this! new_labels should be a populated dataset\",\n \"of dependent variables.\")\n return False\n\n # Check if attributes and labels have same number of rows (samples)\n if self.attributes.shape[0] != self.labels.shape[0]:\n print(\"attributes and labels don't have the same number of rows. Make sure the number of samples in each\",\n \"dataset matches!\")\n return False\n\n # Type-checking for fit_intercept, normalize, and copy_X isn't needed; these can accept truthy/falsy values\n\n # Check if n_jobs is an integer or None\n if self.n_jobs is not None and not isinstance(self.n_jobs, int):\n print(\"n_jobs must be None or an integer; call set_n_jobs(new_n_jobs) to fix this!\")\n return False\n\n # Check if test_size is a float or None\n if self.test_size is not None and not isinstance(self.test_size, (int, float)):\n print(\"test_size must be None or a number; call set_test_size(new_test_size) to fix this!\")\n return False\n\n return True",
"def test_sklearn_compatible_estimator(estimator: Any, check: Any) -> None:\n check(estimator)"
] | [
"0.6600919",
"0.6600919",
"0.6245045",
"0.60071653",
"0.60061026",
"0.597027",
"0.5953561",
"0.57135946",
"0.566122",
"0.5652529",
"0.5621234",
"0.55733705",
"0.5559613",
"0.5533336",
"0.54827064",
"0.5438628",
"0.5429233",
"0.5416639",
"0.5386236",
"0.53803736",
"0.5358731",
"0.53483915",
"0.5336691",
"0.53317505",
"0.53241795",
"0.5294224",
"0.528637",
"0.5285609",
"0.5275569",
"0.5274284"
] | 0.9007204 | 0 |
Deduce correct spark dtype from pandas dtype for column col of pandas dataframe df | def infer_spark_dtype(df, col):
logger = logging.getLogger(__name__ + ".infer_spark_dtype")
pd_dtype = df.dtypes[col]
# get a sample from column col
sample = df[col].dropna()
if sample.shape[0] == 0:
logger.warning("column %s of dtype %s containing nulls found" % (col, pd_dtype))
sample = None
else:
sample = sample.iloc[0]
# infer spark dtype
# datetimes
if pd.api.types.is_datetime64_any_dtype(pd_dtype):
ret = T.TimestampType()
# ints
elif (pd_dtype == 'int8') or (pd_dtype == 'int16'): # int8, int16
ret = T.ShortType()
elif pd_dtype == 'int32':
ret = T.IntegerType()
elif pd.api.types.is_int64_dtype(pd_dtype):
ret = T.LongType()
# uints
elif pd_dtype == 'uint8':
ret = T.ShortType()
elif pd_dtype == 'uint16':
ret = T.IntegerType()
elif pd_dtype == 'uint32':
ret = T.LongType()
elif pd_dtype == 'uint64':
logger.warning("converting column %s of type uint64 to spark LongType - overflows will be nulls" % col)
ret = T.LongType()
# floats
elif (pd_dtype == 'float16') or (pd_dtype == 'float32'):
ret = T.FloatType()
elif pd_dtype == 'float64': # float64
ret = T.DoubleType()
elif pd_dtype == 'bool':
ret = T.BooleanType()
# object
elif pd_dtype == 'object':
if (sample is None) or (isinstance(sample, str)):
logger.warning("converting column %s of type object to spark StringType" % col)
ret = T.StringType()
elif isinstance(sample, tuple):
raise NotImplementedError("cannot convert column %s containing tuples to spark" % col)
else:
raise NotImplementedError("values in column %s of type object not understood" % col)
# category
elif pd.api.types.is_categorical_dtype(pd_dtype):
logger.warning("converting column %s of type category to spark StringType" % col)
ret = T.StringType()
else:
raise NotImplementedError("column %s of type %s not understood" % (col, pd_dtype))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df",
"def change_col_type(df,schema):\n d = {'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}\n \n for c,t in schema.items():\n df = df.withColumn(c,col(c).cast(d[t]))\n return df",
"def get_col_dtype(col):\n if col.dtype == \"object\":\n try:\n col_new = pd.to_datetime(col.dropna().unique())\n return ['timestamp', 'datetime']\n except:\n return [\"text\", 'string']\n elif col.dtype == 'float64':\n return ['float', 'float64']\n elif col.dtype == 'int64':\n return ['int', 'int64']\n elif col.dtype == 'datetime64[ns]':\n return ['timestamp', 'datetime']\n else:\n return ['text', 'string']",
"def inspect_dtype_object(self, column: str) -> str:\n\n series = self.df[column].dropna()\n\n # check for bool\n try:\n conv = pd.to_numeric(series)\n return self.inspect_dtype(conv)\n except ValueError:\n pass\n\n # check for mixed dtypes\n dtypes = {type(x) for x in series}\n if len(dtypes) > 1:\n raise TypeError(\"Column `{}` has mixed dtypes: {}. Currently, \"\n \"this is not supported.\"\n .format(column, dtypes))\n\n # check for string\n if isinstance(series[0], str):\n return \"str\"\n\n # raise if unsupported dtype is encountered\n raise TypeError(\"Column `{}` has dtype `{}` which is currently \"\n \"not supported.\"\n .format(column, type(series[0])))",
"def get_data_type(col_val):\n dtype = \"\"\n\n original_col_val = col_val\n digits_only = col_val.replace('-', '',1).replace(',', '', -1).replace(\".\", \"\")\n if digits_only.isdigit():\n try:\n int(original_col_val)\n dtype = TYPE_INT\n except ValueError:\n dtype = TYPE_FLOAT\n \n return dtype",
"def ibis_schema_apply_to(schema, df):\n\n for column, dtype in schema.items():\n pandas_dtype = dtype.to_pandas()\n if isinstance(dtype, dt.Interval):\n df[column] = df[column].values.astype(pandas_dtype)\n else:\n df[column] = df[column].astype(pandas_dtype, errors='ignore')\n\n if PY2 and dtype == dt.string:\n df[column] = df[column].str.decode('utf-8', errors='ignore')\n\n return df",
"def cast(elem, psql_type):\n if psql_type == 'real':\n return float(format(elem, '.6g'))\n elif psql_type == 'double precision':\n return float(format(elem, '.15g'))\n elif psql_type == 'timestamp':\n if isinstance(elem, pd.Timestamp):\n return elem.to_pydatetime()\n else:\n return elem\n elif psql_type == 'text':\n if type(elem) == float:\n return \"NaN\"\n return str(elem)\n else:\n return elem",
"def test_df_all_types():\n return pd.DataFrame({\n 'intcol': [1, 2],\n 'strcol': ['three', 'four'],\n 'floatcol': [5.0, 6.0],\n 'boolcol': [True, False],\n 'datetimecol': [\n np.datetime64('2020-01-01'), np.datetime64('2020-01-02')],\n })",
"def _infer_pa_column_type(self, column: pa.lib.ChunkedArray):\n # Validates the column to ensure that value types are consistent\n column.validate()\n return pa_to_feast_value_type(column)",
"def convert_types(df):\n \n # Iterate through each column\n for c in df:\n \n # Convert ids and booleans to integers\n if ('SK_ID' in c):\n df[c] = df[c].fillna(0).astype(np.int32)\n \n # Convert objects to category\n elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n df[c] = df[c].astype('category')\n \n # Booleans mapped to integers\n elif list(df[c].unique()) == [1, 0]:\n df[c] = df[c].astype(bool)\n \n # Float64 to float32\n elif df[c].dtype == float:\n df[c] = df[c].astype(np.float32)\n \n # Int64 to int32\n elif df[c].dtype == int:\n df[c] = df[c].astype(np.int32)\n \n return df",
"def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res",
"def get_dtype(col):\n dtype = col.dtype\n\n if isinstance(dtype, CategoricalDtype):\n col = col.astype(type(col.values[0]))\n out = get_dtype(col)\n elif np.issubdtype(dtype, np.floating):\n out = 'float32'\n elif np.issubdtype(dtype, np.integer):\n if col.max() < 32767:\n out = 'int16'\n else:\n out = 'int32'\n elif np.issubdtype(dtype, np.object_):\n size = int(col.astype(str).str.len().max())\n out = 'S{:}'.format(size)\n else:\n out = dtype\n\n return out",
"def infer_dtype(self):\n raise NotImplementedError",
"def cudf_dtype_from_pydata_dtype(dtype):\n\n if cudf.api.types.is_categorical_dtype(dtype):\n return cudf.core.dtypes.CategoricalDtype\n elif cudf.api.types.is_decimal32_dtype(dtype):\n return cudf.core.dtypes.Decimal32Dtype\n elif cudf.api.types.is_decimal64_dtype(dtype):\n return cudf.core.dtypes.Decimal64Dtype\n elif cudf.api.types.is_decimal128_dtype(dtype):\n return cudf.core.dtypes.Decimal128Dtype\n elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:\n return dtype.type\n\n return infer_dtype_from_object(dtype)",
"def cast_type(cdm_column_type, value):\n if cdm_column_type in ('integer', 'int64'):\n # Regex check only relevant if submission dtype is 'object'\n if not re.match(SCIENTIFIC_NOTATION_REGEX, str(value)):\n return int(value)\n if cdm_column_type in ('character varying', 'text', 'string'):\n return str(value)\n if cdm_column_type == 'numeric':\n return float(value)\n if cdm_column_type == 'float' and isinstance(value, float):\n return value\n if cdm_column_type == 'date' and isinstance(value, datetime.date):\n return value\n if cdm_column_type == 'timestamp' and isinstance(\n value, datetime.datetime): # do not do datetime.datetime\n return value",
"def dtype_to_pgtype(dtype, colname):\n if colname in ('the_geom', 'the_geom_webmercator'):\n return 'geometry'\n else:\n if dtype == 'float64':\n return 'numeric'\n elif dtype == 'int64':\n return 'int'\n elif dtype == 'datetime64[ns]':\n return 'date'\n elif dtype == 'bool':\n return 'boolean'\n else:\n return 'text'\n\n return None",
"def inspect_dtype(series: pd.Series) -> str:\n\n mapping = {pd_types.is_bool_dtype: \"bool\",\n pd_types.is_integer_dtype: \"int\",\n pd_types.is_float_dtype: \"float\",\n pd_types.is_datetime64_any_dtype: \"datetime\"}\n\n for check, result in mapping.items():\n if check(series):\n return result\n\n raise TypeError(\"Type is not understand for column '{}'. Allowed \"\n \"types are bool, int, float, str and datetime.\"\n .format(series.name))",
"def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data",
"def astype(self, dtype: Union[Dict[str, str], str]) -> 'DataFrame':\n\n def change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm):\n missing_value_code = utils.get_missing_value_code(new_kind)\n if new_kind == 'S':\n if old_kind == 'b':\n arr = arr + 1\n cur_srm = [False, 'False', 'True']\n elif old_kind in 'i':\n cur_srm, arr = _va.convert_int_to_str(arr)\n elif old_kind == 'f':\n cur_srm, arr = _va.convert_float_to_str(arr)\n elif old_kind in 'mM':\n cur_srm, arr = _va.convert_datetime_str_to_str(arr.astype('str'))\n\n new_arr[:, new_loc] = arr\n new_srm[new_loc] = cur_srm\n else:\n if new_kind != old_kind:\n nas = utils.isna_array(arr, old_kind)\n if new_kind == 'b' and old_kind != 'b':\n arr = arr.astype('bool').astype('int8')\n new_arr[:, new_loc] = arr\n if new_kind != old_kind:\n new_arr[nas, new_loc] = missing_value_code\n\n if isinstance(dtype, str):\n new_dtype: str = utils.check_valid_dtype_convert(dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n utils.check_astype_compatible(new_kind, self._data.keys())\n\n new_column_info: ColInfoT = {}\n new_arr = utils.create_empty_arr(new_kind, self.shape)\n new_data = {new_kind: new_arr}\n new_srm = {}\n col_iter = enumerate(self._col_info_iter(with_order=True, with_arr=True))\n for i, (col, old_kind, loc, order, arr) in col_iter:\n new_column_info[col] = utils.Column(new_kind, i, order)\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(i, new_kind, old_kind, arr, new_arr, cur_srm)\n elif isinstance(dtype, dict):\n col_kind_convert = {}\n for col, new_dtype in dtype.items():\n self._validate_column_name(col)\n new_dtype: str = utils.check_valid_dtype_convert(new_dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n col_kind_convert[col] = new_kind\n old_kind = self._column_info[col].dtype\n utils.check_astype_compatible(new_kind, {old_kind})\n\n new_column_info: ColInfoT = {}\n cols_per_kind: Dict[str, int] = defaultdict(int)\n for col, old_kind, loc, order in self._col_info_iter(with_order=True):\n new_kind = col_kind_convert.get(col, old_kind)\n cur_loc = cols_per_kind[new_kind]\n new_column_info[col] = utils.Column(new_kind, cur_loc, order)\n cols_per_kind[new_kind] += 1\n\n # create empty arrays for each type\n new_data = {}\n for new_kind, num_cols in cols_per_kind.items():\n shape = len(self), num_cols\n new_data[new_kind] = utils.create_empty_arr(new_kind, shape)\n\n new_srm = {}\n for col, old_kind, loc, order, arr in self._col_info_iter(with_order=True, with_arr=True):\n new_kind = new_column_info[col].dtype\n new_loc = new_column_info[col].loc\n new_arr = new_data[new_kind]\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm)\n else:\n raise TypeError('Argument dtype must be either a string or a dictionary')\n\n new_columns = self._columns.copy()\n return self._construct_from_new(new_data, new_column_info, new_columns, new_srm)",
"def get_column_dtypes(self) -> Tuple[List[str], List[str]]:\n\n columns, pyspark_dtypes = zip(*self.df.dtypes)\n\n # check unsupported pyspark dtypes\n unsupported = set(pyspark_dtypes).difference(self.TYPE_MAPPING.keys())\n if unsupported:\n raise ValueError(\"Unsupported dtype encountered: {}. Supported\"\n \"dtypes are: {}.\"\n .format(unsupported, self.TYPE_MAPPING.keys()))\n\n dtypes = [self.TYPE_MAPPING[dtype] for dtype in pyspark_dtypes]\n\n return columns, dtypes",
"def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data",
"def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)",
"def to_dtype(x, dtype):\n return x.type(dtype)",
"def to_pandas(self) -> np.dtype:\n return self._pandas_type",
"def test__convert_to_str_dtype(self):\n new_column_types = process_mutation._convert_to_str_dtype(\n self.column_types, [\"foo\"]\n )\n assert new_column_types == {\"foo\": \"object\", \"bar\": \"object\"}",
"def set_dtypes(df):\n # drop rows where a column names appear (happened while appending to csv)\n df = df.loc[df[df.columns[0]] != df.columns[0]]\n # convert numerics\n df = df.apply(pd.to_numeric, errors='ignore')\n # parse query_timestamp\n df.query_timestamp = df.query_timestamp.apply(pd.to_datetime)\n\n df.reset_index(inplace=True, drop=True)\n\n return df",
"def convertColumn(df, names, newType) -> pyspark.sql.dataframe.DataFrame:\n for name in names: \n df = df.withColumn(name, df[name].cast(newType))\n return df",
"def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)",
"def convert_dtypes(rows):\n dtype_map = {pd.Timestamp: lambda x: x.to_pydatetime(),\n np.int8: lambda x: int(x),\n np.int16: lambda x: int(x),\n np.int32: lambda x: int(x),\n np.int64: lambda x: int(x),\n np.float16: lambda x: float(x),\n np.float32: lambda x: float(x),\n np.float64: lambda x: float(x),\n np.float128: lambda x: float(x)}\n for row in rows:\n yield [dtype_map.get(type(elem), lambda x: x)(elem) for elem in row]",
"def _preprocess_temporal_columns(df: DataFrame) -> DataFrame:\n for col in df.select_dtypes(include=[\"datetime64[ns, UTC]\"]):\n df = df.astype({col: \"O\"})\n for col in df.select_dtypes(include=\"timedelta64[ns]\"):\n df = df.astype({col: \"O\"})\n return df"
] | [
"0.72055167",
"0.70074373",
"0.67383504",
"0.66940576",
"0.66150796",
"0.65282935",
"0.64637434",
"0.64026904",
"0.63992226",
"0.63900924",
"0.63755214",
"0.63500774",
"0.6339335",
"0.63325894",
"0.6327806",
"0.63060737",
"0.62946403",
"0.62688094",
"0.6235217",
"0.6229606",
"0.62216234",
"0.6220546",
"0.6207605",
"0.6205551",
"0.618993",
"0.6183051",
"0.616651",
"0.6109026",
"0.60733336",
"0.607028"
] | 0.7871534 | 0 |
Run a command and echo it first | def run_cmd(call, cmd, *, echo=True, **kwargs):
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd)))
return call(cmd, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(command):\n if arguments['--dry-run']:\n print command\n else:\n subprocess.call(command, shell=True)",
"async def terminal(event):\r\n command = utils.raw(event.message)\r\n await event.edit(f\"**Running command:**\\n`{command}`\")\r\n result = subprocess.getoutput(command)\r\n await event.edit(f\"**Running command:**\\n`{command}`\\n**Result:**\\n`{result}`\")",
"def system_call(command):\n print(\"\\n### {}\".format(command))\n stderr = subprocess.STDOUT\n pipe = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n stdout, stderr = pipe.communicate()\n print(stdout)",
"def run_command(opts, cmd):\n print(cmd)\n if not opts.dryrun:\n print(check_output(cmd, shell=True))",
"def shell(cmd):\n print('Running \"{}\"...'.format(cmd))\n subprocess.check_call(cmd, shell=True)",
"def local_command(command):\n print('Executing command: {0}\\n'.format(command))\n p = Popen([command], stdout=PIPE, stderr=PIPE, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n break\n line = line.strip()\n print(line)\n stdout, stderr = p.communicate()\n print(stdout)\n print(stderr)",
"def shell(cmd, check=True):\n eprint(f\"+ {cmd}\")\n return run(cmd, shell=True, check=check)",
"def runCommand(command):\n None",
"def print_stdout(command):\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()",
"def run(self, command):\n try:\n print(f\"RUNNING: {command}\")\n print(\"-\" * 80)\n print(subprocess.check_output(command, shell=True).decode('utf-8'))\n except subprocess.CalledProcessError as e:\n print(f\"ERROR calling '{command}'\")\n print(\"-\" * 20)\n print(e.output and e.output.decode('utf-8'))\n sys.exit(-1)",
"def exec_cmd(command):\r\n global _verbose\r\n debug(\"Executing command: %s\" % command)\r\n if not _verbose:\r\n command = \"%s > /dev/null 2>&1\" % command\r\n resp = os.system(command)\r\n if resp != 0:\r\n exit(\"Command [%s] failed\" % command, resp)",
"def run(cmd: str, verbose: bool = False):\n\n if verbose:\n print(cmd)\n\n out = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n if verbose:\n print(out)\n\n return out",
"def cmd(command):\n pflush(\"[%s]> %s\" % (HOSTNAME, command))\n code = os.system(command)\n if code != 0:\n raise RuntimeError(\"Error executing: \" + command)",
"def run_command(self, command):\n subprocess.call(command, shell=True)",
"def run(cmd, comment):\n print(\"―\" * 80)\n if comment:\n print(f\"💬 {comment}\")\n print(f\"➤ {cmd}\")\n proc = subprocess.run(cmd, shell=True) # nosec\n if proc.returncode == 0:\n print(\"✅ success\")\n else:\n print(f\"❌ ERROR! return code: {proc.returncode}\")\n sys.exit(proc.returncode)",
"def run_command(self, command, timeout=None, stdout=True):\n print('Running \"{}\"...'.format(command))\n output = self._shell.run_command(\n command, timeout=timeout, async_=False\n )\n if stdout:\n print(output)\n print(\"Done!\")\n return output",
"def run(cmd):\n cmd = str(cmd)\n\n if env['verbose']:\n sys.stdout.write('--> %s\\n' % cmd)\n\n cmd_list = shlex.split(cmd)\n\n p = subprocess.Popen(\n cmd_list,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n return p.communicate()",
"def shell_cmd(self, cmd):\n cmd_ex = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n output = cmd_ex.communicate()[0]",
"def execute(cmd) :\n return os.system( cmd )",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def execute_command(self):\n return ''",
"def execute_command(cmd):\n popen = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdout = b''\n while True: # Save output to youtube_stdout while this being echoed\n tmp = popen.stdout.read(1)\n stdout += tmp\n _print(tmp, end=\"\")\n sys.stdout.flush()\n # do it until the process finish and there isn't output\n if tmp == b\"\" and popen.poll() is not None:\n break",
"def run_command(command):\n os.system('(echo {} | {})&'.format(command, SHELL))",
"def _printAndRun(self, logger, prefix, command, check=False):\n logger.info(prefix + \"Run: {}\".format(command), False)\n subprocess.run(command, check=check)",
"def run_command(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print output.strip()\n\n rc = process.poll()\n return rc",
"def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo",
"def run(cmd):\n print(cmd)\n r = os.system(cmd)\n if r:\n print(\"ERROR: command returned {0}\".format(r))\n sys.exit(r)",
"def cmd( self, *args, **kwargs ):\n verbose = kwargs.get( 'verbose', False )\n log = info if verbose else debug\n log( '*** %s : %s\\n' % ( self.name, args ) )\n self.sendCmd( *args, **kwargs )\n return self.waitOutput( verbose )",
"def run_command(cmd):\n\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for line in iter(proc.stdout.readline, b''):\n print(\">>> \" + line.rstrip())\n (stdout, stderr) = proc.communicate()\n return proc.returncode == 0, proc",
"def do_shell(self, command):\n proc = subprocess.Popen(command, stdout=self.stdout, shell=True)\n proc.communicate()"
] | [
"0.69847924",
"0.6959119",
"0.69450945",
"0.6919458",
"0.6859296",
"0.6812634",
"0.67663026",
"0.6756266",
"0.67383146",
"0.67201954",
"0.670066",
"0.6692517",
"0.66810435",
"0.6675754",
"0.6665769",
"0.6646724",
"0.65908474",
"0.6578724",
"0.65752876",
"0.6571929",
"0.65608656",
"0.6559181",
"0.6520646",
"0.64916915",
"0.64899254",
"0.6484098",
"0.6469638",
"0.6465695",
"0.646413",
"0.643645"
] | 0.696063 | 1 |
Get the last commit to modify the given paths | def last_modified_commit(*paths, **kwargs):
return check_output([
'git',
'log',
'-n', '1',
'--pretty=format:%h',
'--',
*paths
], **kwargs).decode('utf-8') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_diff_to_last_commit(path_to_repository, ignore_subrepositories):\n repo = Repo(path_to_repository)\n if ignore_subrepositories==True:\n unstaged_diff = repo.index.diff(other=None, paths=None, create_patch=False, ignore_submodules=\"all\")\n staged_diff = repo.head.commit.diff(other=Diffable.Index, paths=None, create_patch=False, ignore_submodules=\"all\")\n else:\n unstaged_diff = repo.index.diff(other=None, paths=None, create_patch=False)\n staged_diff = repo.head.commit.diff(other=Diffable.Index, paths=None, create_patch=False)\n\n return unstaged_diff + staged_diff",
"def get_first_last_commit_date(path):\n # %at specifies a UNIX time stamp\n process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n log = stdout.decode().strip('\\n').split('\\n')\n last = int(log[0])\n first = int(log[-1])\n return (first, last)",
"def cur_commit():\n result = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, stderr=PIPE, encoding=\"utf-8\",\n )\n result.check_returncode()\n return result.stdout.strip()",
"def get_current_commit():\n import os\n import subprocess\n git_dir = \"{}/.git\".format(settings.BASE_DIR)\n if os.name == 'nt':\n git_dir = \"{}\\\\.git\".format(settings.BASE_DIR)\n return subprocess.check_output([\"git\", \"--git-dir={}\".format(git_dir), \"rev-parse\", \"--verify\", \"HEAD\", \"--short\"]).decode(\"utf-8\")",
"def get_last_commit_id(commits):\n print(commits)\n if bool(commits):\n return commits[-1].get('id')\n return \"no commits\"",
"def getLast():\n try:\n open(os.path.join(basepath, 'last'))\n except IOError:\n try:\n arguments.project\n except NameError:\n print(\"No current project. Start one with -p\")\n exit()\n else:\n f = open(os.path.join(basepath, 'last'), 'w')\n f.write(arguments.project[0])\n f.close()\n store = open(os.path.join(basepath, 'last'), 'r')\n last = store.readline().rstrip('\\n')\n last = [last, 's']\n store.close()\n path = getPath(last[0])\n with open(path, 'r') as log:\n reader = csv.reader(log)\n for row in reader:\n if row[1] == 'a' or row[1] == 's':\n line = row\n try:\n line\n except NameError:\n last[1] = 's'\n else:\n last[1] = line[1]\n return last",
"async def get_last_commit(self) -> None:\n _endpoint = f\"/repos/{self.full_name}/branches/{self.default_branch}\"\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIReposCommit(response.get(\"commit\", {}))",
"def get_last_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[0] if commits else None",
"def last_modified_date(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%cd',\n '--date=iso',\n '--',\n *paths\n ], **kwargs).decode('utf-8')",
"def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev",
"def get_first_commit(repo, changes, since):\n if since:\n first = since\n else:\n first = get_latest_rev(changes)\n\n if first:\n try:\n return repo.rev_parse(first)\n except GitRepositoryError:\n if since:\n raise GbsError(\"Invalid commit: %s\" % (first))\n else:\n raise GbsError(\"Can't find last commit ID in the log, \"\\\n \"please specify it by '--since'\")",
"def get_commit(self, cwd=None):\n cwd = cwd or self.path\n if isinstance(cwd, str):\n cwd = config.Path(cwd)\n if not cwd.exists():\n return None\n try:\n return subprocess.check_output([\n \"git\", \"rev-parse\", \"HEAD\"\n ], cwd=str(cwd)).decode(\"utf-8\").strip()\n except subprocess.CalledProcessError:\n return \"Failed\"",
"def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()",
"def get_commit():\n cmd = \"git rev-parse HEAD\"\n result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n return result.stdout.decode(\"utf-8\").strip()",
"def get_git_commit(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n commit = repo.head.commit.hexsha\n return commit\n except Exception:\n return None",
"def current_commit(self) -> str:\n # TODO: Do we want short ids?\n head = self.open_repo().head\n if head is None:\n return None # TODO: This is bad\n else:\n return str(head.target)",
"def get_sha_commit(self):\n self.get_meta()\n filename = 'lastshacommit'\n # For unittest read from localfile\n if app.config['TEST']:\n filename = 'lastshacommittest'\n app.logger.debug(\"App config set to TEST. Reading shacommit from file \" + filename)\n\n try:\n handle = open(filename, \"r\")\n except Exception as e:\n app.logger.error(\"Error occurred when opening file \" + filename)\n app.logger.error(e)\n raise\n l_shacommit = handle.read().rstrip()\n handle.close()\n return l_shacommit",
"def svn_client_commit_item_t_path_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()",
"def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')",
"def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n self.__project.location).rstrip()",
"def git_get_mtime_at_commit(path: Path, revision: str, cwd: Path) -> str:\n cmd = [\"log\", \"-1\", \"--format=%ct\", revision, \"--\", path.as_posix()]\n lines = _git_check_output_lines(cmd, cwd)\n return datetime.utcfromtimestamp(int(lines[0])).strftime(GIT_DATEFORMAT)",
"def svn_client_commit_item2_t_path_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()",
"def get_current_commit_hash() -> FullCommitHash:\n return get_commit_hash(\"HEAD\")",
"def getConfig(self, committed=False, ignore_last=False):\n # go back from the latest entry, find the most recent config entry\n for idx, entry in list(enumerate(self.log))[::-1]:\n if 'config' in entry.command:\n if not committed: break\n if entry.command['config'] == 'joint': break\n if self.commit_idx >= idx and not ignore_last: break\n # print('committed: %s, ignore_last: %s, FETCHED config: %s' % (committed, ignore_last, entry.command))\n return entry.command",
"async def __last_commit(self, file_path: str) -> SourceResponses:\n files_api_url = await self._gitlab_api_url(\n f\"repository/files/{file_path}?ref={self._parameter('branch', quote=True)}\"\n )\n response = await self._session.head(files_api_url, headers=self._headers())\n last_commit_id = response.headers[\"X-Gitlab-Last-Commit-Id\"]\n commit_api_url = await self._gitlab_api_url(f\"repository/commits/{last_commit_id}\")\n return await super()._get_source_responses(commit_api_url)",
"async def set_last_commit(self) -> None:\n _endpoint = f\"/repos/{self.full_name}/branches/{self.default_branch}\"\n response = await self.client.get(endpoint=_endpoint)\n self._last_commit = response[\"commit\"][\"sha\"][0:7]",
"def last_rev(self, path, peg_revision, limit_revision=None):\n \n # Here's the plan, man. In the trivial case (where PEG_REVISION is\n # the same as LIMIT_REVISION), this is a no-brainer. If\n # LIMIT_REVISION is older than PEG_REVISION, we can use Subversion's\n # history tracing code to find the right location. If, however,\n # LIMIT_REVISION is younger than PEG_REVISION, we suffer from\n # Subversion's lack of forward history searching. Our workaround,\n # ugly as it may be, involves a binary search through the revisions\n # between PEG_REVISION and LIMIT_REVISION to find our last live\n # revision.\n peg_revision = self._getrev(peg_revision)\n limit_revision = self._getrev(limit_revision)\n if peg_revision == limit_revision:\n return peg_revision, path\n elif peg_revision > limit_revision:\n path = self.get_location(path, peg_revision, limit_revision)\n return limit_revision, path\n else:\n direction = 1\n while peg_revision != limit_revision:\n mid = (peg_revision + 1 + limit_revision) / 2\n try:\n path = self.get_location(path, peg_revision, mid)\n except vclib.ItemNotFound:\n limit_revision = mid - 1\n else:\n peg_revision = mid\n return peg_revision, path",
"def get_latest_path(self):\n files = [fname for fname in os.listdir(self.checkpoint_dir) if fname.endswith(\".pth\")]\n filepaths = [os.path.join(self.checkpoint_dir, filepath) for filepath in files]\n latest_file = max(filepaths, key=os.path.getctime)\n return latest_file"
] | [
"0.66357094",
"0.646586",
"0.64447695",
"0.6435589",
"0.6427532",
"0.6335761",
"0.629556",
"0.6295136",
"0.6277516",
"0.6207843",
"0.6153193",
"0.61308944",
"0.6121598",
"0.6120476",
"0.60908526",
"0.5947546",
"0.5922661",
"0.5920262",
"0.5910456",
"0.58968264",
"0.5894823",
"0.5834052",
"0.58259857",
"0.58242947",
"0.58126366",
"0.57875985",
"0.57866156",
"0.5778886",
"0.5738794",
"0.5732797"
] | 0.7615342 | 0 |
Return the last modified date (as a string) for the given paths | def last_modified_date(*paths, **kwargs):
return check_output([
'git',
'log',
'-n', '1',
'--pretty=format:%cd',
'--date=iso',
'--',
*paths
], **kwargs).decode('utf-8') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_date_modified(path):\n return str(datetime.datetime.fromtimestamp(os.path.getmtime(path)))",
"def _get_last_modified_date(path):\n last_date = 0\n root_dir, subdirs, files = os.walk(path).next()\n # get subdirs and remove hidden ones\n subdirs = [s for s in subdirs if not s.startswith('.')]\n for subdir in subdirs:\n for root, _, _ in os.walk(join(path, subdir)):\n base = os.path.basename(root)\n # checking if is a hidden path\n if not base.startswith(\".\") and not base.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(root))\n\n # check files of interest in the skill root directory\n files = [f for f in files\n if not f.endswith('.pyc') and f != 'settings.json']\n for f in files:\n last_date = max(last_date, os.path.getmtime(os.path.join(path, f)))\n return last_date",
"def last_modified(path):\n\n date = os.path.getmtime(path)\n date = datetime.datetime.fromtimestamp(date)\n return time.mktime(date.timetuple())",
"def last_modified():\n return \"Last modified: %s\" % time.ctime(os.path.getmtime(FILE_NAME))",
"def getDate(path):\n utime = ftp.stat(path=path).st_mtime\n last_modified = datetime.fromtimestamp(utime)\n return last_modified",
"def mtime(path):",
"def get_file_last_modification_date(filename=None):\n with open(filename, 'r') as fp:\n for line in fp:\n if line.startswith('Modify'):\n date_line = line.split()[1]\n file_date = datetime.strptime(date_line, \"%Y-%m-%d\")\n return filename, file_date",
"def get_file_modification_date() -> str:\n file_modification_date = datetime.now().strftime(\"%d.%m.%Y\")\n print(file_modification_date)\n return file_modification_date",
"def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)",
"def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')",
"def getmtime(self, path):\n return os.path.getmtime(path)",
"def get_file_modified_date(filepath):\n return datetime.datetime.fromtimestamp(os.path.getmtime(filepath))",
"def last_modified(self) -> str:\n\t\tif self.name == \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/matrix\"].attrs:\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/matrix\"].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\n\t\tif self.name != \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/layers/\" + self.name].attrs:\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\n\t\treturn timestamp()",
"def last_modified(self):\n return os.path.getmtime(self.filename)",
"def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")",
"def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")",
"def last_modified_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified_time\")",
"def DateModified(filepath, stringformat=False):\n time_in_s = os.path.getmtime(filepath)\n if stringformat:\n return time.ctime(time_in_s)\n else:\n return time_in_s",
"def get_modified_time(fname):\n return os.stat(fname).st_mtime",
"def getmtime(path):\n return get_instance(path).getmtime(path)",
"def last_modified(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified\")",
"def get_mtime(path):\n try:\n return path.lstat().mtime\n except error.Error:\n pass",
"def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")",
"def get_last_modified() -> str:\n service = get_authenticated_service(\"drive\", \"v3\")\n response = (\n service.files().get(fileId=SPREADSHEET_ID, fields=\"modifiedTime\").execute()\n )\n return response[\"modifiedTime\"]",
"def last_modified_date(when):\n\n # FIXME: this should use the Moira server timezone\n delta = datetime.datetime.now() - when\n if delta.days > 0:\n if delta.days > 365:\n return \"%.2f years\" % (delta.days / 365.25)\n else:\n return plural(delta.days, \"day\", \"days\")\n else:\n if delta.seconds > 3600:\n hours = delta.seconds / 3600\n minutes = (delta.seconds - hours * 3600) / 60\n return plural(hours, \"hour\", \"hours\") + ' ' + plural(minutes, \"minute\", \"minutes\")\n elif delta.seconds > 60:\n return plural(delta.seconds / 60, \"minute\", \"minutes\")\n else:\n return plural(delta.seconds, \"second\", \"seconds\")",
"def get_recently_modified_scratch_file(settings):\n dir_contents = os.listdir(settings.location)\n full_paths = map(lambda f: os.path.join(settings.location, f), dir_contents)\n files = filter(lambda f: os.path.isfile(str(f)), full_paths)\n if not files:\n return \"\"\n files = sorted(files, key=_get_mtime)\n return files[-1]",
"def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None",
"def last_modified(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified\")",
"def last_modified(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified\")"
] | [
"0.8046738",
"0.7416479",
"0.7245152",
"0.7219578",
"0.71944565",
"0.6939524",
"0.67891824",
"0.6737202",
"0.6734333",
"0.67058384",
"0.6656483",
"0.6634494",
"0.6624429",
"0.6620233",
"0.6591059",
"0.6591059",
"0.65882814",
"0.6566373",
"0.6562852",
"0.6555743",
"0.6548625",
"0.653265",
"0.64970225",
"0.64970225",
"0.6482359",
"0.64482903",
"0.64400184",
"0.64279085",
"0.63936424",
"0.63936424"
] | 0.7935193 | 1 |
Return whether the given paths have been changed in the commit range. Used to determine if a build is necessary | def path_touched(*paths, commit_range):
return check_output([
'git', 'diff', '--name-only', commit_range, '--', *paths
]).decode('utf-8').strip() != '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_rev_dirty(ctx: \"PlanemoCliContext\", directory: str) -> bool:\n return io.shell([\"git\", \"diff\", \"--quiet\"], cwd=directory) != 0",
"def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False",
"def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)",
"def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty",
"def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)",
"def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False",
"def check_dependency_change(targets: List[str], dependencies: List[str]) -> bool:\n min_target_mtime = min([get_mtime(path) for path in targets])\n max_dep_mtime = max([get_mtime(path) for path in dependencies])\n return max_dep_mtime > min_target_mtime",
"def has_changes(directory=None):\n out = check_output('git status', shell=True, cwd=directory)\n if 'nothing to commit (working directory clean)' in out:\n return False\n if 'nothing to commit, working directory clean' in out:\n return False\n if 'nothing to commit, working tree clean' in out:\n return False\n if 'nothing added to commit' in out:\n return False\n return True",
"def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True",
"def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):\n for (nm, fnm, typ) in old:\n if mtime(fnm) > last_build:\n print \"building because %s changed\" % fnm\n return True\n elif pyc and mtime(fnm[:-1]) > last_build:\n print \"building because %s changed\" % fnm[:-1]\n return True\n return False",
"def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 0",
"def repo_has_incoming(*repo_paths):\n incoming = False\n\n for repo_path in repo_paths:\n try:\n subprocess.check_output(['hg', 'incoming', '-R', repo_path])\n print('Detected incoming changesets in \"{}\"'.format(repo_path))\n incoming = True\n except subprocess.CalledProcessError as e:\n if e.returncode != 1:\n raise\n\n return incoming",
"def has_staged_changes(repo):\n return subprocess.call(['git', 'diff-index', '--cached', '--quiet', 'HEAD'],\n cwd=repo) != 0",
"def different_required(old_required: Dict[str, UpToDate], new_required: Dict[str, UpToDate]) -> bool:\n for new_path in sorted(new_required.keys()):\n if new_path not in old_required:\n Logger.why(f\"Must run actions because changed to require: {new_path}\")\n return True\n\n for old_path in sorted(old_required.keys()):\n if old_path not in new_required:\n Logger.why(f\"Must run actions because changed to not require: {old_path}\")\n return True\n\n for path in sorted(new_required.keys()):\n old_up_to_date = old_required[path]\n new_up_to_date = new_required[path]\n if old_up_to_date.producer != new_up_to_date.producer:\n Logger.why(\n f\"Must run actions because the producer of the required: {path} \"\n f'has changed from: {old_up_to_date.producer or \"source file\"} '\n f'into: {new_up_to_date.producer or \"source file\"}'\n )\n return True\n if not is_exists(path) and old_up_to_date.mtime_ns != new_up_to_date.mtime_ns:\n Logger.why(\n f\"Must run actions \"\n f\"because the modification time of the required: {path} \"\n f\"has changed from: \"\n f\"{_datetime_from_nanoseconds(old_up_to_date.mtime_ns)} \"\n f\"into: \"\n f\"{_datetime_from_nanoseconds(new_up_to_date.mtime_ns)}\"\n )\n return True\n\n return False",
"def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0",
"def svn_fs_paths_changed(*args):\r\n return _fs.svn_fs_paths_changed(*args)",
"def has_diff(ds, remote_branch_name, remote, paths):\n remote_ref = '/'.join((remote, remote_branch_name))\n if remote_ref not in ds.repo.get_remote_branches():\n lgr.debug(\"Remote '%s' has no branch matching %r. Will publish\",\n remote, remote_branch_name)\n # we don't have any remote state, need to push for sure\n return True\n\n lgr.debug(\"Testing for changes with respect to '%s' of remote '%s'\",\n remote_branch_name, remote)\n current_commit = ds.repo.get_hexsha()\n within_ds_paths = [p['path'] for p in paths if p['path'] != ds.path]\n commit_differ = current_commit != ds.repo.get_hexsha(remote_ref)\n # yoh: not sure what \"logic\" was intended here for comparing only\n # some files. By now we get a list of files, if any were changed,\n # from the commit on remote, and somehow diff says below that they didn't differ...\n # but if commit is different -- there must be differences and we\n # should publish. otherwise now skips publishing root dataset\n # although its master is behind by 1 commit. Moreover there could\n # be an empty commit -- shouldn't we publish then???\n if not commit_differ and within_ds_paths:\n # only if any paths is different from just the parentds root\n # in which case we can do the same muuuch cheaper (see below)\n # if there were custom paths, we will look at the diff\n lgr.debug(\"Since paths provided, looking at diff\")\n return any(r[\"state\"] != \"clean\"\n for r in ds.diff(path=within_ds_paths,\n fr=\"HEAD\",\n to=remote_ref,\n untracked=\"no\"))\n else:\n # if commits differ at all\n lgr.debug(\"Since no paths provided, comparing commits\")\n return commit_differ",
"def hasChanged(self):\n return ((self.mtime != getmtime(self.path)) or\n (self.size != os.path.getsize(self.path)) )",
"def is_commit_affecting_directory(self, commit, directory):\n exit_code = self.run([\n 'git', 'diff-tree', '--quiet', '--no-commit-id', '-r', commit,\n '--', directory\n ],\n return_exit_code=True)\n return exit_code == 1",
"def needs_rebuild(source, target):\n return not os.path.isfile(target) or (\n os.path.getmtime(source) > os.path.getmtime(target))",
"def files_are_modified(filenames, lastupdate):\n for filename in filenames:\n if file_is_modified(filename, lastupdate):\n return True\n return False",
"def commits_exist(repo, commits):\n for commit in commits:\n if not commit_exists(repo, commit):\n return False\n return True",
"def is_release_notes_changed(self):\n # there exists a difference between origin/master and current branch\n if self.master_diff:\n diff_releases = self.master_diff.split('##')\n unreleased_section = diff_releases[1]\n unreleased_section_lines = unreleased_section.split('\\n')\n\n adds_in_diff = 0\n removes_in_diff = 0\n\n for line in unreleased_section_lines:\n if line.startswith('+'):\n adds_in_diff += 1\n elif line.startswith('-') and not re.match(r'- *$', line):\n removes_in_diff += 1\n\n # means that at least one new line was added\n if adds_in_diff - removes_in_diff > 0:\n return True\n\n print_error(F'No new comment has been added in the release notes file: {self.release_notes_path}')\n return False",
"def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False",
"def check_for_major_changes(cabal: CabalFile) -> bool:\n old_ver = cabal.get_version()\n old_tag = None\n if f'v{old_ver}' in get_tags():\n old_tag = f'v{old_ver}'\n if f'{old_ver}' in get_tags():\n old_tag = f'{old_ver}'\n if old_tag is None:\n print(f\"Couldn't find tag {old_tag} for current version; skipping revision check.\\n\")\n return False\n\n cmd = ['git', 'diff', '--name-only', f'{old_tag}..HEAD']\n changed_files = [ l.strip()\n for l in check_output(cmd).decode('UTF-8').split('\\n')\n if len(l.strip()) > 0 ]\n non_cabals = [ f\n for f in changed_files\n if not f.endswith('.cabal') ]\n print(f\"{len(changed_files)} files have changed since {old_tag}:\\n \",\n ' \\n'.join(changed_files))\n\n if len(non_cabals) > 0:\n return False\n else:\n print(dedent(f'''\n It appears that the only changes between {old_tag} and now are in the\n cabal file. Perhaps you want to make a revision instead?\n\n y = make a revision\n n = do a full release anyways\n d = show me a diff\n '''))\n while True:\n resp = prompt_for_char('How to proceed?', options='ynd')\n if resp == 'd':\n cmd = ['git', 'diff', f'{old_tag}..HEAD']\n print(' '.join(cmd))\n check_call(cmd)\n elif resp == 'y':\n return True\n elif resp == 'n':\n return False",
"def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0",
"def _check_guts_eq(attr, old, new, last_build):\n if old != new:\n print \"building because %s changed\" % attr\n return True\n return False",
"def _paths_are_consistent_with_hash_prefixes(self, paths,\n path_hash_prefixes):\n\n # Assume that 'paths' and 'path_hash_prefixes' are inconsistent until\n # proven otherwise.\n consistent = False\n\n if len(paths) > 0 and len(path_hash_prefixes) > 0:\n for path in paths:\n path_hash = self._get_target_hash(path)\n # Assume that every path is inconsistent until proven otherwise.\n consistent = False\n\n for path_hash_prefix in path_hash_prefixes:\n if path_hash.startswith(path_hash_prefix):\n consistent = True\n break\n\n # This path has no matching path_hash_prefix. Stop looking further.\n if not consistent: break\n\n return consistent",
"def needs_update(self, *path):\n dt_fmt = \"%Y-%m-%d %H:%M:%S\"\n try:\n linfo = self.info(*path)\n dt_local = datetime.datetime.strptime(\n linfo[\"datetime\"][:19], dt_fmt)\n dt_server = datetime.datetime.strptime(\n self.serverfiles.info(*path)[\"datetime\"][:19], dt_fmt)\n return dt_server > dt_local\n except FileNotFoundError:\n return True\n except KeyError:\n return True",
"def _any_files_newer(cls, files, check_mtime):\n for path in files:\n path_mtime = os.path.getmtime(path)\n if path_mtime > check_mtime:\n # This path was modified more recently than the\n # check_mtime.\n return True\n # If we made it here, nothing was newer than the check_mtime\n return False"
] | [
"0.6485178",
"0.64627033",
"0.63761",
"0.6337259",
"0.6332157",
"0.6283598",
"0.6267245",
"0.6246275",
"0.6215795",
"0.6191025",
"0.6181917",
"0.6179466",
"0.6164216",
"0.6145015",
"0.6080553",
"0.6079178",
"0.6058201",
"0.6047565",
"0.6034595",
"0.6026013",
"0.6012013",
"0.60063803",
"0.59711635",
"0.5956637",
"0.5941676",
"0.5896293",
"0.5873296",
"0.58268684",
"0.58145463",
"0.57890725"
] | 0.8217679 | 0 |
Get docker build args dict, rendering any templated args. | def render_build_args(options, ns):
build_args = options.get('buildArgs', {})
for key, value in build_args.items():
build_args[key] = value.format(**ns)
return build_args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def docker_build_context(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"docker_build_context\")",
"def docker_build_context(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"docker_build_context\")",
"def read_dockerfile_for_args(target):\n import colorama\n build_args = {}\n missing_args = {}\n empty_string = \"\"\n\n # read dockerfile for args that have no value\n try:\n with open(target + '/Dockerfile') as dockerfile:\n for line in dockerfile:\n if line.startswith(\"ARG \"):\n dockerfile_args = line.replace(\n \"ARG \", \"\").strip(\"\\n\").split(\"=\")\n\n arg_name = dockerfile_args[0]\n arg_value = \"\"\n\n if len(dockerfile_args) > 1:\n arg_value = dockerfile_args[1].strip(\"\\n\")\n\n env_value = os.environ.get(arg_name)\n\n build_args[arg_name] = arg_value\n if not env_value is None:\n build_args[arg_name] = env_value\n\n if build_args[arg_name] is empty_string:\n missing_args[arg_name] = arg_name\n except FileNotFoundError:\n exit(f\"Dockerfile not found: {target}/Dockerfile\")\n\n if len(missing_args) > 1:\n message = \"WARNING: Arguments found with no defined value \" \\\n \"found in Dockerfile or environment [{}]\"\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT +\n message.format(\", \".join(missing_args)))\n\n return build_args",
"def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright_year\n else self.copyright_year\n ),\n \"github_owner\": self.github_owner,\n \"name\": self.name,\n \"slug\": self.slug,\n # The template expects the test cases in a single string separated by\n # spaces.\n \"test_cases\": \" \".join(self.test_cases),\n }\n cruft_json = self.target_dir / \".cruft.json\"\n if cruft_json.is_file():\n with open(cruft_json, \"r\", encoding=\"utf-8\") as f:\n cruft_json_data = json.load(f)\n args = cruft_json_data[\"context\"][\"cookiecutter\"]\n for k, v in local_args.items():\n args[k] = v\n else:\n args = local_args\n\n return args",
"def build_docker_build_command(configuration):\n parts = configuration.pop('docker', 'docker').split()\n parts.append('build')\n\n build = configuration.pop('build')\n\n build['path'] = os.path.join(configuration['workspace'], build['path'])\n build['file'] = os.path.join(build['path'], build['file'])\n\n parts.extend(build_parameter_parts(\n build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))\n\n parts.extend(build_dict_parameter_parts(build, 'build-arg'))\n parts.append(build.pop('path'))\n\n return parts",
"def build_args(self, project_update, private_data_dir, passwords):\n args = []\n if getattr(settings, 'PROJECT_UPDATE_VVV', False):\n args.append('-vvv')\n if project_update.job_tags:\n args.extend(['-t', project_update.job_tags])\n return args",
"def _get_context(data):\n try:\n docker_options = DockerRunCommandOptions(cmd=\"docker run --help\",\n start=\"Options:\",\n end=None).get_options_json()\n except Exception as ex:\n print(ex)\n docker_options = {}\n context = DEFAULT_DATA.copy()\n context[\"docker_options\"] = docker_options\n context.update(data)\n context[\"registry\"][\"address_select\"] = \"\"\n if context[\"registry\"][\"address\"] in context[\"registry_options\"].keys():\n context[\"registry\"][\"address_select\"] = context[\"registry\"][\"address\"]\n return context",
"def parse_arguments(self):\n self.args = self.argparser.parse_args(self.template_args) # noqa: T484\n\n # get values from args or defaults\n for name, (categ, rest) in self.data.items():\n if categ not in '<>?':\n continue\n val = getattr(self.args, name)\n if rest.get('type') == 'flag':\n val = str(rest.get('val')) if val else ''\n else:\n val = val if val is not None else rest.get('default')\n self.variables[name] = val\n\n # possibly fill in substitutions in the template variables\n findreplace = re.compile(r'{{\\s*(\\w+)\\s*}}')\n for name, val in self.variables.items():\n if findreplace.search(val):\n t = jinja2.Template(val)\n self.variables[name] = t.render(self.variables)",
"def docker_params(self):\n return {}",
"def _render_args(self, target, output_dir):\n args = []\n\n # Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.\n # : 'package' is the main aapt operation (see class docstring for more info).\n # : '-m' is to \"make\" a package directory under location '-J'.\n # : '-J' Points to the output directory.\n # : '-M' is the AndroidManifest.xml of the project.\n # : '-S' points to the resource_dir to \"spider\" down while collecting resources.\n # : '-I' packages to add to base \"include\" set, here it is the android.jar of the target-sdk.\n args.extend([self.aapt_tool(target.build_tools_version)])\n args.extend(['package', '-m', '-J', output_dir])\n args.extend(['-M', target.manifest.path])\n args.extend(['-S', target.resource_dir])\n args.extend(['-I', self.android_jar_tool(target.manifest.target_sdk)])\n args.extend(['--ignore-assets', self.ignored_assets])\n logger.debug('Executing: {0}'.format(' '.join(args)))\n return args",
"def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )",
"def docker_build(c):\n cli_tasks.docker_build.run(c)",
"def build(parser):\n parser.add_argument(\n '-i', '--identity-file',\n help=(\n 'A SSH private key file which may be used to pull down '\n 'repositories when building.'\n ),\n )\n parser.add_argument(\n '-e', '--env',\n action='append',\n default=[],\n help=(\n 'Add environ variables to the build. These may be accessed in '\n 'the build scripts. Each variable should be of the format '\n 'KEY=VALUE. This may be used to pass in credentials required '\n 'to access private repositories. May be specified more than once.'\n ),\n )\n parser.add_argument(\n '-b', '--build-dir',\n default=os.getcwd(),\n help=(\n 'This folder should be accessible from the docker instance.'\n ),\n )\n parser.add_argument(\n '--archive',\n help=(\n 'Archive the build files into a local tarball.'\n ),\n )\n parser.add_argument(\n '--archive-only',\n action='store_true',\n default=False,\n help=(\n 'Skip tagging and building the runner image.'\n ),\n )\n parser.add_argument(\n '-t', '--tag',\n help=(\n 'Tag to apply to the built image. '\n 'This will default to the current date/time.'\n ),\n )\n parser.add_argument(\n '--no-cache',\n dest='use_cache',\n action='store_false',\n default=True,\n help=(\n 'Do not mount a cache volume when compiling the app.'\n ),\n )\n parser.add_argument(\n '--cache',\n metavar='CONTAINER:PATH',\n help=(\n 'An optional volume or location for the cache. The format is '\n '\"<volume_id>:<path>\" where the \"volume_id\" must be the '\n 'name or hash of an existing volume. The \"path\" is an absolute '\n 'path to the cache folder/volume within the build container.'\n '\\n\\n'\n 'By default a container will be created by mangling the name of '\n 'the app by appending \"__buildcache\" (e.g. \"myapp__buildcache\").'\n '\\n\\n'\n 'This option is ignored if --no-cache is specified.'\n '\\n\\n'\n 'The \"volume_id\" may be an absolute path on the host filesystem.'\n '\\n\\n'\n 'The \"path\" may be dropped, in which case it will default to '\n '/tmp/cache inside the build container.'\n '\\n\\n'\n 'Examples:'\n '\\n\\n'\n ' # custom volume with default path\\n'\n ' --cache my_cache'\n '\\n\\n'\n ' # custom path inside of volume\\n'\n ' --cache my_cache:/tmp/cache'\n '\\n\\n'\n ' # host filesystem\\n'\n ' --cache /tmp/cache'\n ),\n )\n parser.add_argument(\n '--rebuild-cache',\n action='store_true',\n default=False,\n help=(\n 'Delete any cached artifacts prior to building.'\n ),\n )\n parser.add_argument(\n '--skip-cleanup',\n action='store_true',\n default=False,\n help=(\n 'Skip removal of images and containers.'\n ),\n )\n parser.add_argument(\n 'app',\n help=(\n 'Path to an application folder with a meta.yml file'\n ),\n )",
"def _generateWindowsBuildArgs(\n self, logger, basetagOverride=None, isolationOverride=None\n ):\n\n # Determine the appropriate container image base tag for the host system release unless the user specified a base tag\n buildArgs = []\n hostBaseTag = WindowsUtils.getHostBaseTag()\n baseTag = basetagOverride if basetagOverride is not None else hostBaseTag\n\n if baseTag is None:\n raise RuntimeError(\n \"unable to determine Windows Server Core base image tag from host system. Specify it explicitly using -basetag command-line flag\"\n )\n\n buildArgs = [\"--build-arg\", \"BASETAG={}\".format(baseTag)]\n\n # Use the default isolation mode unless requested otherwise\n dockerInfo = DockerUtils.info()\n isolation = (\n isolationOverride\n if isolationOverride is not None\n else dockerInfo[\"Isolation\"]\n )\n buildArgs += [\"--isolation={}\".format(isolation)]\n\n # If the user specified process isolation mode and a different base tag to the host system then warn them\n prefix = self.getPrefix()\n if isolation == \"process\" and baseTag != hostBaseTag:\n logger.info(\n \"[{}] Warning: attempting to use different Windows container/host versions\".format(\n prefix\n ),\n False,\n )\n logger.info(\n \"[{}] when running in process isolation mode, this will usually break!\".format(\n prefix\n ),\n False,\n )\n\n # Set a sensible memory limit when using Hyper-V isolation mode\n if isolation == \"hyperv\":\n buildArgs += [\"-m\", \"4GiB\"]\n\n return buildArgs",
"def main():\n parser = argparse.ArgumentParser(\n epilog=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-d\", \"--dry-run\", action=\"store_true\", default=0, help=\"Dry run mode.\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Verbosity. Default is WARNING level.\",\n )\n\n subparsers = parser.add_subparsers(help=\"Sub commands\", dest=\"subparser\")\n subparsers.required = True\n\n build_parser = subparsers.add_parser(\n \"build\",\n description=\"Build an image from Dockerfile, caching image hierarchy\",\n help=\"Build an image from a Dockerfile\",\n )\n build_parser.add_argument(\n \"path\", metavar=\"PATH\", help=\"The build context directory\"\n )\n build_parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Name of the Dockerfile. If not provided, \"\n \"will use config.DOCKERFILE_PATH_PATTERN to compute. \",\n )\n build_parser.add_argument(\n \"-v\",\n \"--git-sha\",\n required=True,\n help=\"The version of code to build against, \" \"will pass as GIT_SHA variable\",\n )\n build_parser.add_argument(\n \"-n\", \"--name\", required=True, help=\"The name of the image to build\"\n )\n build_parser.add_argument(\n \"--build-arg\",\n metavar=\"ARG=VALUE\",\n nargs=\"*\",\n default=[],\n help=\"Set extra build-time variables. GIT_SHA, TIMESTAMP will be passed by default.\",\n )\n build_parser.add_argument(\n \"-r\",\n \"--raw\",\n action=\"store_true\",\n help=\"Whether to use raw docker build command to build, skipping caching logic\",\n )\n build_parser.add_argument(\n \"--registry\",\n default=config.DOCKER_REGISTRY,\n help=\"Docker registry use to determine the image identity, \"\n \"can be set via IMAGE_BUILDER_DOCKER_REGISTRY environment variable, \"\n 'or set DOCKER_REGISTRY in config.py. Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-t\",\n \"--tag-pattern\",\n default=config.GIT_SHA_TAG_PATTERN,\n help=\"Tag pattern, can only include one `{git_sha}` placeholder, \"\n 'such as \"{git_sha}-new\". If the tag exists, we won\\'t rebuild it. 
'\n 'Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-e\",\n \"--extra-tag\",\n nargs=\"*\",\n default=[],\n help=\"Extra tags to tag to the final images\",\n )\n build_parser.add_argument(\n \"--extra-name\",\n nargs=\"*\",\n default=[],\n help=\"Extra name and optionally with a tag in the 'name:tag' format\",\n )\n build_parser.add_argument(\n \"-o\", \"--output-hash\", help=\"The output filename of the files hash log.\"\n )\n build_parser.set_defaults(func=build)\n\n args = parser.parse_args()\n if args.dry_run:\n # DRY_RUN env will be read in image_builder.libs.process\n os.environ[\"DRY_RUN\"] = \"1\"\n\n if args.func == build:\n args.path = expand_path(args.path)\n if args.output_hash:\n args.output_hash = expand_path(args.output_hash)\n\n args.file = args.file or locate_dockerfile(args.name)\n args.file = expand_path(args.file)\n # set environ for main dockerfile for possibly retrieving later\n os.environ[\n config.DOCKERFILE_ENV_PATTERN.format(image_name=args.name)\n ] = args.file\n\n # change CWD to PATH\n os.chdir(args.path)\n\n if not args.registry:\n parser.error(\n \"--registry should be provied \"\n \"or specified by IMAGE_BUILDER_DOCKER_REGISTRY environment variable or set DOCKER_REGISTRY in config.py\"\n )\n if not all(\"=\" in kv for kv in args.build_arg):\n parser.error(\"--build_arg must be in ARG=VALUE format\")\n\n # set git_sha_tag\n try:\n args.git_sha_tag = args.tag_pattern.format(git_sha=args.git_sha)\n except KeyError:\n parser.error(\n 'Wrong --tag-pattern provided. Can only include one `{git_sha}` placeholder, such as \"{git_sha}-new\"'\n )\n\n # setup logging\n level = logging.WARNING - args.verbose * 10\n logging.basicConfig(\n level=level, format=\"%(asctime)s %(name)s %(levelname)s %(message)s\"\n )\n\n if args.output_hash:\n h = logging.FileHandler(args.output_hash)\n h.setLevel(logging.DEBUG)\n h.setFormatter(logging.Formatter(\"%(message)s\"))\n hash_logger.addHandler(h)\n\n # Suppress warning when we don't verify ssl\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n return args.func(args)",
"def args(self):\n if not self.__args_updated:\n for inc in self.include_templates:\n self.__args.update(inc.args)\n self.__args_updated = True\n return self.__args",
"def _translate_docker_properties(self):\n self.spec.setdefault(\"name\", self.spec.pop(\"container_name\", self.name))\n self.spec.setdefault(\"command\", shlex.split(self.spec.pop(\"entrypoint\", \"\")))\n self.spec.setdefault(\"args\", shlex.split(self.spec.pop(\"cmd\", \"\")))\n \n self.spec.setdefault(\"env\", _make_env(self.spec.pop(\"environment\", {})))\n for env in self.spec.get(\"env\", []):\n if \"value\" in env:\n env[\"value\"] = str(env[\"value\"])\n\n self.spec.setdefault(\"stdin\", self.spec.pop(\"stdin_open\", None))\n self.spec.setdefault(\"workingDir\", self.spec.pop(\"working_dir\", None))\n\n privileged = self.spec.pop(\"privileged\", None)\n if privileged:\n self.spec.setdefault(\"securityContext\", {})\n self.spec[\"securityContext\"].setdefault(\"privileged\", privileged)\n\n # Clean-up any empty fields\n self.spec = {k: v for k, v in self.spec.items() if v}",
"def docker_info(args): # type: (CommonConfig) -> t.Dict[str, t.Any]\n stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)\n return json.loads(stdout)",
"def wrapper_environment(args):\n\n return {\n ENVIRONMENT_KEY: json.dumps({\n 'verbose': args.verbose,\n 'cc': shlex.split(args.cc),\n 'cxx': shlex.split(args.cxx)\n })\n }",
"def build(context, cache=True, force_rm=False, hide=False):\n python_name = f\"{IMAGE_NAME}-{IMAGE_VER}\"\n docker_name = f\"{IMAGE_NAME}:{IMAGE_VER}\"\n\n print(f\"Building Python package {python_name}\")\n run_cmd(\n context=context,\n exec_cmd=\"poetry build\",\n pty=False,\n error_message=f\"Failed to build Python package {python_name}\",\n )\n\n print(f\"Building Docker image {docker_name}\")\n command = (\n f\"docker build --tag {docker_name} \"\n f\"--build-arg LMA_VERSION={IMAGE_VER} --build-arg WHEEL_DIR=dist \"\n f\"-f Dockerfile .\"\n )\n\n if not cache:\n command += \" --no-cache\"\n if force_rm:\n command += \" --force-rm\"\n\n run_cmd(\n context=context,\n exec_cmd=command,\n pty=False,\n hide=hide,\n error_message=f\"Failed to build Docker image {docker_name}\",\n )",
"def build(args):\n\n logging.info(\"Parsing configuration...\")\n try:\n config = {\"site\": configurator.get_config(os.path.join(args.src, args.configfile))}\n except Exception as exc:\n sys.exit(\"Error during configuration: \" + str(exc))\n\n if (args.autobaseurl):\n config[\"site\"][\"baseurl\"] = os.path.abspath(args.dest)\n\n logging.info(\"Loading and pre-processing content...\")\n if (os.path.isdir(os.path.join(args.src, paths.POSTS_PATH))):\n try:\n config[\"posts\"] = loader.get_from_folder(os.path.join(args.src, paths.POSTS_PATH), config)\n except ValueError as exc:\n sys.exit(\"Error loading posts: \" + str(exc))\n else:\n config[\"posts\"] = {}\n\n if (os.path.isdir(os.path.join(args.src, paths.PAGES_PATH))):\n try:\n config[\"pages\"] = loader.get_from_folder(os.path.join(args.src, paths.PAGES_PATH), config)\n except ValueError as exc:\n sys.exit(\"Error loading pages: \" + str(exc))\n else:\n config[\"pages\"] = {}\n\n logging.debug(\"Configuring Jinja2 environment...\")\n jinjaEnv = configurator.configure_jinja(config[\"site\"][\"theme\"], args.src)\n\n logging.debug(\"Initializing builder...\")\n Builder(jinjaEnv, config, args.src, args.dest, args.noclean).build()",
"def render(self, *args, **kwargs):\r\n for dictarg in args: kwargs.update(dictarg)\r\n stdout = []\r\n self.execute(stdout, kwargs)\r\n return ''.join(stdout)",
"def build_arguments(self, *cmd_args, **cmd_kwargs):\n args = []\n args.extend(cmd_args)\n\n for raw_key, value in cmd_kwargs.items():\n if len(raw_key) == 1:\n args.append('-{}'.format(raw_key))\n else:\n key = raw_key.replace('_', '-')\n args.append('--{}'.format(key))\n\n if value is True:\n # If True, it is enough.\n # e.g.: system=True translates to --system\n continue\n\n args.append(str(value))\n\n return args",
"def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )",
"def cmd_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Full image path can be optionally supplied.\")\n args = parser.parse_args()\n return args",
"def _generate_template_context(arguments: PackagingResourceArguments,\n manifest: OdahuProjectManifest,\n output_folder: str) -> DockerTemplateContext:\n logging.info('Building context for template')\n\n return DockerTemplateContext(\n model_name=manifest.model.name,\n model_version=manifest.model.version,\n odahuflow_version=manifest.odahuflowVersion,\n timeout=arguments.timeout,\n host=arguments.host,\n port=arguments.port,\n workers=arguments.workers,\n threads=arguments.threads,\n pythonpath=output_folder,\n wsgi_handler=f'{HANDLER_MODULE}:{HANDLER_APP}',\n model_location=ODAHU_SUB_PATH_NAME,\n entrypoint_target=ENTRYPOINT_TEMPLATE,\n handler_file=f'{HANDLER_MODULE}.py',\n base_image=arguments.dockerfileBaseImage,\n conda_file_name=CONDA_FILE_NAME,\n conda_server_file_name=CONDA_SERVER_FILE_NAME,\n entrypoint_docker=ENTRYPOINT_TEMPLATE\n )",
"def get_args_from_console(args):\n return {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }",
"def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)",
"def _create_container_args(kwargs):\n # Copy over kwargs which can be copied directly\n create_kwargs = {}\n for key in copy.copy(kwargs):\n if key in RUN_CREATE_KWARGS:\n create_kwargs[key] = kwargs.pop(key)\n host_config_kwargs = {}\n for key in copy.copy(kwargs):\n if key in RUN_HOST_CONFIG_KWARGS:\n host_config_kwargs[key] = kwargs.pop(key)\n\n # Process kwargs which are split over both create and host_config\n ports = kwargs.pop('ports', {})\n if ports:\n host_config_kwargs['port_bindings'] = ports\n\n volumes = kwargs.pop('volumes', {})\n if volumes:\n host_config_kwargs['binds'] = volumes\n\n network = kwargs.pop('network', None)\n network_driver_opt = kwargs.pop('network_driver_opt', None)\n if network:\n network_configuration = {'driver_opt': network_driver_opt} \\\n if network_driver_opt else None\n\n create_kwargs['networking_config'] = {network: network_configuration}\n host_config_kwargs['network_mode'] = network\n\n # All kwargs should have been consumed by this point, so raise\n # error if any are left\n if kwargs:\n raise create_unexpected_kwargs_error('run', kwargs)\n\n create_kwargs['host_config'] = HostConfig(**host_config_kwargs)\n\n # Fill in any kwargs which need processing by create_host_config first\n port_bindings = create_kwargs['host_config'].get('PortBindings')\n if port_bindings:\n # sort to make consistent for tests\n create_kwargs['ports'] = [tuple(p.split('/', 1))\n for p in sorted(port_bindings.keys())]\n if volumes:\n if isinstance(volumes, dict):\n create_kwargs['volumes'] = [\n v.get('bind') for v in volumes.values()\n ]\n else:\n create_kwargs['volumes'] = [\n _host_volume_from_bind(v) for v in volumes\n ]\n return create_kwargs",
"def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))"
] | [
"0.64995056",
"0.6460428",
"0.5992242",
"0.59243613",
"0.5775845",
"0.56487375",
"0.5570073",
"0.5546533",
"0.5522067",
"0.5497052",
"0.5493939",
"0.54721403",
"0.5428458",
"0.5366059",
"0.5361795",
"0.535775",
"0.5269126",
"0.5204375",
"0.5196358",
"0.51938367",
"0.518926",
"0.51665103",
"0.515709",
"0.5149833",
"0.51478",
"0.51452595",
"0.51444477",
"0.51331633",
"0.5128882",
"0.50937015"
] | 0.7038579 | 0 |
Cached getter for docker client | def docker_client():
return docker.from_env() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_docker_client(self) -> \"DockerClient\":",
"def docker_client():\n client = docker.from_env()\n return client",
"def get_client():\n info = {}\n host = os.environ.get('DOCKER_HOST')\n net_host = os.environ.get('DOCKER_NET_HOST')\n\n client_api_version = os.environ.get('DOCKER_API_VERSION')\n if not client_api_version:\n client_api_version = \"auto\"\n\n # IP to use for started containers\n if net_host:\n info['host'] = net_host\n elif host:\n info['host'] = urlparse.urlparse(host).netloc.split(':')[0]\n else:\n info['host'] = 'localhost'\n\n verify = os.environ.get('DOCKER_TLS_VERIFY') == '1'\n if verify: # use TLS\n assert_hostname = None\n cert_path = os.environ.get('DOCKER_CERT_PATH')\n if cert_path:\n client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))\n ca_cert = os.path.join(cert_path, 'ca.pem')\n else:\n client_cert = ca_cert = None\n\n tls_config = docker.tls.TLSConfig(\n client_cert=client_cert,\n ca_cert=ca_cert,\n verify=verify,\n assert_hostname=assert_hostname,\n )\n return docker.Client(base_url=host, tls=tls_config, version=client_api_version), info\n else:\n return docker.Client(base_url=host, version=client_api_version), info",
"def get_docker_client(kard):\n return ComposePkr(kard, DOCKER_SOCK)",
"def _cache(self):\n return self._class(self.client_servers, **self._options)",
"def docker_client(request):\n client = docker.from_env()\n yield client\n client.close()",
"def redis_client(self) -> Redis:\n return self.app.key_value_store.redis_client",
"def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client",
"def _connect_docker_client(self):\n # lets check if Docker ENV information is set and use local socket as fallback\n if os.environ.get(\"DOCKER_HOST\") is None:\n os.environ[\"DOCKER_HOST\"] = \"unix://var/run/docker.sock\"\n LOG.warning(\"ENV variable 'DOCKER_HOST' not set. Using %r as fallback.\" % os.environ[\"DOCKER_HOST\"])\n\n # lets connect to the Docker instance specified in current ENV\n # cf.: http://docker-py.readthedocs.io/en/stable/machine/\n dc = docker.from_env(assert_hostname=False)\n # do a call to ensure that we are connected\n dc.info()\n LOG.info(\"Connected to Docker host: %r\" % dc.base_url)\n return dc",
"async def login(self) -> \"DockerClient\":",
"def get_client():\n\n return MongoClientManager().client",
"def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)",
"def docker(self, obj):\n\n if self._dockerclient is not None:\n return self._dockerclient\n host = self.properties[self.HOST_NODE]\n host_ip = self.get_host_ip(self, obj, host)\n url = 'tcp://' + host_ip + ':2375'\n self._dockerclient = docker.Client(base_url=url)",
"def redis_client(docker_ip, docker_services):\n client = redis.StrictRedis(host='localhost', port=6379, db=0)\n docker_services.wait_until_responsive(\n timeout=30.0, pause=0.1,\n check=lambda: is_responsive(client)\n )\n return client",
"def get_redis_client():\n return redis.from_url(settings.REDIS_URI)",
"def Get():\n return ServiceConfig() # Singleton decorator ensures there's only one",
"def configure_client(self):\n self.client = self.get_redis_client()\n return self.client",
"def get(key):\n return Cache.cache_connector.get(key)",
"def _getMemcacheClient(self, refresh=False):\n if refresh or not hasattr(self, \"memcacheClient\"):\n\n if config.Memcached.Pools.Default.MemcacheSocket:\n client_addr = \"unix:{}\".format(config.Memcached.Pools.Default.MemcacheSocket)\n else:\n client_addr = \"{}:{}\".format(\n config.Memcached.Pools.Default.BindAddress,\n config.Memcached.Pools.Default.Port,\n )\n self.memcacheClient = ClientFactory.getClient([client_addr], debug=0, pickleProtocol=2)\n return self.memcacheClient",
"def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c",
"def get_client():\n return Client(__address, authkey='strumamor')",
"def _get_dask_client(client: Optional[Client]) -> Client:\n if client is None:\n return default_client()\n else:\n return client",
"def test_redisdb_get_client():\n test_redisdb = RedisClient()\n test_redisdb.client = \"mock_client\"\n\n test_redisdb_client = test_redisdb.get_client()\n assert test_redisdb_client == \"mock_client\"",
"def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)",
"def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)",
"def get_client(self):\n return self.client",
"def docker_client(environment, version=None, tls_config=None, host=None,\n tls_version=None):\n try:\n kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)\n except TLSParameterError:\n raise UserError(\n \"TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY \"\n \"and DOCKER_CERT_PATH are set correctly.\\n\"\n \"You might need to run `eval \\\"$(docker-machine env default)\\\"`\")\n\n if host:\n kwargs['base_url'] = host\n if tls_config:\n kwargs['tls'] = tls_config\n\n if version:\n kwargs['version'] = version\n\n timeout = environment.get('COMPOSE_HTTP_TIMEOUT')\n if timeout:\n kwargs['timeout'] = int(timeout)\n else:\n kwargs['timeout'] = HTTP_TIMEOUT\n\n kwargs['user_agent'] = generate_user_agent()\n\n # Workaround for\n # https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#ld-library-path-libpath-considerations\n if 'LD_LIBRARY_PATH_ORIG' in environment:\n kwargs['credstore_env'] = {\n 'LD_LIBRARY_PATH': environment.get('LD_LIBRARY_PATH_ORIG'),\n }\n\n client = APIClient(**kwargs)\n client._original_base_url = kwargs.get('base_url')\n\n return client",
"def get_cache(self):\n return self.cache",
"def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)",
"def __getattr__(self, name):\n impl = self._get_client_impl()\n return getattr(impl, name)"
] | [
"0.7949226",
"0.6975132",
"0.69293606",
"0.6704719",
"0.6433767",
"0.6314111",
"0.6047752",
"0.59885114",
"0.59635204",
"0.5917807",
"0.5916698",
"0.5907519",
"0.5857684",
"0.58574104",
"0.58456975",
"0.5838481",
"0.58354545",
"0.58210045",
"0.58168083",
"0.58074665",
"0.57086885",
"0.56755704",
"0.5672046",
"0.56555134",
"0.5648413",
"0.5631415",
"0.56311697",
"0.56213063",
"0.55941993",
"0.55911475"
] | 0.72880656 | 1 |
Return whether an image needs pushing | def image_needs_pushing(image):
d = docker_client()
try:
d.images.get_registry_data(image)
except docker.errors.APIError:
# image not found on registry, needs pushing
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def PrePush(self, image):\n pass",
"def hasImage(self):\n if self.getImage():\n return True\n return False",
"def _pushing(pushop):\n return bool(\n pushop.outgoing.missing\n or pushop.outdatedphases\n or pushop.outobsmarkers\n or pushop.outbookmarks\n )",
"def hasImage(self):\n return self._image is not None",
"def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None",
"def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]",
"def has_images(self):\n return len(self.images) > 0",
"def image_needs_building(image):\n d = docker_client()\n\n # first, check for locally built image\n try:\n d.images.get(image)\n except docker.errors.ImageNotFound:\n # image not found, check registry\n pass\n else:\n # it exists locally, no need to check remote\n return False\n\n # image may need building if it's not on the registry\n return image_needs_pushing(image)",
"def hasImages(self):\n return len(self.getImages()) > 0",
"def hasImages(self):\n return len(self.getImages()) > 0",
"def filter_push(move: dict):\n if move.get(\"pushes\") > 0:\n return True\n else:\n return False",
"def push(self):\n return False",
"def can_push(self) -> bool:\n return pulumi.get(self, \"can_push\")",
"def request_image(self, source, connection):\n try:\n self.__image_queue.put_nowait((source, connection))\n return True\n except Queue.Full:\n return False",
"def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True",
"def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)",
"def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)",
"def has_media(self):\r\n if self.image:\r\n return True\r\n return False",
"def test_push_already_pushed(self, mock_docker_environment, snapshot, capsys):\n mock_docker_environment.api.push = mock.Mock(\n return_value=event_streams.PUSH_ALREADY_PRESENT\n )\n push_image(TEST_IMAGE_NAME)\n out, err = capsys.readouterr()\n snapshot.assert_match(out)",
"def is_image(pos, image, start_pos, dim_square):\n # Grab image on real board\n im = region_grabber((start_pos[0] + pos[1] * dim_square[0],\n start_pos[1] - (pos[0] + 1.0) * dim_square[1],\n start_pos[0] + (pos[1] + 1.0) * dim_square[0],\n start_pos[1] - pos[0] * dim_square[1]))\n\n pos_image = imagesearcharea(image, 0, 0, 0, 0, 0.9, im)\n return pos_image != [-1, -1]",
"def __nonzero__(self):\n if self._pushed:\n return True\n try:\n self.push(self.next())\n except StopIteration:\n return False\n return True",
"def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None",
"def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False",
"def _is_push_command(self):\n return self._match_memory_pattern(\"push\")",
"def images_exist(self):\n pass",
"def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True",
"def push_image(self, tag_list, push_to_defaults, additional_registries=[], version_release_tuple=None,\n push_late=False, dry_run=False):\n\n # Late pushes allow certain images to be the last of a group to be\n # pushed to mirrors. CI/CD systems may initiate operations based on the\n # update a given image and all other images need to be in place\n # when that special image is updated. The special images are there\n # pushed \"late\"\n # Actions that need to push all images need to push all images\n # need to make two passes/invocations of this method: one\n # with push_late=False and one with push_late=True.\n\n is_late_push = False\n if self.config.push.late is not Missing:\n is_late_push = self.config.push.late\n\n if push_late != is_late_push:\n return True\n\n push_names = []\n\n if push_to_defaults:\n push_names.extend(self.metadata.get_default_push_names())\n\n push_names.extend(self.metadata.get_additional_push_names(additional_registries))\n\n # Nothing to push to? We are done.\n if not push_names:\n return True\n\n with Dir(self.distgit_dir):\n\n if version_release_tuple:\n version = version_release_tuple[0]\n release = version_release_tuple[1]\n else:\n\n # History\n # We used to rely on the \"release\" label being set in the Dockerfile, but this is problematic for several reasons.\n # (1) If 'release' is not set, OSBS will determine one automatically that does not conflict\n # with a pre-existing image build. This is extremely helpful since we don't have to\n # worry about bumping the release during refresh images. This means we generally DON'T\n # want the release label in the file and can't, therefore, rely on it being there.\n # (2) People have logged into distgit before in order to bump the release field. This happening\n # at the wrong time breaks the build.\n\n # If the version & release information was not specified,\n # try to detect latest build from brew.\n # Read in version information from the Distgit dockerfile\n _, version, release = self.metadata.get_latest_build_info()\n\n try:\n record = {\n \"distgit_key\": self.metadata.distgit_key,\n \"distgit\": '{}/{}'.format(self.metadata.namespace, self.metadata.name),\n \"image\": self.config.name,\n \"version\": version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"status\": -1,\n # Status defaults to failure until explicitly set by success. 
This handles raised exceptions.\n }\n\n # pull just the main image name first\n image_name_and_version = \"%s:%s-%s\" % (self.config.name, version, release)\n brew_image_url = \"/\".join((constants.BREW_IMAGE_HOST, image_name_and_version))\n pull_image(brew_image_url)\n record['message'] = \"Successfully pulled image\"\n record['status'] = 0\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.logger.info(\"Error pulling %s: %s\" % (self.metadata.name, err))\n raise\n finally:\n self.runtime.add_record('pull', **record)\n\n push_tags = list(tag_list)\n\n # If no tags were specified, build defaults\n if not push_tags:\n push_tags = self.metadata.get_default_push_tags(version, release)\n\n for image_name in push_names:\n try:\n\n repo = image_name.split('/', 1)\n\n action = \"push\"\n record = {\n \"distgit_key\": self.metadata.distgit_key,\n \"distgit\": '{}/{}'.format(self.metadata.namespace, self.metadata.name),\n \"repo\": repo, # ns/repo\n \"name\": image_name, # full registry/ns/repo\n \"version\": version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"tags\": \", \".join(push_tags),\n \"status\": -1,\n # Status defaults to failure until explicitly set by success. This handles raised exceptions.\n }\n\n for push_tag in push_tags:\n push_url = '{}:{}'.format(image_name, push_tag)\n\n if dry_run:\n rc = 0\n self.logger.info('Would have tagged {} as {}'.format(brew_image_url, push_url))\n self.logger.info('Would have pushed {}'.format(push_url))\n else:\n rc, out, err = exectools.cmd_gather([\"docker\", \"tag\", brew_image_url, push_url])\n\n if rc != 0:\n # Unable to tag the image\n raise IOError(\"Error tagging image as: %s\" % push_url)\n\n for r in range(10):\n self.logger.info(\"Pushing image to mirror [retry=%d]: %s\" % (r, push_url))\n rc, out, err = exectools.cmd_gather([\"docker\", \"push\", push_url])\n if rc == 0:\n break\n self.logger.info(\"Error pushing image -- retrying in 60 seconds\")\n time.sleep(60)\n\n if rc != 0:\n # Unable to push to registry\n raise IOError(\"Error pushing image: %s\" % push_url)\n\n record[\"message\"] = \"Successfully pushed all tags\"\n record[\"status\"] = 0\n\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.logger.info(\"Error pushing %s: %s\" % (self.metadata.name, err))\n raise\n\n finally:\n self.runtime.add_record(action, **record)\n\n return True",
"def check_got_promotion():\n im = region_grabber((550, 250, 815, 320)) # Hardcoded\n pos = imagesearcharea(\"Images/promotion_queen.jpg\", 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n print(\"Got promotion\")\n pos_image = [550 + pos[0], 250 + pos[1]]\n click_image(\"Images/promotion_queen.jpg\", pos_image, \"left\", 0.2)\n time.sleep(0.5)\n return True\n return False",
"def is_finished(self):\n if self.task_index + 1 >= self.image_count:\n return True\n return False",
"def is_use_pictures(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsUsePictures', self.handle))"
] | [
"0.67209774",
"0.66439164",
"0.6631148",
"0.656371",
"0.64065194",
"0.63889164",
"0.6336889",
"0.6315508",
"0.6282437",
"0.6282437",
"0.62279326",
"0.61669666",
"0.6137131",
"0.6032941",
"0.6019985",
"0.59767556",
"0.5969536",
"0.59661514",
"0.5943",
"0.5921205",
"0.59130216",
"0.5907948",
"0.5905992",
"0.59012645",
"0.5897086",
"0.58821714",
"0.5851078",
"0.58395475",
"0.57877517",
"0.57867956"
] | 0.7235848 | 0 |
Return whether an image needs building Checks if the image exists (ignores commit range), either locally or on the registry. | def image_needs_building(image):
d = docker_client()
# first, check for locally built image
try:
d.images.get(image)
except docker.errors.ImageNotFound:
# image not found, check registry
pass
else:
# it exists locally, no need to check remote
return False
# image may need building if it's not on the registry
return image_needs_pushing(image) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def image_needs_pushing(image):\n d = docker_client()\n try:\n d.images.get_registry_data(image)\n except docker.errors.APIError:\n # image not found on registry, needs pushing\n return True\n else:\n return False",
"def check_image(self, tag):\n image_name = self.build_image_name(tag)\n try:\n self.client.images.get_registry_data(image_name)\n return True\n except Exception as ex:\n print('Image {} does not exist: '.format(image_name), str(ex))\n return False",
"def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True",
"def test_image_exists_local_and_registry(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.images.build.assert_not_called()",
"def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True",
"def check_molns_image(self):\n if 'molns_image_name' in self.config and self.config['molns_image_name'] is not None \\\n and self.config['molns_image_name'] != '':\n return self.docker.image_exists(self.config['molns_image_name'])\n return False",
"def is_available_skopeo_image(self, image, registry, task_vars):\n\n cmd_str = \"skopeo inspect docker://{registry}/{image}\".format(\n registry=registry,\n image=image,\n )\n\n args = {\"_raw_params\": cmd_str}\n result = self.module_executor(\"command\", args, task_vars)\n return not result.get(\"failed\", False) and result.get(\"rc\", 0) == 0",
"def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool\n try:\n docker_command(args, ['image', 'inspect', image], capture=True)\n except SubprocessError:\n return False\n\n return True",
"def req_build(container):\n try:\n return 'dockerfile' in self.kard.env.get_container(container)\n except KeyError:\n return False",
"def hasImage(self):\n if self.getImage():\n return True\n return False",
"def is_image_local(self, image):\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")",
"def test_image_exists_local(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.images.build.assert_not_called()",
"def hasImage(self):\n return self._image is not None",
"def is_image_exists(c, name):\n res = c.run('sudo docker images', hide='stdout')\n for image in res.stdout.split('\\n'):\n if name == image.split(' ')[0]:\n print('Image {name} exists'.format(name=name))\n return True\n\n print('Image {name} doesn\\'t exist'.format(name=name))\n return False",
"def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def check_exist(self):\n helper.RbdImageOperator._check_rbd_image(self.real_path)",
"def image_is_available(filename):\n # FIXME - Implement!\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n return os.path.isfile(file_path)",
"def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]",
"def images_exist(self):\n pass",
"def is_available_skopeo_image(self, image, default_registries):\n registries = default_registries\n\n # If image already includes a registry, only use that.\n # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.\n # registry.access.redhat.com/rhel7 as if the registry were a namespace.\n # It's not clear that there's any way to distinguish them, but fortunately\n # the current set of images all look like [registry/]namespace/name[:version].\n if image.count(\"/\") > 1:\n registry, image = image.split(\"/\", 1)\n registries = [registry]\n\n for registry in registries:\n if registry not in self.reachable_registries:\n self.reachable_registries[registry] = self.connect_to_registry(registry)\n if not self.reachable_registries[registry]:\n continue\n\n args = {\"_raw_params\": self.skopeo_img_check_command.format(registry=registry, image=image)}\n result = self.execute_module_with_retries(\"command\", args)\n if result.get(\"rc\", 0) == 0 and not result.get(\"failed\"):\n return True\n if result.get(\"rc\") == 124: # RC 124 == timed out; mark unreachable\n self.reachable_registries[registry] = False\n\n return False",
"def check_image(image):\n\n if not path.isfile(image):\n raise ImageException('Error: Singularity image \"%s\" not found.' % image)\n return True",
"def has_image(self, container_name):\n name, tag = split_container_name(container_name)\n images = self._client.images(all=True)\n return any(container_name in image[\"RepoTags\"] for image in images)",
"def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True",
"def test_image_exists_registry(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.api.pull.assert_called_with(\n TEST_IMAGE_NAME, \"latest\", decode=True, stream=True\n )\n mock_docker_environment.images.build.assert_not_called()",
"def test_image_exists_registry_no_pull(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n build_image_if_needed(TEST_IMAGE_NAME, pull=False)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)",
"def pil_available():\n out = False\n try:\n from PIL import Image # noqa\n out = True\n except ImportError:\n pass\n return out",
"def docker_available(): # type: () -> bool\n return bool(get_docker_command())",
"def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )",
"def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)"
] | [
"0.74606645",
"0.7137432",
"0.70654684",
"0.6916333",
"0.6890094",
"0.6849104",
"0.68168086",
"0.6784783",
"0.6744717",
"0.66717255",
"0.6600988",
"0.6536723",
"0.65264153",
"0.65185374",
"0.650834",
"0.6502069",
"0.6478776",
"0.6386925",
"0.6359315",
"0.6355004",
"0.6343484",
"0.6323211",
"0.632266",
"0.62907976",
"0.6287016",
"0.62540287",
"0.6188263",
"0.615947",
"0.6124011",
"0.6123703"
] | 0.8575527 | 0 |
Update name/values.yaml with modifications | def build_values(name, values_mods):
values_file = os.path.join(name, 'values.yaml')
with open(values_file) as f:
values = yaml.load(f)
for key, value in values_mods.items():
parts = key.split('.')
mod_obj = values
for p in parts:
mod_obj = mod_obj[p]
print(f"Updating {values_file}: {key}: {value}")
if isinstance(mod_obj, MutableMapping):
keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()
if keys:
for key in keys:
mod_obj[key] = value['repository']
else:
possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)
raise KeyError(
f'Could not find {possible_keys} in {values_file}:{key}'
)
mod_obj['tag'] = value['tag']
else:
raise TypeError(
f'The key {key} in {values_file} must be a mapping.'
)
with open(values_file, 'w') as f:
yaml.dump(values, f) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _write_values(self, app_name, chart_dir, values):\n\n data = self._get_values(app_name, chart_dir)\n new_data = {**data, **values}\n new_raw = yaml.dump(new_data)\n\n values_path = \"%s/%s/values.yaml\" % (chart_dir, app_name)\n with open(values_path, mode=\"w\") as values_file:\n values_file.write(new_raw)",
"def update_feature(selfs, k, v, cfg_path):\n with open(cfg_path, 'r') as cfg:\n file_dict = yaml.safe_load(cfg)\n # overprint the entries with the new config_dict\n file_dict['{}'.format(k)] = v\n with open(cfg_path, 'w') as w_file:\n w_file.write(yaml.dump(file_dict))",
"def update_file(filename,d):\n if os.path.exists(filename):\n f_old = open(filename,'r')\n d_old = yaml.load(f_old)\n f_old.close()\n d_old.update(d)\n d = d_old\n f = open(filename, 'w')\n yaml.dump(d, f)\n f.close()",
"def update_variables(old_contents):\n new_contents = []\n\n for line in old_contents:\n words = line.split()\n\n for word in words:\n # Using the whitespace split above, the keys in the yaml file will\n # have a : at the end, so we need to strip that off before\n # replacing\n if word.endswith(':'):\n word = word[:-1]\n\n if word in VAR_MAPPINGS.keys():\n line = line.replace(word, VAR_MAPPINGS[word])\n\n new_contents.append(line)\n\n return new_contents",
"def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()",
"def update(self, values):\n for k, v in values.items():\n setattr(self, k, v)",
"def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value",
"def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()",
"def update(self, values):\r\n for k, v in six.iteritems(values):\r\n setattr(self, k, v)",
"def update(self, namein, nameout):\n\t\ttext = self.dict.sub(self.readFile(namein))\n\t\tself.writeFile(nameout, text)\n\t\treturn",
"def update(self, values):\n for k, v in six.iteritems(values):\n setattr(self, k, v)",
"def save(self, name, description, template, values):\n # Before attempting to write, ensure the directory exists\n self.directory.mkdir(parents = True, exist_ok = True)\n dest = self.directory / \"{}.yaml\".format(name)\n with dest.open('w') as f:\n yaml.dump(\n dict(\n description = description or '',\n template = template.name,\n values = values\n ),\n f\n )",
"def write_data(filename: str, old_position: dict, new_position: dict) -> None:\n\n combined = {\"old_positions\": old_position, \"new_positions\": new_position}\n\n with open(filename, \"w\") as f:\n yaml.dump(combined, f)\n\n return",
"def load_values(self, values: Context) -> None:\n for name, refers_to in values.items():\n self.logger.info(f\"load_values {name!r} : {refers_to!r}\")\n if not self.extended_name_path.match(name):\n raise ValueError(f\"Invalid name {name}\")\n\n context = self\n\n # Expand \"name1.name2....\": refers_to into [\"name1\", \"name2\", ...]: refers_to\n # Update NameContainer(\"name1\", NameContainer(\"name2\", NameContainer(..., refers_to)))\n *path, final = self.ident_pat.findall(name)\n for name in path:\n ref = context.setdefault(name, Referent())\n if ref.container is None:\n ref.container = NameContainer(parent=self.parent)\n context = ref.container\n context.setdefault(final, Referent()) # No annotation.\n context[final].value = refers_to",
"def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)",
"def set(self, key, value):\n try:\n if value.lower() in ['true', 'false']:\n value = value.lower() == 'true'\n except:\n pass\n\n try:\n if \".\" in key:\n keys = key.split(\".\")\n #\n # create parents\n #\n parents = keys[:-1]\n location = self.data\n for parent in parents:\n if parent not in location:\n location[parent] = {}\n location = location[parent]\n #\n # create entry\n #\n location[keys[-1]] = value\n else:\n self.data[key] = value\n\n except KeyError:\n raise ValueError(f\"The key '{key}' could not be found in the yaml file '{self.filename}'\")\n except Exception as e:\n print(e)\n raise ValueError(\"unkown error\")\n\n self.flush()",
"def update(self, values, priority=\"project\"):\n\t\tself._assert_mutability()\n\t\tif isinstance(values, six.string_types):\n\t\t\tvalues = json.loads(values)\n\t\tif values is not None:\n\t\t\tif isinstance(values, BaseSettings):\n\t\t\t\tfor name, value in six.iteritems(values):\n\t\t\t\t\tself.set(name, value, values.getpriority(name))\n\t\t\telse:\n\t\t\t\tfor name, value in six.iteritems(values):\n\t\t\t\t\tself.set(name, value, priority)",
"def update(context, namespace_name, id, values, session):\n namespace_api.get(context, namespace_name, session)\n\n metadata_tag = _get(context, id, session)\n metadef_utils.drop_protected_attrs(models.MetadefTag, values)\n # values['updated_at'] = timeutils.utcnow() - done by TS mixin\n try:\n metadata_tag.update(values.copy())\n metadata_tag.save(session=session)\n except db_exc.DBDuplicateEntry:\n LOG.debug(\"Invalid update. It would result in a duplicate\"\n \" metadata tag with same name=%(name)s\"\n \" in namespace=%(namespace_name)s.\",\n {'name': values['name'],\n 'namespace_name': namespace_name})\n raise exc.MetadefDuplicateTag(\n name=values['name'], namespace_name=namespace_name)\n\n return metadata_tag.to_dict()",
"def update(self, values):\n pass",
"def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)",
"def update(self, new_values):\n values_copy = new_values.copy()\n for key in self.SET_KEYS:\n if key in values_copy:\n values_copy[key] = set(values_copy[key])\n super(ConfigDict, self).update(values_copy)",
"def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)",
"def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)",
"def json_to_yaml(cls, name, filename=\"~/.cloudmesh/security/google.json\"):\n path = path_expand(filename)\n\n # Open and load the JSON file.\n with open(path, \"r\") as file:\n d = json.load(file)\n\n # Get the project id and client email.\n project_id = d[\"project_id\"]\n client_email = d[\"client_email\"]\n\n # Format the sample with json file details.\n format_sample = cls.sample.format_map(locals())\n # Convert the yaml sample to JSON.\n google_yaml = yaml.load(format_sample, Loader=yaml.SafeLoader)\n # Extract the google compute section\n google_config = google_yaml[\"cloudmesh\"][\"cloud\"]\n\n # Update the google cloud section of cloudmesh.yaml config file.\n config = Config()\n config[\"cloudmesh\"][\"cloud\"][name] = google_config\n config.save()\n banner(\"Result\")\n pprint(config[\"cloudmesh\"][\"cloud\"][name])",
"def update(self):\n self.save_config_file()",
"def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()",
"def modify_res_value(name, delta):\n pass",
"def upgrade_settings(self, keys):\n upgradable_keys = {\n \"project_dir\": \"%root_dir%\",\n \"source_folder\": \"%source_folder%\",\n \"packages_path\": \"%packages_path%\",\n \"sep\": \"%sep%\",\n \"$\": \"$\"\n }\n for key in keys:\n value, from_global = self.get(key, as_tuple=True)\n value = value.replace(\"%\", \"%%%\")\n for k in upgradable_keys:\n value = value.replace(\"$\" + k, upgradable_keys[k])\n self.set(key, value, to_global=from_global)",
"def update(name, value, config_dir=None):\n if name not in Config.__ALLOWED:\n msg = f'Cannot update configuration; value \"{name}\" is not allowed.'\n raise ConfigurationError(msg)\n config_dir = Config.resolve_config_dir(config_dir)\n config_dat, config_file = Config.get_config_file(\n config_dir,\n round_trip_load=True,\n quiet=True,\n )\n config_dat.update({name: value})\n Config.write_config_file(config_dat, config_file)\n if Config.is_set:\n Config.__conf[name] = value",
"def test_with_different_name(data_store_path):\n data_set = [\n {\"name\": \"Eric Idle\", \"phone\": \"123-456-7890\", \"address\": \"here\"},\n {\"name\": \"John Cleese\", \"phone\": \"111-222-3333\", \"address\": \"there\"},\n ]\n data_store_path.write_text(yaml.dump(data_set))\n data_store = YAMLDataStore(file_path=str(data_store_path))\n assert data_store._users == data_set\n\n updated_user = {\n \"name\": \"Terry Gilliam\",\n \"phone\": \"999-999-9999\",\n \"address\": \"not here\",\n }\n data_store.update(\"Eric Idle\", **updated_user)\n\n assert updated_user in data_store._users\n assert not [user for user in data_store._users if user[\"name\"] == \"Eric Idle\"]\n yaml_data = yaml.safe_load(data_store_path.read_text())\n assert updated_user in yaml_data\n assert not [user for user in yaml_data if user[\"name\"] == \"Eric Idle\"]"
] | [
"0.6856736",
"0.6455863",
"0.6232132",
"0.6048177",
"0.5998861",
"0.5927065",
"0.5708528",
"0.56122386",
"0.5609265",
"0.560229",
"0.5582246",
"0.55429924",
"0.552479",
"0.5504513",
"0.54904646",
"0.54742396",
"0.54720205",
"0.54367936",
"0.54282165",
"0.53917795",
"0.5384969",
"0.5374541",
"0.5321715",
"0.5317371",
"0.5312636",
"0.52658564",
"0.52367586",
"0.52245927",
"0.5220419",
"0.5178973"
] | 0.69657123 | 0 |
Publish helm chart index to github pages | def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
version = last_modified_commit(*paths)
checkout_dir = '{}-{}'.format(name, version)
check_call([
'git', 'clone', '--no-checkout',
git_remote(git_repo), checkout_dir],
echo=False,
)
check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
# package the latest version into a temporary directory
# and run helm repo index with --merge to update index.yaml
# without refreshing all of the timestamps
with TemporaryDirectory() as td:
check_call([
'helm', 'package', name,
'--destination', td + '/',
])
check_call([
'helm', 'repo', 'index', td,
'--url', published_repo,
'--merge', os.path.join(checkout_dir, 'index.yaml'),
])
# equivalent to `cp td/* checkout/`
# copies new helm chart and updated index.yaml
for f in os.listdir(td):
shutil.copy2(
os.path.join(td, f),
os.path.join(checkout_dir, f)
)
check_call(['git', 'add', '.'], cwd=checkout_dir)
if extra_message:
extra_message = '\n\n%s' % extra_message
else:
extra_message = ''
check_call([
'git',
'commit',
'-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
], cwd=checkout_dir)
check_call(
['git', 'push', 'origin', 'gh-pages'],
cwd=checkout_dir,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")",
"def index():\n return render_template(\"charts.html\")",
"def main():\n \n root = Folder(name=os.getcwd(), file='meta.json',\n collection='.github/jekyll')\n root.update()\n root.export_folders(True)",
"def main():\n # Step1: generate htmls\n csv_data_path= \"./frontend/html_template_data/dataset.csv\"\n html_template_path = \"./frontend/html_template_data/template.html\"\n html_save_path = \"./frontend/html_files/\"\n\n generate_htmls(csv_data_path, html_template_path, html_save_path)\n\n # Step2: push htmls to Github\n # push htmls to Github Pages, currently manual.",
"def index():\n graphs = [\n message_genre_bar_chart(df),\n category_bar_chart(df),\n top_words_bar_chart(df)\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)",
"def deploy(version):\n toolkit.readmegen(version)",
"def index():\n\n # open the README file\n with open(os.path.dirname(app.root_path) + '/README.md',\n 'r', encoding=\"utf-8\") as markdown_file:\n\n # Read the content of the file\n content = markdown_file.read()\n\n # convert to html\n return markdown.markdown(content)",
"def write_index_html(self):\n print(\"- writing index.md\")\n index_toc = [f\"### [Table of Contents]({config['github_pages_url']}/toc.html)\"] if self.notebooks else []\n if os.path.isfile(os.path.join(self.dst_dir, \"data_index.html\")):\n index_toc += [f\"### [Data Index]({config['github_pages_url']}/data_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"figure_index.html\")):\n index_toc += [f\"### [Figure Index]({config['github_pages_url']}/figure_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"python_index.html\")):\n index_toc += [f\"### [Python Module Index]({config['github_pages_url']}/python_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"tag_index.html\")):\n index_toc += [f\"### [Tag Index]({config['github_pages_url']}/tag_index.html)\"]\n index_toc += [f\"- {nb.link}\" if type(nb) == Section else f\"\\n### {nb.link}\" for nb in self.notebooks]\n env = Environment(loader=FileSystemLoader(\"templates\"))\n with open(os.path.join(self.dst_dir, \"index.md\"), 'w') as f:\n f.write(env.get_template('index.md.tpl').render(\n readme_toc=index_toc, page_title=config['github_repo_name'], github_url=config['github_repo_url']))",
"def home_page():\n\n return render_template('index.html', stories=stories.values())",
"def main():\n year = time.strftime(\"%Y\")\n month = time.strftime(\"%m\")\n today = time.strftime(\"%Y%m%d\")\n homedir = \"/home/\" + user + \"/raspi-sump/\"\n webchart.create_folders(year, month, homedir)\n webchart.create_chart(homedir)\n webchart.copy_chart(year, month, today, homedir)",
"def index():\n return render_template('home.jinja2')",
"def index(request):\n copy = '2018 ' + author\n\n context = dict(author=author, copyright=copy, repo_url=repo_url)\n\n return render(request, 'index.html', context)",
"def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')",
"def index():\n # create table for original dataset\n table_1 = data_table_low(filepath = \"sparkify_data.csv\", title='Raw Sparkify Data')\n\n table_2 = data_table_low(filepath = \"cleaned_data.csv\", title='Cleaned Sparkify Data')\n\n # create and append plotly visuals into an array to be passed later for graphJSON file\n graphs = [table_1, table_2]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template(\"master.html\", ids=ids, graphJSON=graphJSON)",
"def index():\n r = requests.get(API_ROUTE, headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n articles = sorted(r.json(), key=lambda article: (article['release_at'] or '9', article['updated_at']), reverse=True)\n return render_template('index.html', articles=articles)",
"def index():\n return render_template(\"index.html\", page_title=\"Home\")",
"def install_helm_plugins():\n plugins = {\n 'https://github.com/technosophos/helm-gpg': '0.1.0',\n }\n for plugin_url, version in plugins.items():\n install_cmd = \"helm plugin install {0} --version={1}\".format(\n plugin_url,\n version)\n logging.info(\"installing helm plugin with command: {0}\".format(install_cmd))\n sp.call(install_cmd, shell=True)",
"def home():\n return render_template(\"d3_graph.html\")",
"def save(self):\n for page in self.pages.get_published_pages():\n site_path = page.path_to_page.replace('.md', '').replace(\n self.source_path, '').strip('/')\n save_path = self.output_path\n\n # ensure we are not creating a directory for the index file that\n # that lives at the source_path\n if page.full_path() != f'{self.source_path}{os.sep}index.md':\n site_path = slugify_path(site_path)\n save_path = os.path.join('', self.output_path, site_path)\n\n try:\n os.makedirs(save_path, exist_ok=True)\n except Exception as e:\n log((f'unable to create directories: {save_path}'\n f' because: {e}'), True)\n continue\n\n try:\n save_file = os.path.join(save_path, 'index.html')\n log(f'saving {save_file}')\n\n published = self.pages.get_published_pages()\n prev_page = self.pages.get_previous_page(page)\n next_page = self.pages.get_next_page(page)\n content = page.render(published_pages=published,\n previous_page=prev_page, next_page=next_page)\n write(save_file, content)\n except Exception as e:\n log(f'unable to save file: {save_file} -- {e}', True)\n\n unpublished = self.pages.get_unpublished_pages()\n if len(unpublished):\n log('')\n log('these pages were unpublished and not rendered:', True)\n for up in unpublished:\n log(up.path_to_page, True)\n log('')\n\n # build the _tags pages\n for tag, pages in self.tags.pages.items():\n content = self.tags.render(tag, pages)\n tag_index_dir = f'{self.tag_dir}/{slugify(tag)}'\n tag_index = f'{tag_index_dir}/index.html'\n os.makedirs(tag_index_dir, exist_ok=True)\n write(tag_index, content)\n\n log('finished builidng site')",
"def home():\n payload = manager.get_payload()\n return render_template('index.html', payload=payload)",
"def entry_point():\n return render_template(\"index.html\")",
"def index():\n\n return render_template(\"index.html\"), 200",
"def main():\n\td = Hugo(\"cmd\")\n\tpass",
"def index():\n return render_template(\n 'main/index.html',\n title='Main page'\n )",
"def generate():\n\n # Verify if directory exists\n if not os.path.isdir(config.techniques_markdown_path):\n os.mkdir(config.techniques_markdown_path)\n\n #Write the technique index.html page\n with open(os.path.join(config.techniques_markdown_path, \"overview.md\"), \"w\", encoding='utf8') as md_file:\n md_file.write(config.technique_overview_md)\n\n for domain in config.domains:\n generate_domain_markdown(domain)",
"def index():\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION\n g.data['oar_version'] = VERSION\n g.data['links'] = []\n #endpoints = ('resources', 'jobs', 'config', 'admission_rules')\n endpoints = ('resources', 'jobs')\n for endpoint in endpoints:\n g.data['links'].append({\n 'rel': 'collection',\n 'href': url_for('%s.index' % endpoint),\n 'title': endpoint,\n })",
"def index():\n return render_template(\"index.html\",\n title='Index')",
"def index():\n return render_template('index.html', title='PanOS Bootstrap Utility')",
"def index():\n today = datetime.today()\n return render_template(\"index.html.j2\", today=today)",
"def index():\n return render_template('index.html'), 200"
] | [
"0.6303632",
"0.5992799",
"0.59895986",
"0.56986564",
"0.5615526",
"0.55472004",
"0.5513828",
"0.53691167",
"0.5364688",
"0.5286044",
"0.5263749",
"0.52587676",
"0.5251907",
"0.5230299",
"0.5216074",
"0.52005446",
"0.5190611",
"0.5182666",
"0.5175446",
"0.51723653",
"0.51648855",
"0.5161892",
"0.5158165",
"0.51531535",
"0.5147999",
"0.51465327",
"0.5144903",
"0.5141292",
"0.5141177",
"0.5139922"
] | 0.6350673 | 0 |
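The negatives in the record closed above are dominated by a single pattern: a Flask view function that renders an index template. A minimal, self-contained sketch of that pattern for reference — the route, template name, and title are illustrative assumptions, not values taken from the dataset, and it assumes a templates/index.html file exists:

from flask import Flask, render_template

app = Flask(__name__)

@app.route("/")
def index():
    # Render the landing page; page_title is passed into the template context.
    return render_template("index.html", page_title="Home")

if __name__ == "__main__":
    app.run(debug=True)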
Add the domain restrictions. | def add_domains_restriction(self, domain_restriction):
        self._domain_restriction = domain_restriction
self._size_var = self._get_size_var()
self._nr_of_bits = self._get_nr_of_bits() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)",
"async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n allowedDomains.append(domain)\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")",
"def domain(self, domain):",
"def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain",
"def relevant_domains(self):\n pass",
"def domains(self, domains):\n\n self._domains = domains",
"def create_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.create_domain(name)",
"def add_function(self, function):\n super(BaseAG, self).add_function(function)\n self._representation.add_domains_restriction(\n function.get_domain_restrictions)\n self._selection.add_function(function)",
"def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . \" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()",
"def add_variable(self, name, domain):\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}",
"def exclude_domain(self) -> None:\n self.exclude_domains.add(current_domain.get())",
"def par_domain(self):",
"def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)",
"def domains(self):\n return DomainCollection(self.request)",
"def restrict_domain(self, geometry ):\n raise NotImplementedError(\"restrict_domain\")",
"def fixDomains(self, domainMin, domainMax, fixToDomain):\n\n return 0",
"def set_asset_restrictions(self, restrictions, on_error='fail'):\n control = RestrictedListOrder(on_error, restrictions)\n self.register_trading_control(control)\n self.restrictions |= restrictions",
"def add_subdomain_output(self,filename,ll_x,ll_y, ur_x, ur_y,start,stop,step,area_id = 0): \n \n self.number_of_subdomains += 1\n self.subdomains.number_of_subdomains = self.number_of_subdomains #set the 'number_of_subdomains' attribute \n name = 'subdomain' + str(self.number_of_subdomains) \n self.subdomainGroups.append(self.subdomains.createGroup(name) ) #great a new subdomain Group\n \n self.subdomainGroups[self.number_of_subdomains-1].filename = filename #set the bounds attributes for the subdomain\n\n self.subdomainGroups[self.number_of_subdomains-1].ll_x = ll_x #set the bounds attributes for the subdomain\n self.subdomainGroups[self.number_of_subdomains-1].ll_y = ll_y\n self.subdomainGroups[self.number_of_subdomains-1].ur_x = ur_x\n self.subdomainGroups[self.number_of_subdomains-1].ur_y = ur_y\n self.subdomainGroups[self.number_of_subdomains-1].start = start\n self.subdomainGroups[self.number_of_subdomains-1].stop = stop\n self.subdomainGroups[self.number_of_subdomains-1].step = step\n self.subdomainGroups[self.number_of_subdomains-1].area_id = area_id",
"def allowed_domains(self):\n if self._allowed_domains is None:\n uri = \"/loadbalancers/alloweddomains\"\n resp, body = self.method_get(uri)\n dom_list = body[\"allowedDomains\"]\n self._allowed_domains = [itm[\"allowedDomain\"][\"name\"]\n for itm in dom_list]\n return self._allowed_domains",
"def Create(self, domainsList) :\n\t\t...",
"def _get_domain(self):\n self.ensure_one()\n domain = ['|', ('active', '=', True), ('active', '=', False)]\n # Check active\n if self.active == 'true':\n domain += [('active', '=', True)]\n elif self.active == 'false':\n domain += [('active', '=', False)]\n # Check partner type\n if self.partner_type == 'customer_or_supplier':\n domain += ['|', ('customer', '=', True), ('supplier', '=', True)]\n elif self.partner_type == 'customer_and_supplier':\n domain += [('customer', '=', True), ('supplier', '=', True)]\n elif self.partner_type == 'customer':\n domain += [('customer', '=', True)]\n elif self.partner_type == 'supplier':\n domain += [('supplier', '=', True)]\n # Check category\n if self.category_ids:\n domain += [('category_id', 'in', self.category_ids.ids)]\n return domain",
"def allowed_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"allowed_domains\")",
"def SetDomainsList(self, domainsList) :\n\t\t...",
"def domain( self ):\n raise NotImplementedError(\"domain\")",
"def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_domains\")",
"def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_domains\")",
"def domainparams(self):\n\t\traise Exception(NotImplemented)",
"def add(self, newaddress):\n list = newaddress.split(\"@\")\n newdomain = list[-1]\n if not newdomain in self.__domainlist:\n self.__domainlist.append(newdomain)\n else:\n print(\"Domain is already in the database\")",
"def test_add_domain_type_assignment_rule(self):\n pass",
"def set_all_domains(self, domains_dict) :\n if not set(domains_dict.keys()) <= set(self.variables):\n invalid_vars = filter(lambda v: v not in self.variables, domains_dict.keys())\n raise KeyError(str(invalid_vars) + \" are not variables in this problem.\")\n self.domains = deepcopy(domains_dict)\n return self"
] | [
"0.7922115",
"0.623785",
"0.60880244",
"0.5960196",
"0.58387566",
"0.57663685",
"0.5692345",
"0.5655885",
"0.56333846",
"0.55938256",
"0.558293",
"0.5558514",
"0.55429906",
"0.5535729",
"0.5511199",
"0.54556143",
"0.5452144",
"0.54225695",
"0.54151505",
"0.54145116",
"0.5341205",
"0.5337445",
"0.5314957",
"0.5277656",
"0.5272155",
"0.5272155",
"0.52388835",
"0.52074236",
"0.5205038",
"0.51969105"
] | 0.7894003 | 1 |
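The document in the record above only stores the restrictions and two derived attributes; the helpers it calls (_get_size_var, _get_nr_of_bits) are defined elsewhere and not shown. A hypothetical sketch of how a binary-encoded representation might derive per-variable bit counts from (low, high, precision) domain restrictions — the class name, tuple layout, and formula are assumptions for illustration only, not the dataset's original implementation:

import math

class BinaryRepresentation:
    def add_domains_restriction(self, domain_restriction):
        # Store the per-variable (low, high, precision) restrictions and
        # derive how many bits each variable needs to be encoded exactly.
        self._domain_restriction = domain_restriction
        self._size_var = [
            max(1, math.ceil(math.log2((high - low) * 10 ** precision + 1)))
            for low, high, precision in domain_restriction
        ]
        self._nr_of_bits = sum(self._size_var)

rep = BinaryRepresentation()
rep.add_domains_restriction([(-5.0, 5.0, 3), (0.0, 1.0, 2)])
print(rep._size_var, rep._nr_of_bits)   # [14, 7] 21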
Get the number of bits needed for an item. | def _get_nr_of_bits(self):
return sum(self._size_var) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __len__(self):\r\n return numBits(self.n)",
"def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')",
"def bitSizeOf(self) -> int:\n\n return self._numBits",
"def bitSizeOf(self) -> int:\n\n return self._numBits",
"def number_of_bits(self):\n return self.numbits",
"def bitSizeOf() -> int:\n\n return 1",
"def NumBits(self):\n num_bits = 8*len(self.output)\n if self.out_boff % 8:\n num_bits -= 8\n num_bits += self.out_boff\n if num_bits < 0:\n print \"What the...\"\n return num_bits",
"def bitSizeOf() -> int:\n\n return 64",
"def bits(self):\n return self._q.bit_length()",
"def bit_length(self, ???):",
"def bitSizeOf() -> int:\n\n return 32",
"def n_bits(self):\n return self._n_bits",
"def get_bitsize(self) -> int:\n return self._surface.get_bitsize()",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def getNbrOfBit(self):\n return DPxGetDinNumBits()"
] | [
"0.7077946",
"0.70436716",
"0.69864804",
"0.69864804",
"0.6966107",
"0.68727595",
"0.68669325",
"0.68431276",
"0.6806957",
"0.67431825",
"0.67405957",
"0.6726725",
"0.66758716",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6658841",
"0.6655241"
] | 0.7367046 | 0 |
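The document's helper simply sums per-variable bit widths, while most of the negatives above measure the bit length of a single integer. Both ideas in one small snippet — the numbers are made up for demonstration:

size_var = [14, 7, 10]              # bits reserved for three encoded variables
total_bits = sum(size_var)          # 31, what a sum-based helper would return

n = 1000
print(total_bits, n.bit_length())   # 31 10 -- bit_length() counts bits of one int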
Get a random genome. | def get_random(self):
base_genom = "1" * sum(self._size_var)
return utils.randomise_a_string(base_genom) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_random_genome(self):\n return random.choice(self.genomes)",
"def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }",
"def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene",
"def random():\r\n return R.NextDouble()",
"def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)",
"def get_random_individual(self, generation):\n if len(self.generations) <= generation < 0:\n raise ValueError('Please enter a valid generation.')\n return self.get_individual(\n generation=generation,\n index=random.randint(0, len(self.generations[generation]) - 1))",
"def random_gene(self):\n path_number = 6\n x = random.randint(0, path_number)\n return x",
"def random(self) -> Gadza:\n return choice(self.gadzas)",
"def get_random_object():\n\n return random.choice([\n get_random_alphabetic_string,\n get_random_alphanumeric_string,\n get_random_integer,\n get_random_real_number\n ])()",
"def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)",
"def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)",
"def get_random(self):\n return self._get_random()",
"def get_random_population():\r\n return [ get_random_individual() for _ in range(POPULATION_COUNT) ]",
"def getRandom(self):\n return random.choice(self.vec)",
"def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes",
"def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people",
"def getRandom(self):\n return random.choice(self.data)",
"def get_random(cls):\n\n\t\tnum = randint(0, 6)\n\n\t\treturn Tetromino(num)",
"def random_chromosome(self):\n genes = []\n for i in range(self.chromosome_length):\n genes.append(self.random_gene())\n\n return genes",
"def _gen_random_number() -> float:\n return uniform(0, 1000)",
"def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]",
"def get_random(self):\n self.random_range = list(np.array(self.friendly_range) * self.conversion)\n return np.random.uniform(self.random_range[0], self.random_range[1], 1)[0]",
"def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))",
"def generate_random_gene_sequence(gene_pool):\n genes = []\n for j in range(DEFAULT_CHROMOSOME_SIZE):\n genes.append(random.choice(gene_pool))\n\n return genes",
"def _make_random_genome(evo_config):\n\n # create random genome by creating chromosomes for box size and movement\n return _make_size_dict(evo_config), _make_move_pattern(_make_limb_dict(), evo_config)",
"def random():\n return constant(1)",
"def random_girl(self):\n return [result for result in self._db.girls.find().limit(1) \\\n .skip(random.randrange(self._db.girls.count()))][0]",
"def getRandom(self) -> int:",
"def getRandom(self) -> int:",
"def getRandom(self):\n randomIndex = random.randrange(0, self.size)\n return self.nums[randomIndex]"
] | [
"0.7129977",
"0.68136334",
"0.65213746",
"0.64397925",
"0.6434923",
"0.6387772",
"0.63817495",
"0.6367737",
"0.63332325",
"0.63026583",
"0.62996596",
"0.6297405",
"0.6288606",
"0.62817526",
"0.62786907",
"0.6232036",
"0.6221588",
"0.6218463",
"0.61816496",
"0.6164786",
"0.6125345",
"0.6108744",
"0.6062824",
"0.6056672",
"0.60522807",
"0.6047211",
"0.60292006",
"0.6021488",
"0.6021488",
"0.60089886"
] | 0.6945635 | 1 |
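The document above relies on utils.randomise_a_string, which the dataset does not show. The stand-in below is an assumption only: it replaces every position of the template with a random bit while preserving its length, which is one plausible reading of the helper's name.

import random

def randomise_a_string(template):
    # Replace each position of the template with a random '0' or '1',
    # keeping the template's length; a guess at what utils provides.
    return "".join(random.choice("01") for _ in template)

size_var = [14, 7]
base_genome = "1" * sum(size_var)
print(randomise_a_string(base_genome))   # e.g. '100110101110010110010' (21 chars)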
Create a new block cipher, configured in CTR mode. | def __init__(self, block_cipher, initial_counter_block,
prefix_len, counter_len, little_endian):
if len(initial_counter_block) == prefix_len + counter_len:
self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)
"""Nonce; not available if there is a fixed suffix"""
self._state = VoidPointer()
result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),
c_uint8_ptr(initial_counter_block),
c_size_t(len(initial_counter_block)),
c_size_t(prefix_len),
counter_len,
little_endian,
self._state.address_of())
if result:
raise ValueError("Error %X while instantiating the CTR mode"
% result)
# Ensure that object disposal of this Python object will (eventually)
# free the memory allocated by the raw library for the cipher mode
self._state = SmartPointer(self._state.get(),
raw_ctr_lib.CTR_stop_operation)
        # Memory allocated for the underlying block cipher is now owned
# by the cipher mode
block_cipher.release()
self.block_size = len(initial_counter_block)
"""The block size of the underlying cipher, in bytes."""
self._next = [self.encrypt, self.decrypt] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_ctr_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n\n counter = kwargs.pop(\"counter\", None)\n nonce = kwargs.pop(\"nonce\", None)\n initial_value = kwargs.pop(\"initial_value\", None)\n if kwargs:\n raise TypeError(\"Invalid parameters for CTR mode: %s\" % str(kwargs))\n\n if counter is not None and (nonce, initial_value) != (None, None):\n raise TypeError(\"'counter' and 'nonce'/'initial_value'\"\n \" are mutually exclusive\")\n\n if counter is None:\n # Crypto.Util.Counter is not used\n if nonce is None:\n if factory.block_size < 16:\n raise TypeError(\"Impossible to create a safe nonce for short\"\n \" block sizes\")\n nonce = get_random_bytes(factory.block_size // 2)\n else:\n if len(nonce) >= factory.block_size:\n raise ValueError(\"Nonce is too long\")\n \n # What is not nonce is counter\n counter_len = factory.block_size - len(nonce)\n\n if initial_value is None:\n initial_value = 0\n\n if is_native_int(initial_value):\n if (1 << (counter_len * 8)) - 1 < initial_value:\n raise ValueError(\"Initial counter value is too large\")\n initial_counter_block = nonce + long_to_bytes(initial_value, counter_len)\n else:\n if len(initial_value) != counter_len:\n raise ValueError(\"Incorrect length for counter byte string (%d bytes, expected %d)\" % (len(initial_value), counter_len))\n initial_counter_block = nonce + initial_value\n\n return CtrMode(cipher_state,\n initial_counter_block,\n len(nonce), # prefix\n counter_len,\n False) # little_endian\n\n # Crypto.Util.Counter is used\n\n # 'counter' used to be a callable object, but now it is\n # just a dictionary for backward compatibility.\n _counter = dict(counter)\n try:\n counter_len = _counter.pop(\"counter_len\")\n prefix = _counter.pop(\"prefix\")\n suffix = _counter.pop(\"suffix\")\n initial_value = _counter.pop(\"initial_value\")\n little_endian = _counter.pop(\"little_endian\")\n except KeyError:\n raise TypeError(\"Incorrect counter object\"\n \" (use Crypto.Util.Counter.new)\")\n\n # Compute initial counter block\n words = []\n while initial_value > 0:\n words.append(struct.pack('B', initial_value & 255))\n initial_value >>= 8\n words += [ b'\\x00' ] * max(0, counter_len - len(words))\n if not little_endian:\n words.reverse()\n initial_counter_block = prefix + b\"\".join(words) + suffix\n\n if len(initial_counter_block) != factory.block_size:\n raise ValueError(\"Size of the counter block (%d bytes) must match\"\n \" block size (%d)\" % (len(initial_counter_block),\n factory.block_size))\n\n return CtrMode(cipher_state, initial_counter_block,\n len(prefix), counter_len, little_endian)",
"def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher",
"def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)",
"def encrypt_ctr(self, plaintext, iv):\n assert len(iv) == 16\n\n plaintext = pad(plaintext)\n\n blocks = []\n nonce = iv\n for plaintext_block in split_blocks(plaintext):\n # CTR mode encrypt: plaintext_block XOR encrypt(nonce)\n block = xor_bytes(plaintext_block, self.encrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return b''.join(blocks)",
"def aes_ctr(key, counter=None):\n return AES.new(key, AES.MODE_CTR, counter=(counter if counter is not None else Counter.new(128)))",
"def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):\n # can we use M2Crypto and was it requested?\n if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:\n # yes, so do so\n return self.EVPAdaptor(key_bytes, iv_bytes, mode)\n else:\n # default to PyCrypto\n return self.AESAdaptor(key_bytes, iv_bytes, mode)",
"def encryptAESCTR(key, plaintext):\n # 128-bit iv, securely generated\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return (iv, ciphertext)",
"def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())",
"def aes_cbc(key, iv=None):\n return AES.new(key, AES.MODE_CBC, iv if iv is not None else get_zero_vector(16))",
"def ctr(in_file, out_file, block_size, key, op):\n \n with open(in_file, 'rb') as input: # Open files\n with open(out_file, 'wb') as output:\n ctr_str = read_block(input, block_size)[0] # This is the initial ctr\n ctr = byte_str_to_int(ctr_str)\n i = 0 # This is the value which will be added to ctr as we loop\n size = 2**(block_size * 8) # This is the length of the block size in bits\n \n output.write(int_to_byte_str(ctr + i, block_size))\n i += 1\n\n block = [True, True] # This is just to get into the while loop\n while block[1]: # Iterate through the rest of the input\n block = read_block(input, block_size)\n if block [0] != -1:\n this_ctr = (ctr + i) % size\n i += 1\n funced_block = func(int_to_byte_str(this_ctr, block_size), key)\n block_xor = xor(block[0], funced_block)\n output.write(block_xor)",
"def ecb_or_cbc_encrypt(plaintext, mode='random'):\n if mode == 'random':\n mode = 'ECB' if randint(0, 1) == 0 else 'CBC'\n\n key = randstr(AES_BSZ)\n plaintext = (\n ''.join([randstr(1) for _ in range(randint(5, 10))]) +\n plaintext +\n ''.join([randstr(1) for _ in range(randint(5, 10))])\n )\n plaintext = pad_to_blocksize(plaintext)\n\n if mode == 'ECB':\n ecb = AES.new(key, AES.MODE_ECB)\n ciphertext = ecb.encrypt(plaintext)\n elif mode == 'CBC':\n iv = randstr(AES_BSZ)\n cbc = AES.new(key, AES.MODE_CBC, iv)\n ciphertext = cbc.encrypt(plaintext)\n else:\n raise Exception(\"invalid mode\")\n\n return ciphertext",
"def main():\n b64 = (b\"L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvo\"\n b\"OLSFQ==\")\n binary = base64.b64decode(b64)\n\n key = b\"YELLOW SUBMARINE\"\n nonce = bytes(8)\n cipher = AES.new(key, AES.MODE_ECB)\n ctr = CTRMode(\n blksize=16,\n encrypt_blk=cipher.encrypt,\n decrypt_blk=cipher.decrypt,\n nonce=nonce,\n )\n\n decrypted = ctr.decrypt(binary)\n\n print(decrypted.decode())",
"def encryptAESCTR(key, nonce, pt):\n\tct = b''\n\tcounter = 0\n\tfor ptBlock in chunks(pt, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tct += xor(ptBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn ct",
"def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()",
"def decrypt_ctr(self, ciphertext, iv):\n assert len(iv) == 16\n\n blocks = []\n nonce = iv\n for ciphertext_block in split_blocks(ciphertext):\n # CTR mode decrypt: ciphertext XOR decrypt(nonce)\n block = xor_bytes(ciphertext_block, self.decrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return unpad(b''.join(blocks))",
"def operate_cipher(self):",
"def encryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend,key_type = 'AES128',mode='CBC'):\n\tif key_type == 'AES128':\n\t\talgo = algorithms.AES(key)\n\telif key_type == 'ChaCha20':\n\t\talgo = algorithms.ChaCha20(key,nonce=os.urandom(32))\n\telse:\n\t\traise('Error algorithm ' + key_type + ' not supported!')\n\tif mode == 'CBC':\n\t\tmode = modes.CBC(iv)\n\telif mode == 'GCM':\n\t\tmode = modes.GCM(iv)\n\telse :\n\t\traise('Error mode ' + mode + ' not supported!')\n\tcipher = Cipher(algo,mode,backend = bc)\n\treturn iv,key,cipher.encryptor()",
"def __init__(self, block_cipher: BlockCipher, code_size: int):\n self.cipher = block_cipher\n self.code_size = code_size",
"def cipher(input_bytes, expanded_key, n_r):\n\n state = generate_initial_state(input_bytes)\n state = add_round_key(state, expanded_key, 0)\n\n # Apply rounds of operations as stated in AES standard\n for round_no in range(1, n_r):\n state = sub_bytes(state)\n state = shift_rows(state)\n state = mix_columns(state)\n state = add_round_key(state, expanded_key, round_no * 4 * 4)\n\n state = sub_bytes(state)\n state = shift_rows(state)\n state = add_round_key(state, expanded_key, n_r * 4 * 4)\n\n return state",
"def decryptAESCTR(key, nonce, ct):\n\tpt = b''\n\tcounter = 0\n\tfor ctBlock in chunks(ct, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tpt += xor(ctBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn pt",
"def aes128_ctr_cipher(string, nonce, key):\n cipher_string = b''\n # Divide input string in blocks of 16 bytes\n cipher_text_blocks = [string[i:i + 16] for i in range(0, len(string), 16)]\n for i in range(len(cipher_text_blocks)):\n # Calculate incremental nonce block for each input string block\n nonce_block = nonce + i.to_bytes(8, byteorder='little')\n nonce_matrix = string_to_matrix_states(nonce_block)[0]\n # Cipher nonce block with key\n nonce_matrix_cipher = aes128_RoundBlock(nonce_matrix, key)\n d = xor_states(nonce_matrix_cipher, string_to_matrix_states(cipher_text_blocks[i])[0])\n cipher_string += matrix_to_bytes(d)\n return cipher_string",
"def __CreateCipher(self):\n is_data_avail = True\n if not self.__cipher:\n reqd_block_size = self.__key.block_size\n new_bytes_reqd = reqd_block_size - len(self.__encrypted_buffer)\n read_bytes, is_data_avail = self.__ReadBytes(new_bytes_reqd)\n if read_bytes:\n self.__encrypted_buffer += read_bytes\n if len(self.__encrypted_buffer) >= reqd_block_size:\n iv_bytes = self.__encrypted_buffer[:reqd_block_size]\n self.__encrypted_buffer = self.__encrypted_buffer[\n reqd_block_size:\n ]\n self.__hmac_stream.Update(iv_bytes)\n self.__cipher = AES.new(self.__key.key_bytes, AES.MODE_CBC,\n iv_bytes)\n return is_data_avail",
"def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext",
"def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))",
"def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message",
"def cbc(in_file, out_file, block_size, key, op):\n with open(in_file, 'rb') as input: # Open files\n with open(out_file, 'wb') as output:\n block = [True, True] # This is just to get into the while loop\n prev_block = read_block(input, block_size)[0] # This is the IV\n output.write(prev_block)\n \n while block[1]: # Iterate through the rest of the input\n block = read_block(input, block_size)\n if block[0] != -1: # Make sure there is at least one char in there\n if (op == \"-e\"):\n block_xor = xor(block[0], prev_block) \n block_cipher = func(block_xor, key)\n output.write(block_cipher)\n prev_block = block_cipher\n elif (op == \"-d\"):\n block_xor = unfunc(block[0], key)\n block_message = xor(block_xor, prev_block)\n output.write(block_message)\n prev_block = block[0]",
"def __init__(self, key, iv, do, ciphername='aes-256-cbc', tag_len=12, iv_len=7, tag=None):\n self.cipher = OpenSSL.get_cipher(ciphername)\n self.ctx = OpenSSL.EVP_CIPHER_CTX_new()\n if (do == 1 or do == 0):\n k = OpenSSL.malloc(key, len(key))\n IV = OpenSSL.malloc(iv, len(iv))\n if self.cipher == OpenSSL.get_cipher('aes-128-ccm') or \\\n self.cipher == OpenSSL.get_cipher('aes-128-gcm'):\n OpenSSL.EVP_CipherInit_ex(self.ctx, self.cipher.get_pointer(), 0, 0, 0, do)\n self.tag_len = tag_len\n self.iv_len = iv_len\n if do == 0:\n if tag is None or (tag is not None and len(tag) != tag_len):\n raise Exception(\"Invalid Tag Input...\")\n else:\n self.cipher_ctrl(tag_val=tag)\n else:\n self.cipher_ctrl()\n OpenSSL.EVP_CipherInit_ex(self.ctx, 0, 0, k, IV, do)\n else:\n OpenSSL.EVP_CipherInit_ex(\n self.ctx, self.cipher.get_pointer(), 0, k, IV, do)\n else:\n raise Exception(\"RTFM ...\")",
"def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text",
"def gen_ciphertext(message: str) -> str:\r\n key = 1\r\n for i in range(26):\r\n ciphertext = cipher(key, message)\r\n yield f\"Key #{key}: {ciphertext}\"\r\n key += 1",
"def __init__(self, block_cipher: BlockCipher, section_size: int, gamma_block_size: int):\n assert(block_cipher.key_size == 32)\n assert(block_cipher.block_size % 2 == 0)\n assert(block_cipher.key_size % block_cipher.block_size == 0)\n assert(section_size % block_cipher.block_size == 0)\n assert(block_cipher.block_size % gamma_block_size == 0)\n\n BlockCipherMode.__init__(self, block_cipher)\n self.section_size = section_size\n self.gamma_block_size = gamma_block_size"
] | [
"0.7619631",
"0.7126915",
"0.67098886",
"0.65550065",
"0.6327081",
"0.62329865",
"0.6143115",
"0.60997117",
"0.60525525",
"0.59431106",
"0.59406626",
"0.588603",
"0.5855224",
"0.5798523",
"0.5777665",
"0.573847",
"0.5719348",
"0.5686902",
"0.5685684",
"0.56328213",
"0.55945",
"0.5580485",
"0.5573387",
"0.555133",
"0.550119",
"0.54734033",
"0.54612625",
"0.5457154",
"0.54376197",
"0.5429617"
] | 0.714244 | 1 |
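Applications do not construct CtrMode directly; they go through a cipher factory such as Crypto.Cipher.AES, which builds the counter block and hands it to this constructor. A short usage sketch against PyCryptodome's public API — it assumes the pycryptodome package is installed:

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
cipher = AES.new(key, AES.MODE_CTR)            # nonce is generated automatically
ciphertext = cipher.encrypt(b"attack at dawn")

decrypter = AES.new(key, AES.MODE_CTR, nonce=cipher.nonce)
assert decrypter.decrypt(ciphertext) == b"attack at dawn"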
Instantiate a cipher object that performs CTR encryption/decryption. | def _create_ctr_cipher(factory, **kwargs):
cipher_state = factory._create_base_cipher(kwargs)
counter = kwargs.pop("counter", None)
nonce = kwargs.pop("nonce", None)
initial_value = kwargs.pop("initial_value", None)
if kwargs:
raise TypeError("Invalid parameters for CTR mode: %s" % str(kwargs))
if counter is not None and (nonce, initial_value) != (None, None):
raise TypeError("'counter' and 'nonce'/'initial_value'"
" are mutually exclusive")
if counter is None:
# Crypto.Util.Counter is not used
if nonce is None:
if factory.block_size < 16:
raise TypeError("Impossible to create a safe nonce for short"
" block sizes")
nonce = get_random_bytes(factory.block_size // 2)
else:
if len(nonce) >= factory.block_size:
raise ValueError("Nonce is too long")
# What is not nonce is counter
counter_len = factory.block_size - len(nonce)
if initial_value is None:
initial_value = 0
if is_native_int(initial_value):
if (1 << (counter_len * 8)) - 1 < initial_value:
raise ValueError("Initial counter value is too large")
initial_counter_block = nonce + long_to_bytes(initial_value, counter_len)
else:
if len(initial_value) != counter_len:
raise ValueError("Incorrect length for counter byte string (%d bytes, expected %d)" % (len(initial_value), counter_len))
initial_counter_block = nonce + initial_value
return CtrMode(cipher_state,
initial_counter_block,
len(nonce), # prefix
counter_len,
False) # little_endian
# Crypto.Util.Counter is used
# 'counter' used to be a callable object, but now it is
# just a dictionary for backward compatibility.
_counter = dict(counter)
try:
counter_len = _counter.pop("counter_len")
prefix = _counter.pop("prefix")
suffix = _counter.pop("suffix")
initial_value = _counter.pop("initial_value")
little_endian = _counter.pop("little_endian")
except KeyError:
raise TypeError("Incorrect counter object"
" (use Crypto.Util.Counter.new)")
# Compute initial counter block
words = []
while initial_value > 0:
words.append(struct.pack('B', initial_value & 255))
initial_value >>= 8
words += [ b'\x00' ] * max(0, counter_len - len(words))
if not little_endian:
words.reverse()
initial_counter_block = prefix + b"".join(words) + suffix
if len(initial_counter_block) != factory.block_size:
raise ValueError("Size of the counter block (%d bytes) must match"
" block size (%d)" % (len(initial_counter_block),
factory.block_size))
return CtrMode(cipher_state, initial_counter_block,
len(prefix), counter_len, little_endian) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, block_cipher, initial_counter_block,\n prefix_len, counter_len, little_endian):\n\n if len(initial_counter_block) == prefix_len + counter_len:\n self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)\n \"\"\"Nonce; not available if there is a fixed suffix\"\"\"\n\n self._state = VoidPointer()\n result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),\n c_uint8_ptr(initial_counter_block),\n c_size_t(len(initial_counter_block)),\n c_size_t(prefix_len),\n counter_len,\n little_endian,\n self._state.address_of())\n if result:\n raise ValueError(\"Error %X while instantiating the CTR mode\"\n % result)\n\n # Ensure that object disposal of this Python object will (eventually)\n # free the memory allocated by the raw library for the cipher mode\n self._state = SmartPointer(self._state.get(),\n raw_ctr_lib.CTR_stop_operation)\n\n # Memory allocated for the underlying block cipher is now owed\n # by the cipher mode\n block_cipher.release()\n\n self.block_size = len(initial_counter_block)\n \"\"\"The block size of the underlying cipher, in bytes.\"\"\"\n\n self._next = [self.encrypt, self.decrypt]",
"def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher",
"def aes_ctr(key, counter=None):\n return AES.new(key, AES.MODE_CTR, counter=(counter if counter is not None else Counter.new(128)))",
"def encryptAESCTR(key, plaintext):\n # 128-bit iv, securely generated\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return (iv, ciphertext)",
"def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)",
"def encrypt_ctr(self, plaintext, iv):\n assert len(iv) == 16\n\n plaintext = pad(plaintext)\n\n blocks = []\n nonce = iv\n for plaintext_block in split_blocks(plaintext):\n # CTR mode encrypt: plaintext_block XOR encrypt(nonce)\n block = xor_bytes(plaintext_block, self.encrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return b''.join(blocks)",
"def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):\n # can we use M2Crypto and was it requested?\n if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:\n # yes, so do so\n return self.EVPAdaptor(key_bytes, iv_bytes, mode)\n else:\n # default to PyCrypto\n return self.AESAdaptor(key_bytes, iv_bytes, mode)",
"def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()",
"def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext",
"def __Cipher(self, selector):\n assert selector in self.OP_TYPES, 'Invalid selector :%s' % selector\n if selector == self.OP_ACTIVE and (len(self.ciphers.keys()) > 1 or\n not len(self.ciphers.keys())):\n assert 0, 'If both encryption and decryption used then selector must \\\n be OP_ENCRYPT or OP_DECRYPT and at least 1 must be active'\n\n cipher = None\n if selector == self.OP_ACTIVE:\n # should only be one cipher active\n cipher = self.ciphers.values()[0]\n else:\n cipher = self.ciphers.get(selector)\n # have we been created a cipher for this selector yet?\n if not cipher:\n # no, so set it up as requested\n\n # convert between AES and EVP modes\n # NOTE: AES auto-selects based on key size using the same mode, but\n # EVP requires different mode strings for each key size (in bits)\n mode = 'aes_%s_cbc' % (self.key_size * 8)\n cipher = EVP.Cipher(alg=mode,\n key=self.key_bytes,\n iv=self.IV,\n op=selector,\n padding=0)\n self.ciphers[selector] = cipher\n return cipher",
"def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())",
"def __init__(self, key, iv, do, ciphername='aes-256-cbc', tag_len=12, iv_len=7, tag=None):\n self.cipher = OpenSSL.get_cipher(ciphername)\n self.ctx = OpenSSL.EVP_CIPHER_CTX_new()\n if (do == 1 or do == 0):\n k = OpenSSL.malloc(key, len(key))\n IV = OpenSSL.malloc(iv, len(iv))\n if self.cipher == OpenSSL.get_cipher('aes-128-ccm') or \\\n self.cipher == OpenSSL.get_cipher('aes-128-gcm'):\n OpenSSL.EVP_CipherInit_ex(self.ctx, self.cipher.get_pointer(), 0, 0, 0, do)\n self.tag_len = tag_len\n self.iv_len = iv_len\n if do == 0:\n if tag is None or (tag is not None and len(tag) != tag_len):\n raise Exception(\"Invalid Tag Input...\")\n else:\n self.cipher_ctrl(tag_val=tag)\n else:\n self.cipher_ctrl()\n OpenSSL.EVP_CipherInit_ex(self.ctx, 0, 0, k, IV, do)\n else:\n OpenSSL.EVP_CipherInit_ex(\n self.ctx, self.cipher.get_pointer(), 0, k, IV, do)\n else:\n raise Exception(\"RTFM ...\")",
"def decrypt_ctr(self, ciphertext, iv):\n assert len(iv) == 16\n\n blocks = []\n nonce = iv\n for ciphertext_block in split_blocks(ciphertext):\n # CTR mode decrypt: ciphertext XOR decrypt(nonce)\n block = xor_bytes(ciphertext_block, self.decrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return unpad(b''.join(blocks))",
"def __init__(self, key, msg0503):\n enkey1 = map(ord, AES.new(key).encrypt(msg0503[:16]))\n self.cipher = AES.new(\"\".join(\n map(chr, (enkey1[i] ^ ord(msg0503[i + 16]) for i in range(16)))))\n self.encrypt_seq = random.randint(0, 0xffff)",
"def operate_cipher(self):",
"def aes_ctr_decrypt(self, key: bytes, encrypted_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.decryptor()\n return enc.update(encrypted_data) + enc.finalize()",
"def __get_cipher(self):\n return Fernet(open(self.__key_file, 'rb').read())",
"def decryptAESCTR(key, iv, ciphertext):\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext) + decryptor.finalize()",
"def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message",
"def aes_cbc(key, iv=None):\n return AES.new(key, AES.MODE_CBC, iv if iv is not None else get_zero_vector(16))",
"def encrypt(self, plaintext, output=None):\n\n if self.encrypt not in self._next:\n raise TypeError(\"encrypt() cannot be called after decrypt()\")\n self._next = [self.encrypt]\n \n if output is None:\n ciphertext = create_string_buffer(len(plaintext))\n else:\n ciphertext = output\n \n if not is_writeable_buffer(output):\n raise TypeError(\"output must be a bytearray or a writeable memoryview\")\n \n if len(plaintext) != len(output):\n raise ValueError(\"output must have the same length as the input\"\n \" (%d bytes)\" % len(plaintext))\n\n result = raw_ctr_lib.CTR_encrypt(self._state.get(),\n c_uint8_ptr(plaintext),\n c_uint8_ptr(ciphertext),\n c_size_t(len(plaintext)))\n if result:\n if result == 0x60002:\n raise OverflowError(\"The counter has wrapped around in\"\n \" CTR mode\")\n raise ValueError(\"Error %X while encrypting in CTR mode\" % result)\n \n if output is None:\n return get_raw_buffer(ciphertext)\n else:\n return None",
"def main():\n b64 = (b\"L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvo\"\n b\"OLSFQ==\")\n binary = base64.b64decode(b64)\n\n key = b\"YELLOW SUBMARINE\"\n nonce = bytes(8)\n cipher = AES.new(key, AES.MODE_ECB)\n ctr = CTRMode(\n blksize=16,\n encrypt_blk=cipher.encrypt,\n decrypt_blk=cipher.decrypt,\n nonce=nonce,\n )\n\n decrypted = ctr.decrypt(binary)\n\n print(decrypted.decode())",
"def __init__(self,**kwargs):\n self.msg = kwargs.get('msg','')\n self.shift = kwargs.get('shift','')\n op = kwargs.get('op', False)\n if op:\n try:\n op = getattr(self,op)\n except AttributeError as e: \n raise CipherError(\"valid operations: (encode|decode).\")\n op()\n print \"cipher={c}|key={s}|{r}\".format(c=self.__module__.split('.')[2],\n s=self.shift,\n r=self.result)",
"def aes_cipher_from_key(key):\r\n return AES.new(key, AES.MODE_CBC, generate_aes_iv(key))",
"def __init__(self, key, plaintext=None, ciphertext=None):\n self.key = key\n # If plaintext is specified, generate its encrypted counterpart\n if plaintext:\n self.plaintext = plaintext\n self.ciphertext, self.iv = self.encrypt()\n # If instead cipher-text is specified, decrypt it\n elif ciphertext:\n self.ciphertext = ciphertext\n self.plaintext, self.iv = self.decrypt()\n # Otherwise declaration is invalid\n else:\n raise InvalidMessage(\"Either plaintext or cipher-text must be declared\")",
"def encryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend,key_type = 'AES128',mode='CBC'):\n\tif key_type == 'AES128':\n\t\talgo = algorithms.AES(key)\n\telif key_type == 'ChaCha20':\n\t\talgo = algorithms.ChaCha20(key,nonce=os.urandom(32))\n\telse:\n\t\traise('Error algorithm ' + key_type + ' not supported!')\n\tif mode == 'CBC':\n\t\tmode = modes.CBC(iv)\n\telif mode == 'GCM':\n\t\tmode = modes.GCM(iv)\n\telse :\n\t\traise('Error mode ' + mode + ' not supported!')\n\tcipher = Cipher(algo,mode,backend = bc)\n\treturn iv,key,cipher.encryptor()",
"def cipher(input_bytes, expanded_key, n_r):\n\n state = generate_initial_state(input_bytes)\n state = add_round_key(state, expanded_key, 0)\n\n # Apply rounds of operations as stated in AES standard\n for round_no in range(1, n_r):\n state = sub_bytes(state)\n state = shift_rows(state)\n state = mix_columns(state)\n state = add_round_key(state, expanded_key, round_no * 4 * 4)\n\n state = sub_bytes(state)\n state = shift_rows(state)\n state = add_round_key(state, expanded_key, n_r * 4 * 4)\n\n return state",
"def aes_enc_dec(self, key, iv, inputVal):\n\n\t\taes = Cipher(\"AES-128-CTR\")\n\t\tenc = aes.enc(key, iv)\n\t\toutput = enc.update(inputVal)\n\t\toutput += enc.finalize()\n\t\treturn output",
"def choose_cipher(cls):\n while True:\n\n crypt = input(\"Would you like to encrypt or decrypt?\").lower()\n print(crypt)\n if (crypt != \"encrypt\") and (crypt != \"decrypt\"):\n crypt = 0\n print(\"Invalid Selection\")\n else:\n break\n\n while True:\n\n cipher_choice = input(\"Select Cipher: \\n\"\n \"A) Affine\\n\"\n \"B) Atbash\\n\"\n \"C) Keyword\\n\"\n ).lower()\n\n if cipher_choice == (\"a\" or \"a)\" or \"affine\"):\n cipher_choice = \"affine\"\n break\n elif cipher_choice == (\"b\" or \"b)\" or \"atbash\"):\n cipher_choice = \"atbash\"\n break\n elif cipher_choice == (\"c\" or \"c)\" or \"keyword\"):\n cipher_choice = \"keyword\"\n break\n\n else:\n print(\"Invalid Selection\")\n while True:\n message = input(\"Input your message: \")\n if (len(message) >= 1):\n break\n else:\n print(\"Invalid Message\")\n while True:\n otp = input(\"Enter one time pad: \")\n if crypt == \"encrypt\" or crypt == \"e\":\n if (len(message) % 5):\n otp_length = (len(message) + (5 - (len(message) % 5)))\n else:\n otp_length = (len(message))\n if len(otp) >= otp_length:\n break\n else:\n print(\"otp for this message must be \"\n \"{} characters long\".format(otp_length))\n else:\n break\n return cls(crypt, cipher_choice, otp, message)",
"def __init__(self, ciphertext):\n\n slice_index = None # Replace None\n self.preamble = ciphertext[:slice_index]\n self.ciphertext = ciphertext[slice_index:]"
] | [
"0.72857714",
"0.7123023",
"0.6978616",
"0.6857513",
"0.6587187",
"0.6543356",
"0.65047663",
"0.645677",
"0.64431256",
"0.6244246",
"0.6212461",
"0.618751",
"0.60572654",
"0.6051523",
"0.6047843",
"0.6029323",
"0.592563",
"0.5921373",
"0.5918702",
"0.59144205",
"0.5887465",
"0.5854102",
"0.58235747",
"0.5823531",
"0.58145875",
"0.5801802",
"0.5748123",
"0.57185954",
"0.56878304",
"0.5654341"
] | 0.7571308 | 0 |
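The factory above accepts either nonce/initial_value or a legacy Crypto.Util.Counter dictionary, but never both. The two call paths through PyCryptodome's public API, shown side by side (assumes pycryptodome is installed):

from Crypto.Cipher import AES
from Crypto.Util import Counter
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)

# Path 1: explicit nonce plus an integer initial counter value.
c1 = AES.new(key, AES.MODE_CTR, nonce=b"\x00" * 8, initial_value=5)

# Path 2: a Crypto.Util.Counter object (mutually exclusive with nonce).
ctr = Counter.new(64, prefix=b"\x00" * 8, initial_value=5)
c2 = AES.new(key, AES.MODE_CTR, counter=ctr)

# Same key and same initial counter block, so the keystreams match.
assert c1.encrypt(b"16-byte message!") == c2.encrypt(b"16-byte message!")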
Execute the sequence of SQL statements in {sql} as a single command | def execute(self, *sql):
# assemble the command and pass it on to the connection
return self.postgres.execute(self.connection, "\n".join(sql)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n break\n result = self.execute_silent(stmt)\n #if result is not None,It's select stmt.\n if result:\n return result",
"def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))",
"def execute_sql_cmds(cursor, cmds, args):\n\tfor cmd in cmds:\n\t\tcursor.execute(cmd, args)\n\t\tif len(args) == 3:\n\t\t\tprint(\"{} rows updated on {} table for {}\".format(cursor.rowcount, str.split(cmd)[1], args[2]))\n\t\telse:\n\t\t\tprint(\"{} rows updated on {} table\".format(cursor.rowcount, str.split(cmd)[1]))",
"def runSql(self, sql):\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n return True",
"def execute(self, sql, *args, **kwgs):\n curr = self.conn.cursor()\n curr.execute(sql, *args, **kwgs)\n self.conn.commit()\n curr.close()",
"def pg_execute(pg_conn, sql):\n print sql\n # XXX execute command",
"def execute_sql_files(connection, sql_files):\n for filename in sql_files:\n statement = resource_text(filename)\n for sub_statement in statement.split(\";\"):\n if sub_statement.strip():\n connection.execute(text(sub_statement))",
"def sql_query(sql):\n cur = c.cursor()\n cur.execute(sql)\n c.commit()",
"def batched_query(self, sql):\r\n\r\n result_sets = []\r\n messages = \"\"\r\n query = []\r\n last_query=\"\"\r\n\r\n batches = re.split(\"^\\s*(GO(?:\\s+[0-9]+)?)\\s*(?:--.*)?$\",sql,flags=re.M|re.I)\r\n # print(batches)\r\n for b in batches:\r\n if b.upper() == \"GO\":\r\n # execute one\r\n query.append(last_query)\r\n continue\r\n else:\r\n match = re.match(\"^GO\\s+([0-9]+)$\",b,re.I)\r\n if match is not None:\r\n #execute many\r\n for i in range(0,int(match.group(1))):\r\n query.append(last_query)\r\n else:\r\n # not a Go statment\r\n last_query = b\r\n query.append(last_query)\r\n\r\n # print(query)\r\n for q in query:\r\n r = self.query(q)\r\n if r is not None:\r\n result_sets.extend(r)\r\n messages += self.messages\r\n\r\n self.messages = messages\r\n return result_sets",
"def execute_query_sequence(db_cursor, all_queries):\n\n for query in all_queries:\n db_cursor.execute(query)",
"def run_sql_file(self, sqlfile):\n try:\n queries = self.get_queries_from(sqlfile)\n queries_executed = 0\n for query in queries:\n if self._execute_query(query, values=None): # execute each query\n queries_executed += 1\n print(\"{} Executed queries from {}\".format(queries_executed, sqlfile))\n except pymysql.InternalError as error:\n print(error.args[1])",
"def execute_sql(self, a_sql):\n \n sql = sqlalchemy.text(a_sql)\n \n if self._activate_timer:\n result = []\n func = self._conn.execute\n the_timer = ftimer(func, [sql], {}, result, number = 1)\n self._log.debug(\"\\nTime: %s secs \\nDatabase: %s\\nRequest: %s\\n\"%(the_timer, self._url, a_sql))\n return result[0]\n else:\n result = self._conn.execute(sql)\n return result",
"def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise",
"def batch_execute(self, sql_list):\n with self.connection.cursor() as dbc:\n responses = []\n for sql in sql_list:\n dbc.execute(sql)\n responses.append(dbc.fetchall())\n return responses",
"def run_multiple_sql_statements(statements, fetch=True, cur=None, conn=None, commit=True):\n\n try:\n if conn is None:\n logger.error(\"Connection cannot be None.\")\n raise ValueError(\"Connection cannot be None.\")\n\n if cur is None:\n cur = conn.cursor()\n\n if statements is None:\n logger.error(\"Sql statement list is empty\")\n raise ValueError(\"Sql statement list is empty\")\n\n for _, statement in enumerate(statements):\n logger.debug(\"Executing SQL = \" + statement)\n res = cur.execute(statement)\n if fetch:\n data = cur.fetchall()\n else:\n data = None\n if commit:\n conn.commit()\n except Exception as exception:\n logger.error(exception)\n raise exception\n\n return (res, data)",
"def execute_on_each_row(self, a_sql, a_treatment):\n \n sql = sqlalchemy.text(a_sql)\n \n result = self._conn.execute(sql)\n \n row = result.fetchone()\n \n while row:\n a_treatment.executeOnRow(row)\n row = result.fetchone()\n \n result.close()",
"def sql_scripts_execute(self, sql_scripts, params={}):\n ps = self.parameter_handler(params)\n log.debug('Got parameters: %s', ps)\n cursor = self._get_cursor()\n for q in sql_scripts:\n with open(q, 'r') as s:\n sql_string_formatted = s.read().format(**ps)\n cursor.execute(sql.SQL(sql_string_formatted), ps)\n self.connection.commit()\n self.connection.close()",
"def query(self, *sql):\n self.cursor.execute(*sql)\n self.conn.commit()",
"def execute_and_commit_sql(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()",
"def __execsql(self, sql, seq):\n return self.sqldb.executemany(sql, [x._asdict() for x in seq])",
"def insert_many_execute(self) -> None:\n self.connection.isolation_level = None\n self.cursor.execute('BEGIN TRANSACTION')\n for i in self.__sql_buffer.split(';'):\n self.cursor.execute(i)\n self.__sql_buffer = \"\"\n self.cursor.execute('COMMIT')",
"def execute_sql(conn, sql):\n try:\n c = conn.cursor()\n if __debug__:\n print(\"Executing SQL: %s\" % sql)\n c.execute(sql)\n except Error as e:\n print(e)\n conn.commit()",
"def run(self):\n rows = None\n if self.sql.startswith('select'):\n conn = self.table.connect()\n with conn.cursor() as curs:\n try:\n curs.execute(self.sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {self.sql}:\n {error.code}\"\"\")\n self.excep = exc\n raise exc\n else:\n rows = curs.fetchall()\n # logging.critical(f\"\"\"executed {self.sql}\"\"\")\n self.result_exec = rows",
"def execute(self, sql):\n return self.db.execute(sql)",
"def run_new_sql(self):\n\n pass",
"def run_db_query(sql):\n with connect_sqlalchemy() as conn:\n return conn.execute(sql)",
"def run(self, sql, *args):\n return self.database.execute(sql, args)",
"def _runsql(self):\n self.logger.info(\"Running SQL where sequence > %s\" % self.seq)\n try:\n results = self.engine.execute(self.sql,\n (self.max_rows, self.seq)).fetchall()\n except sqlalchemy.exc.ProgrammingError, err:\n self.logger.critical(\"Error connecting to DB : %s\" % err)\n return None\n self.logger.info('Fetched %d rows from DB' % len(results))\n if not len(results):\n self.logger.info(\"No rows returned from DB. Finished loading\")\n return False\n return results",
"def execute(sql, args=()):\n res = con.execute(sql, args)\n con.commit()\n return res.fetchall()",
"def execute(self, sqlcmd):\n self._c.execute(sqlcmd)\n return"
] | [
"0.7174249",
"0.7070815",
"0.70517427",
"0.7040094",
"0.7025959",
"0.6985139",
"0.6980579",
"0.6964437",
"0.69108903",
"0.6902813",
"0.6886875",
"0.6844708",
"0.6828679",
"0.6801381",
"0.67388976",
"0.67349917",
"0.67162806",
"0.66934794",
"0.66897595",
"0.66596186",
"0.66156036",
"0.6584134",
"0.65690196",
"0.6567074",
"0.65546435",
"0.6548934",
"0.65483195",
"0.65304446",
"0.6498274",
"0.6480291"
] | 0.7186094 | 0 |
Return default zoom setting. | def _defaultZoom(self):
return (-1.0, 1.0, -1.0, 1.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def zoom(self):\n return self['zoom']",
"def zoom(self):\n return self.container['zoom']",
"def _get_zoom(self) :\n \n # TODO : make it absolute zoom value : a zoom of 1 displays one data\n # pixel in one viewport pixel.\n \n return self._zoom",
"def zoom(self) -> Optional[int]:\n return self.get(\"/Zoom\", None)",
"def zoom(self) -> float:\n return self._zoom",
"def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res",
"def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")",
"def get_zoom_transform(self):\n return self.zoom_levels[self.cur_zoom][1]",
"def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}",
"def action_set_zoom(self, value):\n if value >= 0 and value < len(self.zoom_levels) and value != self.cur_zoom:\n self.cur_zoom = value\n self.apply_zoom()",
"def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...",
"def __zoomReset(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomTo(0)\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(0)\n self.sbZoom.setValue(aw.getZoom())",
"def __zoom(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n from QScintilla.ZoomDialog import ZoomDialog\n dlg = ZoomDialog(aw.getZoom(), self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n value = dlg.getZoomSize()\n self.__zoomTo(value)",
"def setZoom(self, zoom):\r\n self._viewZoom = zoom",
"def _autozoom(self):\n bounds = self._autobounds()\n attrs = {}\n\n midpoint = lambda a, b: (a + b)/2\n attrs['location'] = (\n midpoint(bounds['min_lat'], bounds['max_lat']),\n midpoint(bounds['min_lon'], bounds['max_lon'])\n )\n\n # remove the following with new Folium release\n # rough approximation, assuming max_zoom is 18\n import math\n try:\n lat_diff = bounds['max_lat'] - bounds['min_lat']\n lon_diff = bounds['max_lon'] - bounds['min_lon']\n area, max_area = lat_diff*lon_diff, 180*360\n if area:\n factor = 1 + max(0, 1 - self._width/1000)/2 + max(0, 1-area**0.5)/2\n zoom = math.log(area/max_area)/-factor\n else:\n zoom = self._default_zoom\n zoom = max(1, min(18, round(zoom)))\n attrs['zoom_start'] = zoom\n except ValueError as e:\n raise Exception('Check that your locations are lat-lon pairs', e)\n\n return attrs",
"def test_map_settings_default():\n m = view(world)\n assert m.location == [\n pytest.approx(-3.1774349999999956, rel=1e-6),\n pytest.approx(2.842170943040401e-14, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 10\n assert m.options[\"zoomControl\"] == True\n assert m.position == \"relative\"\n assert m.height == (100.0, \"%\")\n assert m.width == (100.0, \"%\")\n assert m.left == (0, \"%\")\n assert m.top == (0, \"%\")\n assert m.global_switches.no_touch is False\n assert m.global_switches.disable_3d is False\n assert \"openstreetmap\" in m.to_dict()[\"children\"].keys()",
"def zoom(self, zoom):\n\n self.container['zoom'] = zoom",
"def scale_settings(self) -> Optional[pulumi.Input['ScaleSettingsArgs']]:\n return pulumi.get(self, \"scale_settings\")",
"def max_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / REGION_DIM)",
"def normalize_zoomlvl(lvl):\n if lvl < gMinZoomLevel:\n return gMinZoomLevel\n elif lvl > gMaxZoomLevel:\n return gMaxZoomLevel\n else:\n return lvl - gMinZoomLevel",
"def get_scale():\r\n\r\n \r\n return 0.5",
"def __zoomTo(self, value):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(value)\n self.sbZoom.setValue(aw.getZoom())",
"def __zoomIn(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomIn()\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomIn()\n self.sbZoom.setValue(aw.getZoom())",
"def getorelse(self, name, default=None):\n try:\n return self._defaults[name]\n except KeyError:\n return default",
"def min_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / np.max(self._m.world.t_size))",
"def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()",
"def at_zoom(self, zoom):\n params = {}\n for name, element in self.config.iteritems():\n out_element = _element_at_zoom(name, element, zoom)\n if out_element != None:\n params[name] = out_element\n\n return params",
"def set_zooming_keyboard(self):\n # Zooming: ALT + key arrows\n self.set('KeyPress', 'Zoom',\n key='Left', key_modifier='Control', \n param_getter=lambda p: (-.25, 0, 0, 0))\n self.set('KeyPress', 'Zoom',\n key='Right', key_modifier='Control', \n param_getter=lambda p: (.25, 0, 0, 0))\n self.set('KeyPress', 'Zoom',\n key='Up', key_modifier='Control', \n param_getter=lambda p: (0, 0, .25, 0))\n self.set('KeyPress', 'Zoom',\n key='Down', key_modifier='Control', \n param_getter=lambda p: (0, 0, -.25, 0))",
"def getDefaultLevel():\n return _libsbml.LayoutExtension_getDefaultLevel()",
"def DoZoom(self, mode):\n id_type = mode\n zoomlevel = self.GetZoom()\n if id_type == ed_glob.ID_ZOOM_OUT:\n if zoomlevel > -9:\n self.ZoomOut()\n elif id_type == ed_glob.ID_ZOOM_IN:\n if zoomlevel < 19:\n self.ZoomIn()\n else:\n self.SetZoom(0)\n return self.GetZoom()"
] | [
"0.7723424",
"0.7299348",
"0.72973317",
"0.71936",
"0.70050627",
"0.68496126",
"0.65712726",
"0.625384",
"0.6145273",
"0.6142065",
"0.61153233",
"0.6006282",
"0.5985987",
"0.5982547",
"0.59785664",
"0.59773666",
"0.5952824",
"0.58633906",
"0.5851663",
"0.58423215",
"0.57971275",
"0.5784571",
"0.57064813",
"0.5677304",
"0.56635326",
"0.5645525",
"0.5631767",
"0.5624415",
"0.56236774",
"0.5616822"
] | 0.8090395 | 0 |
Approximates root of this function using single iteration of Newton's method. | def newtonsMethod(self, x, a):
return x - a * (self._f(x) / self._df(x)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def newton(f, xinit, tol, N):\n if f(xinit) < tol:\n return xinit\n else:\n n = 1\n while n < N:\n xnew = xinit - (f(xinit) / derivative(f, xinit))\n if abs(f(xnew)) < tol:\n print('Root found. Number of iterations: ', n)\n return xnew\n break\n else:\n xinit = xnew\n n = n + 1\n else:\n return 'Max iterations reached. No root found within chosen tolerance.'",
"def newton_method(f, x_init = 0, epsilon = 1e-10):\n prev_value = x_init + 2 * epsilon\n value = x_init\n\n iterations = 0\n while abs(prev_value - value) > epsilon:\n prev_value = value\n\n f_dash = derivative(f, value)\n value = value - f(value) / f_dash\n\n iterations += 1\n\n print(f\"Newton Method converged in {iterations} iterations\")\n\n return value",
"def newton(f, x0, Df, tol=1e-5, maxiter=15, alpha=1.):\n raise NotImplementedError(\"Problem 1 Incomplete\")",
"def square_root_with_newton_method(number, iterations):\n # Inital value of g.\n # Cycle based on the iterations number.\n # Formula in the cycle.\n # Return the rounded final result.\n\n if number <= 0 or iterations < 0:\n return None\n g = number * 0.5\n for i in range(int(iterations)):\n g2 = (g + number / g) * 0.5\n g = g2\n return round(g, 3)",
"def rootfind_newton(func, x0, a, b, maxiter=50, tol=1.0e-11):\n\n for iter in xrange(maxiter):\n\n fval, fpval, args = func(x0)\n # print \"x0=%.4f fval=%.2e fpval=%.2e [%.4f, %.4f]\" % (x0, fval, fpval, a, b)\n\n if fval < 0:\n a = x0\n else:\n b = x0\n\n x = x0 - fval/fpval\n if not (a < x < b):\n # Once we have bracketed the root, we don't allow the\n # iterations to leave the bracket.\n x = 0.5*(a+b)\n\n if np.abs(x-x0) < tol or np.abs(fval) < tol:\n break\n\n x0 = x\n\n return x, fval, iter, args",
"def test_newton():\n\n f = lambda x: x**2 + np.sin(5*x)\n df = lambda x: 2*x + 5*np.cos(5*x)\n ddf = lambda x: 2 + 0,-25*np.sin(5*x)\n\n\n print newtonsMethod(f,df,ddf, 0, niter = 100)",
"def newtons_method(f, initial_guess, max_iter = 1000, method = 'exact', tol =1e-12):\n\n if method not in ['inverse', 'exact', 'gmres', 'gmres_action']:\n raise Exception(\"Not a valid method.\")\n if len(f(initial_guess)) != len(initial_guess):\n raise Exception('Output dimension of f should be the same as the input dimension of f.')\n if method == 'gmres_action':\n return _newtons_method_gmres_action(f, initial_guess, max_iter, tol)\n x0 = ad.create_vector('x0', initial_guess)\n for iter_num in range(max_iter):\n fn = np.array(f(x0)); #need convert the list/array that is passed back from function, so downstream autodiff functions for vectors work properly\n jacob = ad.get_jacobian(fn, ['x0{}'.format(i) for i in range(1, len(fn) + 1)])\n if method == 'inverse':\n step = np.linalg.inv(-jacob).dot(ad.get_value(fn))\n if method == 'exact':\n step = np.linalg.solve(-jacob, ad.get_value(fn))\n elif method == 'gmres':\n step, _ = gmres(jacob, -ad.get_value(fn), tol = tol, atol = 'legacy')\n xnext = x0 + step\n \n #check if we have converged\n if np.all(np.abs(ad.get_value(xnext) - ad.get_value(x0)) < tol):\n return (ad.get_value(xnext), iter_num + 1);\n \n #update x0 because we have not converged yet\n x0 = xnext\n \n raise RuntimeError(\"Failed to converge after {0} iterations, value is {1}\".format(max_iter, ad.get_value(x0)) );",
"def my_Newton( fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs( fct( xn**(i + 1)) - fct( xn**i)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print( i, 'fct value', abs( fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs( fct( xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan",
"def newton_method(f, x, Ep, step):\n\n while True:\n step = step + 1\n # print(\"bnd1:=\",bnd1)\n h = f(x) / derivative(f, x)\n x = x - h\n if (decide(abs(h) <= Ep)):\n break\n # print(\"Root in Approximation: \",bnd1)\n return step",
"def newton(f, x0, dx, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Fill the Jacobian matrix\n for j in range(n):\n x[j] += dx[j]\n df = f(x) - fx\n\n for i in range(n):\n J[i, j] = df[i] / dx[j]\n\n x[j] -= dx[j]\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin backtracking linesearch\n lamb = 2.0\n while True: \n lamb /= 2\n y = x + Dx * lamb\n fy = f(y)\n\n fynorm = np.linalg.norm(fy)\n fxnorm = np.linalg.norm(fx)\n\n if (fynorm < (1 - lamb / 2) * fxnorm) or (lamb < (1 / 128.0)):\n break\n\n # Save latest approximation\n x = y\n fx = fy\n\n Dxnorm = np.linalg.norm(Dx)\n fxnorm = np.linalg.norm(fx)\n dxnorm = np.linalg.norm(dx)\n if Dxnorm < dxnorm or fxnorm < eps:\n break\n\n return x",
"def newton_iteration(f: Callable, df: Callable, eps: float, x0: float = None, a: float = None, b: float = None,\n weight: float = 0.9, display: bool = False, max_iterations: int = 100) -> float:\n x = np.inf\n if x0 is None:\n x0 = (a + b) / 2\n if a is not None and b is not None and a == b:\n return a\n x_next = x0\n iterations = 0\n while abs(x - x_next) > eps and iterations < max_iterations:\n iterations += 1\n x = x_next\n\n if display:\n import matplotlib.pyplot as plt\n xx0 = a or x-1\n xx1 = b or x+1\n xx = np.linspace(xx0, xx1, 100)\n yy = np.array(list(map(f, xx)))\n plt.plot(xx, yy)\n plt.axvline(x=x)\n plt.show()\n\n f_x = f(x)\n try:\n df_x = df(x)\n except ZeroDivisionError:\n df_x = (f_x - f(x-eps))/eps\n if df_x != 0:\n x_next = x - f_x / df_x\n\n if a is not None and x_next < a:\n x_next = weight * a + (1 - weight) * x\n elif b is not None and x_next > b:\n x_next = weight * b + (1 - weight) * x\n\n if a is not None and x_next < a:\n x_next = a\n if b is not None and x_next > b:\n x_next = b\n\n return x_next",
"def my_Newton(fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs(fct (xn)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print(i , 'fct_value', abs(fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs(fct(xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan",
"def newton(f, x0, Df, tol=1e-5, maxiter=15, alpha=1.):\r\n #initialize variables\r\n iter = 0\r\n xk = x0\r\n change = tol+1\r\n #perform newton's method until maxiter hit or under tolerance\r\n #if x is in R\r\n if np.isscalar(x0):\r\n while iter < maxiter and change > tol:\r\n iter += 1\r\n xk1 = xk\r\n xk = xk - alpha*f(xk)/Df(xk)\r\n change = abs(xk-xk1)\r\n #if x is in Rn\r\n else:\r\n while iter < maxiter and change > tol:\r\n iter += 1\r\n xk1 = xk\r\n D = Df(xk)\r\n #make sure the matrix isn't singular\r\n if la.det(D)==0:\r\n break\r\n yk = la.solve(D,f(xk))\r\n xk = xk - alpha*yk\r\n change = la.norm(xk-xk1)\r\n #check if method converged\r\n if change > tol:\r\n conv = False\r\n else:\r\n conv = True\r\n return xk,conv,iter",
"def root_finding_newton_previously(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value)\n x = x + delta\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError('Maximum iteration reached in newton root finding!')\n return x, iteration_counter",
"def root_finding_newton_previously(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n # F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value)\n x = x + delta\n F_value = fun(x, args)\n # F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError(\"Maximum iteration reached in newton root finding!\")\n return x, iteration_counter",
"def root_finding_newton(fun, J, x, eps, max_iter, args):\n F_value = fun(x, *args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value_)\n\n for i in range(x.size): #wtf numba!?!?!\n x[i] += delta[i,0]\n\n F_value = fun(x, *args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError('Maximum iteration reached in newton root finding!')\n return x, iteration_counter",
"def newton1d(f, df, ddf, x, niter=10):\n for i in xrange(niter):\n x_new = x - df(x)/ddf(x)\n x = x_new\n return x",
"def newton(f, f_prime, x0, n):\n approximations = [x0]\n\n xnm1 = x0\n for i in range(1, n):\n xn = xnm1 - f(xnm1) / f_prime(xnm1)\n approximations.append(xn)\n xnm1 = xn\n\n for n in range(len(approximations)):\n print(f'Approximation {n + 1}: {approximations[n]:<17.13f} Exact: {pi:<17.13f} Difference: {abs(pi - approximations[n]):.13e}')",
"def newtons_method_1d(f, df_dx, x0, tol):\n # begin solution\n x = x0\n while abs(f(x)) > tol:\n x -= f(x) / df_dx(x)\n return x\n # end solution",
"def newton_method_vector(f, x_init, epsilon = 1e-10):\n prev_value = x_init + 2 * epsilon\n value = x_init\n\n iterations = 0\n while np.all(np.abs(prev_value - value)) > epsilon:\n prev_value = value\n\n j = jacobian(f, value)\n value = value - np.dot(np.linalg.pinv(j), f(value))\n\n iterations += 1\n\n print(f\"Newton Method converged in {iterations} iterations\")\n\n return value",
"def quasi_newtons_method(f, initial_guess, max_iter = 10000, method = 'BFGS', tol = 1e-12):\n \n if method not in ['BFGS', 'DFP', 'Broyden']:\n raise Exception(\"Not a valid method.\")\n x = initial_guess\n H = np.identity(len(x))\n for i in range(max_iter):\n x_vector = ad.create_vector('x', x)\n fn_at_x = f(x_vector)\n gradient = fn_at_x.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n\n p = -H @ gradient\n \n alpha = line_search(f, x, p)\n delta_x = alpha * p\n\n x = x + delta_x\n x_vector2 = ad.create_vector('x', x)\n fn_at_x2 = f(x_vector2)\n gradient2 = fn_at_x2.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n if np.sqrt(np.abs(gradient2).sum()) < tol:\n break\n y = (gradient2 - gradient).reshape(-1, 1)\n delta_x = delta_x.reshape(-1, 1)\n if method == 'BFGS':\n H = (np.identity(len(H)) - (delta_x @ y.T) / (y.T @ delta_x)) @ H \\\n @ (np.identity(len(H)) - (y @ delta_x.T) / (y.T @ delta_x)) + (delta_x @ delta_x.T) / (y.T @ delta_x)\n elif method == 'DFP':\n H = H + (delta_x @ delta_x.T) / (delta_x.T @ y) - (H @ y @ y.T @ H) / (y.T @ H @ y)\n elif method == 'Broyden':\n H = H + ((delta_x - H @ y) @ delta_x.T @ H) / (delta_x.T @ H @ y)\n\n return (x, i + 1)",
"def newton_raphson(f,x0,iterations): \n current = x0\n fdash = differentiate_polynomial(f)\n print(fdash)\n for i in range(iterations): \n current = current - evaluate_polynomial(f,current)/evaluate_polynomial(fdash,current)\n return current",
"def NewtonMethod(f, df, x=0.75, tol=1e-10):\n\tstart = time()\n\terror = tol + 1\n\t\n\ti = 0\n\terrs = []\n\n\twhile error > tol:\n\t\terrs.append(error)\n\n\t\tx_temp = x\n\t\tx = x - f(x) / df(x)\n\t\terror = np.abs(x-x_temp)\n\t\ti = i+1\n\tend = time()\n\treturn x, (end-start), i",
"def sqrt_newton(a):\n\tdef sqrt_update(x):\n\t\treturn 0.5 * (x + a / x)\n\tdef sqrt_close(x):\n\t\treturn approx_eq(x * x, a)\n\treturn improve(sqrt_update, sqrt_close)",
"def newton1d(f, df, ddf, x, niter=10):\n\n x_0 = x\n x_k = x\n\n for i in xrange(niter):\n x_k1 = x_k - df(x_k)/ddf(x_k)\n x_k = x_k1\n\n return x_k",
"def newton_update(f, df):\n def update(x):\n return x - f(x) / df(x)\n return update",
"def newton(n):\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x",
"def test_newton_root_finder(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n initial_values = np.ones(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(objective_and_gradient, initial_values)\n root_values, converged, failed = self.evaluate(roots)\n\n # Reference values.\n roots_bench = np.array([2.0, 3.0, 4.0])\n converged_bench = np.array([True, True, True])\n failed_bench = np.array([False, False, False])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)\n np.testing.assert_almost_equal(root_values, roots_bench, decimal=7)",
"def root_finding_newton(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value_)\n\n for i in range(x.size): # wtf numba!?!?!\n x[i] += delta[i, 0]\n\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError(\"Maximum iteration reached in newton root finding!\")\n return x, iteration_counter",
"def _newton_update(func):\n return lambda x: x - func[0](x) / func[1](x)"
] | [
"0.7563373",
"0.75321746",
"0.7448436",
"0.74475104",
"0.7388106",
"0.7297471",
"0.7297028",
"0.72617483",
"0.72546536",
"0.7249506",
"0.72134876",
"0.7199049",
"0.7197292",
"0.71775806",
"0.71667355",
"0.71341896",
"0.71326596",
"0.7126753",
"0.7094095",
"0.7091968",
"0.70573956",
"0.7051226",
"0.7014604",
"0.7011255",
"0.6941722",
"0.69401205",
"0.6873508",
"0.68681645",
"0.68659025",
"0.67397463"
] | 0.76958627 | 0 |
Converts the generated fractal into an RGB image array | def _toRgbImage(self, fractal, colors, color_offset):
soln_real = adjustRange(fractal[0], 0, 127)
soln_imag = adjustRange(fractal[1], 0, 127)
iters = adjustRange(fractal[2], 0, 128)
rgb_image = np.array([
soln_real + iters,
soln_imag + iters,
iters
]
).astype(dtype=np.uint8)
return rgb_image.T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b",
"def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = fractal value.\n fractal,\n\n # Maximum value.\n np.ones(fractal.shape)\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img",
"def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = 1 where fractal values > 0,\n # Saturation = 0 otherwise.\n fractal.astype(dtype=bool).astype(dtype=float),\n\n # Invert colours\n 1 - fractal\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img",
"def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c",
"def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im",
"def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array",
"def generate_channels(path):\n # Abrir imagen y transformar a array\n image = Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)",
"def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)",
"def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img",
"def img_to_rgb(img):\r\n if len(img.shape) < 3 or img.shape[2] == 1:\r\n return np.repeat(img, 3).reshape(img.shape[0], img.shape[1], 3)\r\n else:\r\n return img",
"def GetRGBArray(self, p_int):\n ...",
"def reveal_RGB_image(filename):\n\tnew_array = [[], [], []]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)",
"def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)",
"def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)",
"def generate_array_image(R, G, B, height, width):\n R = R.reshape((height, width))\n G = G.reshape((height, width))\n B = B.reshape((height, width))\n \n return np.moveaxis(np.array([R, G, B]), 0, -1)",
"def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr",
"def data_to_bytescale_rgb(data): # used to create the SOURCE PNGs (MRI, FA, MD)\n im = bytescale(data)\n w, h = im.shape\n ret = np.empty((w,h,3), dtype=np.uint8)\n ret[:,:,0] = im\n ret[:,:,1] = im\n ret[:,:,2] = im\n return ret",
"def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr",
"def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a",
"def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out",
"def load_image_as_rgb(image_path):\n im = imageio.imread(image_path)\n y_size = im.shape[0]\n x_size = im.shape[1]\n logging.info(\"Image has dimensions X:%d Y:%d\" % (x_size, y_size))\n arr = np.zeros((im.shape[0],im.shape[1]), dtype=int)\n i = 0\n for im_row in im:\n j = 0\n for vec in im_row:\n arr[i,j] = rgb_vec_to_num(vec)\n j = j + 1\n i = i + 1\n return arr",
"def _preprocess(self, image):\n\n # Scale from [0, 255] to [0, 1] and BGR to RGB \n return (image / 255.0)[:, :, ::-1]",
"def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im",
"def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b",
"def read_color_image(path):\n with open(path, 'rb') as f:\n img = Image.fromarray(read_ppm(f), mode='RGB')\n img = tf.keras.preprocessing.image.img_to_array(img, dtype=int)\n img = tf.convert_to_tensor(img)\n return img",
"def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])",
"def image2array(filename, shape=None):\n # Open the image and change it to black and white\n im = Image.open(filename).convert('1', dither=Image.NONE)\n\n im = im.resize(shape, Image.ANTIALIAS)\n pattern = np.array(im)\n \n return pattern",
"def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva",
"def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg",
"def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values"
] | [
"0.7032956",
"0.6756124",
"0.6748998",
"0.66770595",
"0.64644593",
"0.6463771",
"0.64612466",
"0.6436402",
"0.6378915",
"0.6336183",
"0.6302488",
"0.6263761",
"0.624275",
"0.6217485",
"0.62155837",
"0.61859244",
"0.61853856",
"0.61381644",
"0.61203206",
"0.60929006",
"0.6056744",
"0.6051514",
"0.6051217",
"0.6048501",
"0.60263324",
"0.6018934",
"0.5997953",
"0.5955463",
"0.5953483",
"0.5934768"
] | 0.70360917 | 0 |
checking return values for `start` and `end` when calling channel_messages for numbers not multiples of 50. | def test_channel_messages_unlimited_pagination():
clear()
userOne = auth_register('[email protected]', '123abc!@#', 'First', 'User')
randChannel = channels_create(userOne['token'], 'randChannel', True)
for _ in range(149):
message_send(userOne['token'], randChannel['channel_id'], 'Hello')
messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)
assert(messages['start'] == 0)
assert(messages['end'] == 50)
messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)
assert(messages2['start'] == 50)
assert(messages2['end'] == 100)
messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)
assert(messages3['start'] == 100)
assert(messages3['end'] == -1)
assert(len(messages3['messages']) == 49)
# an error should be raised when start is beyond 149 messages
with pytest.raises(InputError):
channel_messages(userOne['token'], randChannel['channel_id'], 150) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def channel_messages(token, channel_id, start):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n if is_user_channel_member(channel_id, curr_id) is False:\n raise error.AccessError(description=\"user is not a member of this channel\")\n\n #get channel data\n curr_channel = database.get_channel_data(channel_id)\n # find the length of messages\n messages_length = len(curr_channel[\"messages\"])\n\n # if start is after the oldest message in messages InputError is raised\n # if messages is called and start is 0 on an empty channel, it returns an empty channel.\n # if start is after the oldest message in messages InputError is raised\n\n if messages_length <= start and (messages_length != 0 or start > 0):\n raise error.InputError(description=\"\"\"The start value selected is\n past the oldest message in the list\"\"\")\n\n if messages_length == 0 and start == 0:\n return {\"messages\": [], \"start\": start, \"end\": -1}\n\n # get the list of dictionaries 'message'\n curr_messages = curr_channel[\"messages\"]\n messages_returned = []\n\n end = start + 50\n num_msgs_to_check = messages_length - start\n\n # If end is larger than the total no. of messages,\n # the function will print till end and return -1\n if num_msgs_to_check < 50:\n\n counter = 0\n while counter < num_msgs_to_check:\n target_message_index = start + counter\n messages_returned.append(curr_messages[target_message_index])\n counter += 1\n\n end = -1\n # else if end is within total no of messages,\n # function will print 50 messaages from start and return start + 50\n else:\n # loop to add each message to return up till 50 messages is returned\n counter = 0\n while counter < 50:\n target_message_index = start + counter\n messages_returned.append(curr_messages[target_message_index])\n counter += 1\n\n for msg in messages_returned:\n for react in msg['reacts']:\n react['is_this_user_reacted'] = curr_id in react['u_ids']\n\n return {\"messages\": messages_returned, \"start\": start, \"end\": end}",
"def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value",
"def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit",
"def test_if_it_outputs_correct_output_for_numbers_greater_than_50(self):\n self.assertEquals(len(prime_numbers(55)), 16)",
"def test_generator_downward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: -1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_min",
"def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max",
"def validate(c_name, val):\n n = 80\n threshold = 4\n while (threshold >= 0):\n if ((len(channels[c_name]) > n) and (val <= threshold)):\n return True\n else:\n n -= 20\n threshold -= 1\n\n return False",
"def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])",
"def test_seq_rangeExamples(self):\n\n self.assertEqual(MessageSet(2, 4), MessageSet(4, 2))\n self.assertEqual(list(MessageSet(2, 4)), [2, 3, 4])\n\n m = MessageSet(3291, None)\n m.last = 3290\n self.assertEqual(list(m), [3290, 3291])",
"def test_args_count_in_range(args: list, min: int, max: int) -> bool:\n\n\tcount = args_count(args)\n\treturn (count >= min and count <= max)",
"def test_hello_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_hello_failed_code.__iter__()\n length = self.test_hello_failed_code.__len__()\n\n while value < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_HELLO_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_stream_loop(self):\n chans, gains, scans, rate = (10,10,10,10), (1,2,4,5), 1024, 500\n v = [v[0] for v in self.l.stream_sync(\n channels=chans, gains=gains,\n num_scans=scans, rate=rate)]\n for vi in v:\n for r in vi:\n self.assertTrue(abs(r-2.5) < .1,\n \"%s should be cal, 2.5v\" % vi[0])",
"def _send_messages(number_range, partition=0, topic=topic, producer=kafka_producer, request=request):\n messages_and_futures = [] # [(message, produce_future),]\n for i in number_range:\n # request.node.name provides the test name (including parametrized values)\n encoded_msg = '{}-{}-{}'.format(i, request.node.name, uuid.uuid4()).encode('utf-8')\n future = kafka_producer.send(topic, value=encoded_msg, partition=partition)\n messages_and_futures.append((encoded_msg, future))\n kafka_producer.flush()\n for (msg, f) in messages_and_futures:\n assert f.succeeded()\n return [msg for (msg, f) in messages_and_futures]",
"def check_delivered_messages(results):\n assert results[\"metrics\"][\"Delivered messages\"] == 20",
"def channel_messages(token, channel_id, start=0):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if auth_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n messages_results = messages.query(\"channel_id\", \"==\", channel_id)\n if start > len(messages_results):\n raise ValueError(\n \"start is greater than the total number of messages in the channel.\"\n )\n if start < 0:\n raise ValueError(\"Invalid value for start.\")\n sorted_messages = sorted(messages_results, key=itemgetter(\"time_created\"))\n sorted_messages.reverse()\n end = len(sorted_messages) if start + 50 > len(sorted_messages) - 1 else start + 50\n messages_list = sorted_messages[start:end]\n returned_messages = []\n for message in messages_list:\n returned_messages.append(\n {\n \"message_id\": message[\"message_id\"],\n \"u_id\": message[\"u_id\"],\n \"message\": message[\"message\"],\n \"is_pinned\": message[\"is_pinned\"],\n \"time_created\": message[\"time_created\"],\n }\n )\n for message in returned_messages:\n reacts_results = reacts.get(message[\"message_id\"])\n returned_reacts = []\n for react_id in reacts_results:\n if not react_id == \"message_id\":\n returned_reacts.append(\n {\n \"react_id\": react_id,\n \"u_ids\": reacts_results[react_id],\n \"is_this_user_reacted\": auth_u_id in reacts_results[react_id],\n }\n )\n message[\"reacts\"] = returned_reacts\n return {\n \"messages\": returned_messages,\n \"start\": start,\n \"end\": -1 if end == len(sorted_messages) else end,\n }",
"def check_all():\r\n i = 100000\r\n while i <= 999996:\r\n if check(i):\r\n print(i)\r\n i = i + 1",
"def test_meter_mod_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_meter_mod_failed_code.__iter__()\n length = self.test_meter_mod_failed_code.__len__()\n\n while value < self.MAX_METER_MOD_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_METER_MOD_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"async def chatchart(self, ctx, channel: Optional[discord.TextChannel] = None, messages:int = 5000):\n if channel is None:\n channel = ctx.channel\n\n # --- Early terminations\n if channel.permissions_for(ctx.message.author).read_messages is False:\n return await ctx.send(\"You're not allowed to access that channel.\")\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n return await ctx.send(\"I cannot read the history of that channel.\")\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in blacklisted_channels:\n return await ctx.send(f\"I am not allowed to create a chatchart of {channel.mention}.\")\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n\n message_limit = await self.config.limit()\n if (message_limit != 0) and (messages > message_limit):\n messages = message_limit\n\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(\"No permissions to read that channel.\")\n\n msg_data = self.calculate_member_perc(history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in {channel.mention} or I can't read message history.\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, channel)\n\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))",
"def test_flow_monitor_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_flow_monitor_failed_code.__iter__()\n length = self.test_flow_monitor_failed_code.__len__()\n\n while value < self.MAX_FLOW_MONITOR_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_FLOW_MONITOR_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def count_valid(message, prefix):\n return 3",
"def test_inrange():\n assert cs.any > 0\n assert cs.any < cmax",
"def test_async_config_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_async_config_failed_code.__iter__()\n length = self.test_async_config_failed_code.__len__()\n\n while value < self.MAX_ASYNC_CONFIG_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_ASYNC_CONFIG_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def range_query(self, start_key, end_key):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n\n print \"THIS IS FAKE\"\n pycastle_log.info(\"Doing range query from key \"+str(start_key)+\" to key \"+str(end_key))\n try:\n i = 0\n while i < 10:\n yield i\n i+=1\n if i % 5 == 0:\n pycastle_log.info(\"Getting next batch\")\n except GeneratorExit:\n pycastle_log.info(\"User requested stop of range query from key \"+str(start_key)+\" to key \"+str(end_key))",
"def numeralcheck(msg, *args):\r\n try:\r\n num = int(msg.content)\r\n if num == 0 and not args:\r\n return (\"done\")\r\n return (num)\r\n except ValueError:\r\n if msg.content.lower() == \"done\":\r\n if args:\r\n return (0)\r\n return (\"done\")",
"def test_sufficientWidth(self):\n msg = \"barbazbo\"\n maxLen = len(\"PRIVMSG foo :{}\".format(msg)) + 2\n self.client.msg(\"foo\", msg, maxLen)\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :{}\".format(msg)])\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen - 1)\n self.assertEqual(2, len(self.client.lines))\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen + 1)\n self.assertEqual(1, len(self.client.lines))",
"def test_splitLongMessagesWithDefault(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 2)",
"def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)",
"def send_messages(_) -> int:\n return 1 << 11",
"def send_messages(_) -> int:\n return 1 << 11",
"def check_exit_reached(minimum: int, maximum: int) -> list:\n the_exit = [minimum - 1, maximum - 1]\n return the_exit"
] | [
"0.6338927",
"0.58790916",
"0.53648764",
"0.53105456",
"0.52494335",
"0.5227476",
"0.519632",
"0.51526994",
"0.5122233",
"0.5106034",
"0.50966465",
"0.50898653",
"0.50656456",
"0.5056774",
"0.50472474",
"0.50439817",
"0.5038363",
"0.50265247",
"0.50246954",
"0.50190175",
"0.50129956",
"0.5001165",
"0.49974138",
"0.49937803",
"0.49851128",
"0.49762174",
"0.4967449",
"0.49574095",
"0.49574095",
"0.49535394"
] | 0.6797525 | 0 |
checking if the user is already in the channel; raise AccessError if they are | def test_channel_join_already_in_channel():
clear()
user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
userchannel_id = channels_create(user['token'], 'userchannel', True)
with pytest.raises(AccessError):
channel_join(user['token'], userchannel_id['channel_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False",
"def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False",
"def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')",
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False",
"def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])",
"def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)",
"def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False",
"def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())",
"async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True",
"def check_if_user_can_interact(bot, update, *args, **kwargs):\n\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user: # user non prensete nel db id_users\n if 'private' in update.message.chat.type: # se il messaggio è stato mandato in privata allora devo chiedere l'accesso\n self.request_access(bot, user_id)\n elif 'supergroup' in update.message.chat.type: # altrimenti guardo se è presente nei bot_users\n bot_users = DB.execute(TABELLE['bot_users']['select']['by_ids'], (user_id, bot.id))\n if not bot_users: # se non è presente glielo dico e lo salvo nel db\n update.message.reply_text(\"E tu chi sei? Non ti ho mai visto da queste parti...\"\n \"Perche non mi invii un bel messaggio di start cosi diventiamo amici?\",\n reply_to_message_id=update.message.message_id)\n self.add_bot_user(update._effective_user, bot.id)\n\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n else:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)",
"def already_logged_in(oauth_user, oauth_service):\n try:\n created = current_user.add_oauth_identity(oauth_user.service_name, oauth_user.service_user_id)\n if created:\n message = 'Linked your ' + oauth_service.value + ' account to your CatHerder account!'\n else:\n message = 'Your ' + oauth_service.value + ' account is already linked to a CatHerder user.'\n return current_user, message, True\n except Exception as e:\n return None, e.message, False",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)",
"async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True",
"def vc_only():\n\n async def check(ctx):\n if ctx.guild and ctx.author.voice:\n if not ctx.guild.me.voice or ctx.author.voice.channel == ctx.guild.me.voice.channel:\n return True\n await ctx.reply(\"I'm already in another voice channel!\")\n return False\n await ctx.reply('You must join a server voice channel first!')\n return False\n\n return commands.check(check)",
"def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")",
"def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])",
"def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")"
] | [
"0.6544167",
"0.6521434",
"0.64236474",
"0.6420696",
"0.63337475",
"0.63098836",
"0.6305338",
"0.62802494",
"0.6259734",
"0.6218568",
"0.6136835",
"0.60846996",
"0.6068947",
"0.6049151",
"0.6002369",
"0.59844506",
"0.597856",
"0.5952112",
"0.5945146",
"0.5934715",
"0.59036577",
"0.59036577",
"0.58986306",
"0.58922917",
"0.5889184",
"0.58862996",
"0.58634686",
"0.5858502",
"0.5858213",
"0.5852138"
] | 0.7157796 | 0 |
if the channel is private and no invite is given to the user, the owner of flockr can still join the channel | def test_channel_join_private_owner():
clear()
joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')
user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
userchannel_id = channels_create(user['token'], 'userchannel', False)
channel_join(joiner['token'], userchannel_id['channel_id'])
randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])
assert(randChannel_details['all_members'] == [
{
'u_id' : user['u_id'],
'name_first' : 'first',
'name_last' : 'last',
'profile_img_url': ''
},
{
'u_id' : joiner['u_id'],
'name_first' : 'first',
'name_last' : 'last',
'profile_img_url': ''
}
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")",
"def channel_join(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # checks if user is already a part of channel\n for user_id in curr_channel[\"member_ids\"]:\n if curr_id == user_id:\n raise error.InputError(description=\"user is joining a channel user is already in\")\n\n # this checks if the channel is empty (or new) in this case we make the new member an owner.\n if curr_channel[\"member_ids\"] == []:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n # this checks if the user is an owner of the slacker\n # if they are they are given owner privelages in the channel\n # else they are a member\n elif user_perms[\"permission_id\"] == 1:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is True:\n # adds the user into the channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is False:\n raise error.InputError(description=\"\"\"channel_join recieved a channel_id\n for a private channel\"\"\")",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False",
"def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False",
"def channel_invite(token, channel_id, u_id):\n authorised_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n user_to_invite = users.get(u_id)\n if user_to_invite is None:\n raise ValueError(\"u_id does not exist.\")\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if authorised_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n channels.set(channel_id, \"all_members\", u_id)",
"def channel_invite(token, channel_id, u_id):\n\n if database.get_current_user(token) not in database.get_channel_data(channel_id)['member_ids']:\n raise error.AccessError(description=\"\"\"Authorised user is not\n a member of channel with that channel_id.\"\"\")\n if u_id in database.get_channel_data(channel_id).get('member_ids'):\n raise error.InputError(description=\"This user is already a part of the channel.\")\n\n new_channel_data = database.get_channel_data(channel_id)\n\n new_channel_data['member_ids'].append(u_id)\n if database.get_permission_dict(u_id).get('permission_id') == 1:\n new_channel_data['owner_ids'].append(u_id)\n\n database.set_channel_data(new_channel_data)\n\n return {}",
"def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])",
"def joinedChannel(self, channel, users):\n pass",
"def test_channel_join_private_global():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", False)\n\n\n # Global DREAM owner attempt to join a private channel \n channel_join_v2(auth_token1, channel_id1[\"channel_id\"])\n\n # Check if the global owner successfully join private channel\n assert channels_list_v2(auth_token1) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }",
"def channel_join(token, channel_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n user = users.get(auth_u_id)\n if user[\"is_admin\"] is not True and channel[\"is_public\"] is False:\n raise AccessError(\"channel is not public\")\n\n channels.set(channel_id, \"all_members\", auth_u_id)",
"def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])",
"def test_channel_join_normal_case():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()",
"def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")",
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))",
"def canInvite(session):\n if session.user[\"userlevel\"] == \"admin\":\n return True\n\n dOrg = session.user[\"defaultOrganisation\"] or \"apache\"\n if session.DB.ES.exists(index=session.DB.dbname, doc_type=\"organisation\", id=dOrg):\n xorg = session.DB.ES.get(\n index=session.DB.dbname, doc_type=\"organisation\", id=dOrg\n )[\"_source\"]\n if session.user[\"email\"] in xorg[\"admins\"]:\n return True",
"def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")",
"def is_party_channel(channel: discord.TextChannel) -> bool:\n return get_active_feature(channel) == ActivationState.PARTY",
"async def addjoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one_or_none()\n if existing:\n existing.joinable = True\n else:\n db_session.add(Channel(id=channel.id, name=channel.name, joinable=True))\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was added as a joinable channel.\")",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"def invite(self):\n pass",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))",
"async def test_staff_members_can_bypass_channel_restriction(self, create_embed, constants):\n constants.STAFF_PARTNERS_COMMUNITY_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.moderator, channel=helpers.MockTextChannel(id=200))\n\n await self.cog.user_info(self.cog, ctx)\n\n create_embed.assert_called_once_with(ctx, self.moderator, False)\n ctx.send.assert_called_once()"
] | [
"0.7135079",
"0.6953994",
"0.69395274",
"0.68968785",
"0.6849321",
"0.6775492",
"0.6759105",
"0.67089385",
"0.6606643",
"0.6557231",
"0.6467635",
"0.64343905",
"0.64186656",
"0.64149857",
"0.63559216",
"0.6322832",
"0.63140005",
"0.630644",
"0.6246793",
"0.6224127",
"0.62160075",
"0.62142015",
"0.62115926",
"0.62019324",
"0.619815",
"0.61824137",
"0.6147389",
"0.61391115",
"0.6130362",
"0.610227"
] | 0.73244226 | 0 |
checking if an InputError is raised when attempting to add a user as an owner who is already an owner | def test_channel_addowner_already_an_owner():
clear()
auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
with pytest.raises(InputError):
assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner",
"async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")",
"def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')",
"def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])",
"def _validate_ip_owner(ip, mac, row_number):\n mac = MACAddressField.normalize(mac)\n try:\n dev = Device.admin_objects.get(ethernet__mac=mac)\n except Device.DoesNotExist:\n if ip_address_exists(ip):\n raise forms.ValidationError(\n \"Row %s: IP address already exists.\" % row_number\n )\n else:\n # Does another device have this IPAddress?\n if(Device.objects.filter(\n ipaddress__number=int(ipaddr.IPAddress(ip)),\n ).exclude(\n pk=dev.id,\n ).exists()):\n raise forms.ValidationError(\n \"Row %s: IP address used by another device.\" % row_number\n )",
"def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)",
"def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))",
"def add():\r\n ch = input('You are about to ADD an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n')).lower().capitalize()) # lower, cap first, remove whitespace\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n')).lower().capitalize())\r\n\r\n if search2(xln, xfn): # search if an entry already exists for user's input\r\n print('An entry already exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return add() # if an entry already exists make user enter another\r\n\r\n xgr = None\r\n try: # try except user's inputted grade\r\n xgr = int(input('Grade?\\n'))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n add()\r\n\r\n xsr = str(input('Stream? (eg. Academic, IB, etc...)\\n')).lower().capitalize()\r\n xrl = str(input('Role? (eg. Design Member)\\n')).lower().capitalize()\r\n xcm = str(input('Any comments?\\n')).lower().capitalize()\r\n\r\n ch2 = input('Are you sure you wish to add this individual to the database? YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been added to the database.')\r\n with conn: # input corresponding info to table with context manager\r\n c.execute(\"\"\"INSERT INTO personnel VALUES (\r\n :last, :first, :grade, :stream, :role, :comments)\"\"\",\r\n {'last': xln, 'first': xfn, 'grade': xgr, 'stream': xsr, 'role': xrl, 'comments': xcm})\r\n\r\n start() # after user's action has been completed, ask for another\r\n else:\r\n print('Your add action has been cancelled.')\r\n start()\r\n else: # ask for another if user wishes to perform another action\r\n start()",
"async def cog_command_error(self, ctx:utils.Context, error:commands.CheckFailure):\n\n # Throw errors properly for me\n if ctx.author.id in self.bot.config['owners']:\n text = f'```py\\n{error}```'\n await ctx.send(text)\n raise error\n\n elif isinstance(error, commands.NotOwner):\n await ctx.send(\"You need to be registered as an owner to run this command.\")\n return",
"def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"[email protected]\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()",
"async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner",
"def validate_username(self, username):\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username already exists. Please choose another username.')",
"def test__validate_owner__0():\n user = User.precreate(202211270021)\n team = Team.precreate(202211270022)\n \n for input_value, expected_output in (\n (None, ZEROUSER),\n (user, user),\n (team, team),\n ):\n owner = validate_owner(input_value)\n vampytest.assert_is(owner, expected_output)",
"def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")",
"def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n us = User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"Phone number must already be registered before doing this\")\n\n if us.hierarchy != 'master':\n raise serializers.ValidationError(\"Phone number must not be a slave to another user\")\n\n return attrs",
"def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")",
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def verify_user(self):\n if self.username == \"root\":\n print \"Error: Please do not run this script as root.\"\n sys.exit(1)\n\n members = grp.getgrnam(self.groupowner)[3]\n if not self.username in members:\n print \"Error: The user who runs this script must belong to the group: \" + self.groupowner\n sys.exit(1)",
"def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)",
"def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]",
"def _onAdd(self, event):\n finished = False\n pattern = re.compile(r'[^a-zA-Z0-9_]')\n while not finished:\n dialog = wx.TextEntryDialog(self, 'New user name.', 'Username',\n '', style = wx.OK | wx.CANCEL)\n if dialog.ShowModal() == wx.ID_OK:\n newusername = dialog.GetValue()\n if re.search(pattern, newusername):\n message = wx.MessageDialog(self, _USERNAME_ERROR_MESSAGE,\n 'Error', wx.OK | wx.ICON_ERROR)\n message.ShowModal()\n elif newusername in self.users:\n message = wx.MessageDialog(self,\n 'That user already exists.',\n 'Error', wx.OK | wx.ICON_ERROR)\n else:\n c.addUser(newusername)\n self.users.append(newusername)\n self.userlist.SetItems(self.users)\n finished = True\n else:\n finished = True",
"def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c",
"def username_prompt(): \n\n print(\"Valid usernames contain only the characters 'a-z', e.g. pdiddy.\")\n\n while True: \n username = str(input(\"Enter username to add: \"))\n confirm_name = str(input(\"To confirm, re-enter username: \"))\n \n if username != confirm_name or not re.match(\"^[a-z]+$\", username):\n print(TRY_AGAIN)\n continue \n \n else:\n print(\"OK, checking if user: %s exists...\" %(username))\n return username",
"def test_no_owner_exception(api: API, owners: list):\n api.candlepin.get_owners.return_value = owners\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n with pytest.raises(IndexError):\n account.owner_id",
"def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)"
] | [
"0.66523606",
"0.62497765",
"0.6178345",
"0.61579764",
"0.61557573",
"0.6095019",
"0.60053355",
"0.6005061",
"0.59492403",
"0.5933165",
"0.59200364",
"0.590281",
"0.58125436",
"0.57823414",
"0.5743092",
"0.5735133",
"0.5727868",
"0.5720738",
"0.57188445",
"0.5711546",
"0.5709298",
"0.5688679",
"0.56570625",
"0.56531507",
"0.564492",
"0.56375366",
"0.5630461",
"0.5612386",
"0.55922127",
"0.5585293"
] | 0.6923221 | 0 |
checking if an AccessError is raised as expected when the owner of flockr is not a member of the channel | def test_channel_addowner_owner_flockr_not_member():
clear()
register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
with pytest.raises(AccessError):
assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])",
"def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])",
"def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))",
"def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])",
"async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")",
"async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True",
"def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def available(self, o):\n return not self.locked() or self.isowner(o)",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def get_everyone_denied(self):",
"def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))"
] | [
"0.7090575",
"0.6885205",
"0.68405485",
"0.650094",
"0.63612247",
"0.6272654",
"0.6271527",
"0.6261493",
"0.62362427",
"0.616833",
"0.61552966",
"0.611927",
"0.6118742",
"0.6108415",
"0.60745674",
"0.60703945",
"0.6057438",
"0.6057438",
"0.6045126",
"0.60374135",
"0.6017522",
"0.60170907",
"0.5998715",
"0.5953348",
"0.591603",
"0.58893955",
"0.58893955",
"0.58849776",
"0.5876605",
"0.5849653"
] | 0.7249187 | 0 |
checking if removing an owner with an invalid user ID raises an InputError | def test_channel_removeowner_invalid_user_id():
clear()
auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
with pytest.raises(InputError):
assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], "[email protected]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])",
"def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)",
"def remove():\r\n ch = input('You are about to REMOVE an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n'))).lower().capitalize()\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n'))).lower().capitalize()\r\n\r\n if not search2(xln, xfn):\r\n print('No entry exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return remove()\r\n\r\n ch2 = input('Are you sure you wish to remove this individual from the database? YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been removed from the database.')\r\n with conn:\r\n c.execute(\"\"\"DELETE from personnel WHERE first=:first COLLATE NOCASE and last=:last COLLATE NOCASE\"\"\",\r\n {'first': xfn, 'last': xln})\r\n\r\n start()\r\n else:\r\n print('Your remove action has been cancelled.')\r\n start()\r\n else:\r\n start()",
"def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner",
"def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def delete_user():",
"def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)",
"def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })",
"def test_delete_user_by_id_mismatch(client: FlaskClient) -> None:\n username = create_random_username()\n other_username = create_random_username()\n auth_token = create_auth_token(other_username)\n response = delete_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)",
"def handle_owner_delete(owner_id):\n\n owner = Owner.find_by_id(owner_id)\n # flash error message if owner does not exist\n if not owner:\n flash(f'Owner does not exist!', 'danger')\n return 'not deleted', 404\n # flash error message if owner still has existing content\n elif owner.contents:\n flash(f'{owner.owner_name} still has existing content!', 'danger')\n return 'not deleted', 400\n\n # owner is deleted and user is redirected (redirect code in owners.js)\n # deleting owner errors handled\n try:\n owner.delete_owner()\n except HTTPException:\n return \"Server cannot delete the owner at this time\", 500\n\n flash(f'{owner.owner_name} has been deleted!', 'success')\n return 'deleted', 202",
"async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True",
"def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])",
"def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_remove_user(self):\n db = database.Database()\n db.remove_user('nick')\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'DELETE FROM quota_violations WHERE username LIKE (%s)'\n\n self.assertEqual(sql, expected_sql)",
"def collection_special_author_cancel(user_id, author_id):\n\n another_user_id = author_id\n if (user_id == another_user_id):\n return \"self\"\n query = db_session.query(Collection_User).filter_by(\n user_id=user_id, another_user_id=another_user_id).all()\n if len(query) == 1:\n db_session.delete(query[0])\n db_session.commit()\n update_collection_num(user_id, another_user_id, False)\n else:\n return \"already\"\n return \"success\"",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def _delete_user(self, user):\n if User.delete_user(user):\n self.session.output({'deleted': 'user {} and their related accounts'.format(user)})\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to delete user ]')\n return False",
"def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')",
"def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)",
"def test_remove_user(self):\n pass",
"def test_handle_remove_github_error(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.side_effect = GithubAPIException(\"error\")\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User removed unsuccessfully with the \"\n \"following error: error\", 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")",
"def test_user_id_delete(self):\n pass",
"def delete_user():\n #TODO user delete\n pass",
"def delete_user(id):\n pass",
"def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])"
] | [
"0.6941911",
"0.67129594",
"0.6252531",
"0.62405235",
"0.6239856",
"0.62261367",
"0.62119555",
"0.62015164",
"0.61586416",
"0.6044805",
"0.60105914",
"0.6007393",
"0.59954685",
"0.59929734",
"0.59919494",
"0.5990558",
"0.598856",
"0.598853",
"0.598045",
"0.594045",
"0.5923282",
"0.59194237",
"0.59108675",
"0.5897748",
"0.5883618",
"0.5880244",
"0.58571076",
"0.5854038",
"0.58468896",
"0.5845754"
] | 0.72741085 | 0 |
checking that removing an owner without owner permissions raises an AccessError | def test_channel_removeowner_not_owner_permissions():
clear()
auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
with pytest.raises(AccessError):
assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def has_remove_permissions(self, obj):\n return True",
"def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')",
"def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))",
"def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False",
"def test_permission_remove_action_not_granted(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove anonymous TICKET_CREATE')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)",
"def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")",
"def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")",
"def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())",
"def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)",
"def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))",
"def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True",
"def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())",
"def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")",
"async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True",
"def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")",
"def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"[email protected]\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)",
"def _try_delete_and_return_permissions_error(component_url):\n try:\n delete_object_task.DeleteObjectTask(component_url, verbose=False).execute()\n except api_errors.CloudApiError as e:\n status = getattr(e, 'status_code', None)\n if status == 403:\n return e\n raise",
"def _check_namespace_access(self, namespace, user):\n if not namespace.owners.filter(id=user.id).count():\n raise exceptions.PermissionDenied(\n 'The namespace listed on your filename must match one of '\n 'the namespaces you have access to.'\n )",
"def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()",
"def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))"
] | [
"0.6874314",
"0.6790394",
"0.6770987",
"0.67394656",
"0.6579873",
"0.6498488",
"0.6473038",
"0.642164",
"0.63643247",
"0.63571066",
"0.6270544",
"0.62456524",
"0.62216616",
"0.62143797",
"0.6190839",
"0.6165569",
"0.6157032",
"0.6144713",
"0.61358136",
"0.6118853",
"0.6115285",
"0.6112563",
"0.6102568",
"0.6087466",
"0.60833216",
"0.60736954",
"0.60509664",
"0.60501486",
"0.60377055",
"0.6037685"
] | 0.7237803 | 0 |
checking whether an owner who is the last remaining owner of the channel can be removed | def test_channel_removeowner_last_owner():
clear()
register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
channel_join(register_first_result['token'], randChannel_id['channel_id'])
#register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
#channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
    # removing the second user, who is the channel's only owner
channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")",
"def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)",
"def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })",
"def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")",
"async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True",
"def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def unorphaned(self):\n return self.new_owner == self.user",
"def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def is_owner(self, author):\n return not self.server or author == self.server.owner",
"def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")",
"async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner",
"def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)",
"def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)",
"def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)",
"def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up",
"def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])",
"def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")"
] | [
"0.73305464",
"0.7195283",
"0.7129515",
"0.712579",
"0.70183945",
"0.6805983",
"0.67637455",
"0.66506666",
"0.65761715",
"0.6486065",
"0.6440695",
"0.6412174",
"0.6344784",
"0.6314831",
"0.6306158",
"0.62849295",
"0.62610173",
"0.6227882",
"0.6078866",
"0.607737",
"0.60758984",
"0.6071704",
"0.6057858",
"0.60495794",
"0.60468155",
"0.60443354",
"0.6019361",
"0.5957048",
"0.59558165",
"0.5955704"
] | 0.7552469 | 0 |
checking if the flockr owner, who is not a channel owner, can remove an owner of the channel | def test_channel_removeowner_owner_flockr():
clear()
register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
channel_join(register_first_result['token'], randChannel_id['channel_id'])
channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")",
"def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })",
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")",
"def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")",
"async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True",
"def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner",
"def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)",
"def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])",
"def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS",
"def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")",
"async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")",
"def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)",
"def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)",
"def is_owner(self, author):\n return not self.server or author == self.server.owner",
"def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up"
] | [
"0.7614531",
"0.74451256",
"0.7375203",
"0.70898795",
"0.695579",
"0.68203926",
"0.68009984",
"0.6557617",
"0.6531711",
"0.651857",
"0.64480776",
"0.639784",
"0.6383161",
"0.6346579",
"0.63352793",
"0.62960595",
"0.618517",
"0.6165083",
"0.6161233",
"0.61429024",
"0.61311483",
"0.60626054",
"0.6053295",
"0.5988746",
"0.5937176",
"0.59287184",
"0.59270394",
"0.5915345",
"0.59145075",
"0.590622"
] | 0.7702833 | 0 |
checking that an AccessError is raised as expected when the flockr owner is not a member of the channel | def test_channel_removeowner_owner_flockr_not_member():
clear()
register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
with pytest.raises(AccessError):
assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])",
"def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])",
"def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])",
"def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))",
"def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")",
"def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])",
"async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True",
"def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def available(self, o):\n return not self.locked() or self.isowner(o)",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def get_everyone_denied(self):",
"def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))"
] | [
"0.72484523",
"0.688483",
"0.68403906",
"0.65012413",
"0.6361957",
"0.62719554",
"0.6271346",
"0.626074",
"0.6235811",
"0.6169581",
"0.6156016",
"0.6120171",
"0.61183786",
"0.6108167",
"0.6076352",
"0.6069219",
"0.6057893",
"0.6057893",
"0.60458887",
"0.6039936",
"0.6019325",
"0.601874",
"0.6000169",
"0.59550685",
"0.59170294",
"0.5891027",
"0.5891027",
"0.58853716",
"0.5876037",
"0.58495075"
] | 0.7089838 | 1 |
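For context on the behaviour this record's test pins down: even the global flockr owner must have joined a channel before exercising owner permissions inside it, which is why `channel_removeowner` is expected to raise `AccessError` here. The sketch below is hypothetical — the helper names, the `error` module, and the dictionary layout are assumptions for illustration, not the project's actual implementation.

```python
# Hypothetical sketch of the guard the test above exercises.
# `error`, `get_user_from_token` and `get_channel` are assumed helpers, not real project code.
from error import AccessError, InputError  # assumed module layout

def channel_removeowner(token, channel_id, u_id):
    caller = get_user_from_token(token)   # assumed: resolves a token to a user dict
    channel = get_channel(channel_id)     # assumed: resolves an id to a channel dict

    # Global (flockr) ownership alone is not enough: the caller must be a channel member.
    if caller['u_id'] not in [m['u_id'] for m in channel['all_members']]:
        raise AccessError('Authorised user is not a member of this channel')

    if u_id not in [m['u_id'] for m in channel['owner_members']]:
        raise InputError('Target user is not an owner of this channel')

    channel['owner_members'] = [m for m in channel['owner_members'] if m['u_id'] != u_id]
    return {}
```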
Sets a system Hamiltonian to the Hubbard Hamiltonian. Does exactly this. If the system Hamiltonian has some other terms in it, they are not touched. So be sure to use this function only on newly created `System` objects. | def set_hamiltonian(self, system):
system.clear_hamiltonian()
if 'bh' in system.left_block.operators.keys():
system.add_to_hamiltonian(left_block_op='bh')
if 'bh' in system.right_block.operators.keys():
system.add_to_hamiltonian(right_block_op='bh')
system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U))
system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U))
system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U))
system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U))
# system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U)
# system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U)
# system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U)
# system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U)
system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.)
system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.)
system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.)
system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.)
system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.)
system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.)
system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.)
system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.)
system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)
system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)
system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)
system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)\n system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)",
"def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_block_hamiltonian('c_up_dag', 'c_up', -1.)\n system.add_to_block_hamiltonian('c_down', 'c_down_dag', -1.)\n system.add_to_block_hamiltonian('c_down_dag', 'c_down', -1.)\n system.add_to_block_hamiltonian('id', 'u', self.U)\n system.add_to_block_hamiltonian('u', 'id', self.U)",
"def set_hamiltonian_to_AF_Heisenberg(system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('id', 'id', 's_z', 's_z')\n system.add_to_hamiltonian('id', 'id', 's_p', 's_m', .5)\n system.add_to_hamiltonian('id', 'id', 's_m', 's_p', .5)\n system.add_to_hamiltonian('id', 's_z', 's_z', 'id')\n system.add_to_hamiltonian('id', 's_p', 's_m', 'id', .5)\n system.add_to_hamiltonian('id', 's_m', 's_p', 'id', .5)\n system.add_to_hamiltonian('s_z', 's_z', 'id', 'id')\n system.add_to_hamiltonian('s_p', 's_m', 'id', 'id', .5)\n system.add_to_hamiltonian('s_m', 's_p', 'id', 'id', .5)",
"def set_block_hamiltonian(self, tmp_matrix_for_bh, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U))\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U))\n# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U)\n# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)",
"def set_block_hamiltonian_to_AF_Heisenberg(system):\n tmp_matrix_size = None\n if system.growing_side == 'left':\n tmp_matrix_size = system.get_left_dim()\n else: \n tmp_matrix_size = system.get_right_dim()\n tmp_matrix_for_bh = np.zeros((tmp_matrix_size, tmp_matrix_size))\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_p', 's_m', .5)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_m', 's_p', .5)\n system.operators_to_add_to_block['bh'] = tmp_matrix_for_bh",
"def Hamiltonian(self):\n return None",
"def generate_hamiltonian(self):\n ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n\n if self.pulses is not None:\n self.pulses.generate_pulses(dimensions=ham.dimensions, bath=self.cluster, vectors=ham.vectors)\n\n return ham",
"def __init__(self, hamiltonian):\n self.ham = hamiltonian",
"def hamiltonian(self):\n hamiltonian = self.bare_hamiltonian()\n for interaction_term in self.interaction_list:\n hamiltonian += interaction_term.hamiltonian()\n return hamiltonian",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)",
"def set_hbond(self) -> None:\n ...",
"def hubbard_hamiltonian_MF(H_no_Hubbard, ns_up, ns_dn, U): \n n_orb = H_no_Hubbard.shape[0]\n ns = [ns_up, ns_dn]\n H = []\n for i in [0, 1]:\n Hi = copy.deepcopy(H_no_Hubbard)\n Hi = Hi + U*ns[1-i]*np.identity(n_orb)\n H.append(Hi)\n return H",
"def get_hamiltonian(self):\n return self.hamiltonian()",
"def get_hamiltonian(self):\n assert (self._integrator == 'HMC' and self._metric == 'Euclidean') or self._integrator == 'RMHMC', 'Parameter dependent metrics require the RMHMC integrator'\n if self._integrator == 'RMHMC':# and self._metric != 'Euclidean':\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.inverse_ = self.metric_.inverse()\n self.capacitor_ = self.get_capacitor()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.capacitor_ + self.kinetic_\n else:\n self.potential_ = self.get_potential()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.kinetic_\n self.hamiltonian_ = ham\n return ham",
"def set_operators_to_update_to_AF_Heisenberg(system):\n system.add_to_operators_to_update('s_z', site_op='s_z')\n system.add_to_operators_to_update('s_p', site_op='s_p')\n system.add_to_operators_to_update('s_m', site_op='s_m')",
"def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)",
"def get_bare_hamiltonian(self):\n warnings.warn('bare_hamiltonian() is deprecated, use bare_hamiltonian() instead', FutureWarning)\n return self.bare_hamiltonian()",
"def test_hamiltonian(model):\n h = model.hamiltonian\n assert isinstance(h, csr_matrix)\n assert h.dtype == np.float32\n assert h.shape == (2, 2)\n assert pytest.fuzzy_equal(h.data, [graphene.t] * 2)\n assert pytest.fuzzy_equal(h.indices, [1, 0])\n assert pytest.fuzzy_equal(h.indptr, [0, 1, 2])\n\n assert h.data.flags['OWNDATA'] is False\n assert h.data.flags['WRITEABLE'] is False\n\n with pytest.raises(ValueError) as excinfo:\n h.data += 1\n assert \"read-only\" in str(excinfo.value)\n\n h2 = model.hamiltonian\n assert h2.data is not h.data\n assert point_to_same_memory(h2.data, h.data)",
"def bare_hamiltonian(self):\n bare_hamiltonian = 0\n for subsys in self:\n evals = subsys.eigenvals(evals_count=subsys.truncated_dim)\n bare_hamiltonian += self.diag_hamiltonian(subsys, evals)\n return bare_hamiltonian",
"def _hamiltonian(\n self,\n y: phase_space.PhaseSpace,\n params: utils.Params,\n **kwargs: Any\n ) -> jnp.ndarray:",
"def test_set_hs(self):\n s = State(substance=\"water\")\n s.hs = Q_(1061602.391543017, \"J/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hs[0], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.hs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore",
"def display_hamiltonian(H):\n terms = split_hamiltonian(H)\n\n def label(s):\n if s == 'H0':\n return r'\\hat{H}_0'\n elif s == 'Hint':\n return r'\\hat{H}_{\\text{int}}'\n else:\n try:\n prefix, ind = s.split('_')\n except ValueError:\n print(s)\n raise\n return r'\\hat{H}_{\\Omega_%s}' % ind\n\n lines = []\n lines.append(r'\\begin{align}')\n lines.append(r' \\hat{H} &= %s\\\\' % \" + \".join([label(name) for name in terms.keys()]))\n for name, H in terms.items():\n lines.append(r' %s &= %s\\\\' % (label(name), tex(H)))\n lines.append(r'\\end{align}')\n display(Latex(\"\\n\".join(lines)))",
"def _ctrl_hum_set(self, osrs_h):\n data = osrs_h & 0x7\n self._bus.write_byte_data(self.addr, self.CTRL_HUM,\n data)",
"def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)",
"def Hamiltonian(self):\n Vmat = sparse.spdiags([self.U], [0], len(self.U), len(self.U))\n Kmat = -self.KE * Schrodinger.D2mat(numpts=len(self.x), delta=self.x[1] - self.x[0], periodic=self.periodic,\n q=self.q)\n return Kmat + Vmat",
"def set_state(state):\n global HMC_MOM\n assert type(state) == dict, 'state has to be a state dictionary'\n assert state.has_key('randstate'), 'state does not contain randstate'\n assert state.has_key('mom'), 'state does not contain momentum'\n np.random.set_state(state['randstate'])\n HMC_MOM = state['mom']",
"def set_operators_to_update(self, system):\n # If you have a block hamiltonian in your block, update it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_operators_to_update('bh', block_op='bh')\n system.add_to_operators_to_update('c_up', site_op='c_up')\n system.add_to_operators_to_update('c_up_dag', site_op='c_up_dag')\n system.add_to_operators_to_downdate('c_down', site_op='c_down')\n system.add_to_operators_to_downdate('c_down_dag', site_op='c_down_dag')\n system.add_to_operators_to_update('u', site_op='u')"
] | [
"0.77002096",
"0.7557386",
"0.74360436",
"0.7119834",
"0.7010251",
"0.6295535",
"0.60164326",
"0.6015403",
"0.59823936",
"0.58867145",
"0.58867145",
"0.5827976",
"0.57418686",
"0.56327134",
"0.56063366",
"0.55363756",
"0.5516487",
"0.5502509",
"0.54257786",
"0.53823394",
"0.5379448",
"0.5376192",
"0.5376085",
"0.5345973",
"0.53369564",
"0.5248801",
"0.5248801",
"0.5248193",
"0.5241568",
"0.5221934"
] | 0.7601821 | 1 |
Sets the block Hamiltonian to the Hubbard model block Hamiltonian. | def set_block_hamiltonian(self, tmp_matrix_for_bh, system):
# If you have a block hamiltonian in your block, add it
if 'bh' in system.growing_block.operators.keys():
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U))
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U))
# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U)
# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)
system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_block_hamiltonian('c_up_dag', 'c_up', -1.)\n system.add_to_block_hamiltonian('c_down', 'c_down_dag', -1.)\n system.add_to_block_hamiltonian('c_down_dag', 'c_down', -1.)\n system.add_to_block_hamiltonian('id', 'u', self.U)\n system.add_to_block_hamiltonian('u', 'id', self.U)",
"def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)\n system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)",
"def set_block_hamiltonian_to_AF_Heisenberg(system):\n tmp_matrix_size = None\n if system.growing_side == 'left':\n tmp_matrix_size = system.get_left_dim()\n else: \n tmp_matrix_size = system.get_right_dim()\n tmp_matrix_for_bh = np.zeros((tmp_matrix_size, tmp_matrix_size))\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_p', 's_m', .5)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_m', 's_p', .5)\n system.operators_to_add_to_block['bh'] = tmp_matrix_for_bh",
"def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U))\n \n# system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U)\n\n system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.)\n \n system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.)\n\n system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)",
"def set_hamiltonian_to_AF_Heisenberg(system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('id', 'id', 's_z', 's_z')\n system.add_to_hamiltonian('id', 'id', 's_p', 's_m', .5)\n system.add_to_hamiltonian('id', 'id', 's_m', 's_p', .5)\n system.add_to_hamiltonian('id', 's_z', 's_z', 'id')\n system.add_to_hamiltonian('id', 's_p', 's_m', 'id', .5)\n system.add_to_hamiltonian('id', 's_m', 's_p', 'id', .5)\n system.add_to_hamiltonian('s_z', 's_z', 'id', 'id')\n system.add_to_hamiltonian('s_p', 's_m', 'id', 'id', .5)\n system.add_to_hamiltonian('s_m', 's_p', 'id', 'id', .5)",
"def set_hbond(self) -> None:\n ...",
"def Hamiltonian(self):\n return None",
"def __init__(self, hamiltonian):\n self.ham = hamiltonian",
"def setHBin(self, hbin):\n with self.lock:\n self.hbin = hbin",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)",
"def hamiltonian(self):\n hamiltonian = self.bare_hamiltonian()\n for interaction_term in self.interaction_list:\n hamiltonian += interaction_term.hamiltonian()\n return hamiltonian",
"def generate_hamiltonian(self):\n ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n\n if self.pulses is not None:\n self.pulses.generate_pulses(dimensions=ham.dimensions, bath=self.cluster, vectors=ham.vectors)\n\n return ham",
"def set_mass_flow(self):\n self.exh.mdot_exp = self.exh.flow_array * self.exh.rho_array\n self.exh.C = self.exh.mdot_exp * self.exh.c_p_air\n self.exh.enthalpy_flow = self.exh.C * self.exh.T_inlet_array",
"def get_hamiltonian(self):\n return self.hamiltonian()",
"def get_hamiltonian(self):\n assert (self._integrator == 'HMC' and self._metric == 'Euclidean') or self._integrator == 'RMHMC', 'Parameter dependent metrics require the RMHMC integrator'\n if self._integrator == 'RMHMC':# and self._metric != 'Euclidean':\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.inverse_ = self.metric_.inverse()\n self.capacitor_ = self.get_capacitor()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.capacitor_ + self.kinetic_\n else:\n self.potential_ = self.get_potential()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.kinetic_\n self.hamiltonian_ = ham\n return ham",
"def hubbard_hamiltonian_MF(H_no_Hubbard, ns_up, ns_dn, U): \n n_orb = H_no_Hubbard.shape[0]\n ns = [ns_up, ns_dn]\n H = []\n for i in [0, 1]:\n Hi = copy.deepcopy(H_no_Hubbard)\n Hi = Hi + U*ns[1-i]*np.identity(n_orb)\n H.append(Hi)\n return H",
"def set_H0(self):\n self.slot.H0 = self.lf_H0.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def h(self, h):\n\n self._h = h",
"def __init__(self, breite, höhe,\n block_größe=None, lebendig=set()):\n if block_größe is None:\n block_größe = min(9, 800//min(breite, höhe)) or 1\n self.breite = breite\n self.höhe = höhe\n self.block = int(block_größe)\n self.lebendig = lebendig\n try:\n self.sense = SenseHat()\n except OSError:\n self.sense = None\n super().__init__()",
"def get_bare_hamiltonian(self):\n warnings.warn('bare_hamiltonian() is deprecated, use bare_hamiltonian() instead', FutureWarning)\n return self.bare_hamiltonian()",
"def set_heading(self, heading):\n self._kernel.set_heading(float(heading))",
"def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)",
"def block(self, block):\n\n self._block = block",
"def block(self, block):\n\n self._block = block",
"def __init__(self, d_model, n_heads, use_cos, kernel, dropout,\n ffn_ratio, ln_eps, denom_eps, bias):\n super(MHA_block_rezero, self).__init__()\n self.mha = MHA(\n d_model, n_heads, use_cos, kernel, dropout, denom_eps, bias)\n self.ffn = FFN(d_model, ffn_ratio, dropout, bias)\n self.alpha = nn.Parameter(torch.Tensor([0]))",
"def set_channel_h_unit(self , channel_h_unit:float):\n self.__channel_h_unit = channel_h_unit",
"def penblock(self, block):\n self.block = block",
"def test_hamiltonian(model):\n h = model.hamiltonian\n assert isinstance(h, csr_matrix)\n assert h.dtype == np.float32\n assert h.shape == (2, 2)\n assert pytest.fuzzy_equal(h.data, [graphene.t] * 2)\n assert pytest.fuzzy_equal(h.indices, [1, 0])\n assert pytest.fuzzy_equal(h.indptr, [0, 1, 2])\n\n assert h.data.flags['OWNDATA'] is False\n assert h.data.flags['WRITEABLE'] is False\n\n with pytest.raises(ValueError) as excinfo:\n h.data += 1\n assert \"read-only\" in str(excinfo.value)\n\n h2 = model.hamiltonian\n assert h2.data is not h.data\n assert point_to_same_memory(h2.data, h.data)",
"def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)"
] | [
"0.7961094",
"0.72680366",
"0.70075583",
"0.69372654",
"0.6821623",
"0.6276145",
"0.61712486",
"0.60667217",
"0.59148175",
"0.5814612",
"0.5814612",
"0.55943906",
"0.55346644",
"0.5519819",
"0.54950064",
"0.5393633",
"0.5378804",
"0.53554547",
"0.5317289",
"0.5309732",
"0.5309382",
"0.520792",
"0.5193114",
"0.5176767",
"0.5176767",
"0.51747555",
"0.51619595",
"0.5158015",
"0.5138185",
"0.5113407"
] | 0.77650434 | 1 |
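The two records above name the Hubbard model; their top-ranked negatives assemble it directly from 'c_up'/'c_down' hopping terms with coefficient -1 and an on-site 'u' term with coefficient U, while the positive documents use a reparametrised ('dimer'/'rprm_*') operator set. For reference, the conventional one-band Hubbard Hamiltonian those docstrings refer to is written below in standard textbook notation; it is not taken from the dataset itself.

```latex
% One-band Hubbard Hamiltonian; the snippets above use hopping amplitude t = 1.
\begin{equation}
  H \;=\; -t \sum_{\langle i,j\rangle,\,\sigma}
      \left( c^{\dagger}_{i\sigma} c_{j\sigma} + c^{\dagger}_{j\sigma} c_{i\sigma} \right)
  \;+\; U \sum_{i} n_{i\uparrow}\, n_{i\downarrow},
  \qquad n_{i\sigma} = c^{\dagger}_{i\sigma} c_{i\sigma}.
\end{equation}
```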
Test ExponentialFamily class initialization. | def test_exponential_family_init():
D = 4
N = 100
exp_fam = ExponentialFamily(D)
assert exp_fam.D == D
assert exp_fam.support_layer is None
assert exp_fam.D_eta == D
with raises(TypeError):
exp_fam = ExponentialFamily('foo')
with raises(ValueError):
exp_fam = ExponentialFamily(0)
with raises(TypeError):
exp_fam = ExponentialFamily(4, int)
with raises(NotImplementedError):
exp_fam.sample_eta(N)
mu = np.zeros((D,))
with raises(NotImplementedError):
exp_fam.mu_to_eta(mu)
eta = np.zeros((D,))
with raises(NotImplementedError):
exp_fam.eta_to_mu(eta)
z = np.zeros((D,))
with raises(NotImplementedError):
exp_fam.T(z)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test___init__(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n self.assertTrue(hasattr(ga, 'register'))\n\n # should have called evalute\n self.assertEqual(ga.generations[-1].new, 0)\n\n # should have registered a default ranking function\n self.assertEqual(np.round(np.sum(ga.rank())), len(f0))",
"def setUp(self):\n # Values copied from head of factors.py file, simulating initial import.\n factors._soe_prime_cache = [2, 3]\n factors._soe_not_prime_map = {9: 3}\n factors._soe_c = 5",
"def __init__(self, expected, test_func):\n self._f = test_func\n self._exp = expected",
"def test_epsf_build_invalid_fitter(self):\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=EPSFFitter, maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)",
"def setUp(self):\n self.employee = Employee('John', 'Doe', 50000)\n self.raise_amount = 20000",
"def setUp(self):\n self.frequency = -2017.96\n self.E0_reac = -295.563\n self.E0_TS = -12.7411\n self.E0_prod = (-10.2664) + (-253.48)\n self.tunneling = Eckart(\n frequency=(self.frequency, \"cm^-1\"),\n E0_reac=(self.E0_reac, \"kJ/mol\"),\n E0_TS=(self.E0_TS, \"kJ/mol\"),\n E0_prod=(self.E0_prod, \"kJ/mol\"),\n )",
"def test_init(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c.Metric, observed_otus)\r\n self.assertEqual(c.Params, {})",
"def test_sphere_init():\n Sphere(5)",
"def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining = 10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)",
"def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining=10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)",
"def test_constructor(self):\n pass",
"def setUpClass(cls):\n super(CephFSTests, cls).setUpClass()",
"def test_init_error_handling(self):\n with pytest.raises(ValueError) as err:\n hll = HyperLogLog(2)\n assert err.value.message == \"k=2 should be in range [16, 65536]\"\n with pytest.raises(ValueError) as err:\n hll = HyperLogLog(2**17)\n assert err.value.message == \"k=131072 should be in range [16, 65536]\"\n hll = HyperLogLog(2**16)\n assert hll.k == 2**16\n hll = HyperLogLog64(2**17)\n assert hll.k == 2**17",
"def setUpClass(cls):\n np.random.seed(2019)\n # So the 1 st row of the first random number array, random.rand(500, 3)\n # will be [0.90348221, 0.39308051, 0.62396996]\n # Accordingly, the first row of\n # coordinates = (0.5 - np.random.rand(500, 3)) * box_length\n # should be [-3.31690899, 0.87895379, -1.01912071]\n cls.sys_obj = monte_carlo.SystemSetup()\n cls.energy = energy.Energy()\n cls.parser = monte_carlo.initialize()\n cls.sim = monte_carlo.MonteCarlo(\n cls.sys_obj, cls.energy, cls.parser)\n np.random.seed()",
"def setUpClass(cls):\n test_family = 'H_Abstraction'\n\n # set-up RMG object\n rmg = RMG()\n\n # load kinetic database and forbidden structures\n rmg.database = RMGDatabase()\n path = os.path.join(settings['test_data.directory'], 'testing_database')\n\n # kinetics family loading\n rmg.database.load_kinetics(os.path.join(path, 'kinetics'),\n kinetics_families=[test_family],\n reaction_libraries=[]\n )\n # load empty forbidden structures to avoid any dependence on forbidden structures\n # for these tests\n for family in rmg.database.kinetics.families.values():\n family.forbidden = ForbiddenStructures()\n rmg.database.forbidden_structures = ForbiddenStructures()",
"def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")",
"def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX)",
"def test_hmf_init(self):\n spec = np.random.random((20, 100))\n invvar = np.random.random((20, 100))\n hmf = HMF(spec, invvar)\n assert hmf.K == 4\n assert log.level == 20 # INFO\n hmf = HMF(spec, invvar, K=6, verbose=True)\n assert hmf.K == 6\n assert log.level == 10 # DEBUG",
"def __init__(self, n=0, e=0):\r\n raise NotImplementedError()",
"def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStreamVariant.readBinary(SIMPLE_HEXZ_NHFLUX_VARIANT)",
"def test___init__(self):\n copula = GammaUnivariate()\n assert copula.a is None\n assert copula.loc is None\n assert copula.scale is None",
"def test_01_Setup(self):\n # print(PrettyFormatAny.form(VALID_FAMILIES, 'A1-01-A - Valid'))\n self.assertEqual(len(VALID_FAMILIES), len(self.m_pyhouse_obj._Families))\n self.assertEqual(VALID_FAMILIES[0], TESTING_FAMILY_NAME_0) # Null\n self.assertEqual(VALID_FAMILIES[1], TESTING_FAMILY_NAME_1) # Insteon\n self.assertEqual(VALID_FAMILIES[2], TESTING_FAMILY_NAME_2) # UPB\n self.assertEqual(VALID_FAMILIES[3], TESTING_FAMILY_NAME_3) # X-10\n self.assertEqual(VALID_FAMILIES[4], TESTING_FAMILY_NAME_4) # Hue",
"def test_constructor(self, name, num_petals, price):\n with pytest.raises(AssertionError):\n chap2.Flower(name, num_petals, price)",
"def setUpClass(cls):\n celltype_analyse = \"Adipocyte - breast\"\n data_type = \"promoters\"\n sample_type = \"primary cells\"\n parsed = False\n files_path = \"test\"\n cls.element_list = ('chr10:100027943..100027958,-', 'chr10:100174900..100174956,-',\n 'chr10:100204220..100204230,-', 'chr10:100206642..100206717,-')\n expression_obj = iext.CheckElementExpression(inputs=cv.test_promoter_file_name,\n element_list=cls.element_list,\n cell_type=celltype_analyse,\n data_type=data_type, sample_type=sample_type,\n parsed=parsed, files_path=files_path)\n cls.expression = expression_obj.export_expression_data(method=\"return\")",
"def __init__(self):\n self.expvalue = np.zeros(10)\n self.iter = np.zeros(10)\n self.epsilon = 0.1",
"def test_2X_constructor(self):\n path_to_config = os.path.join(CONST.ROOT, 'CONSTANTS.py')\n with self.assertRaises(Exception):\n FeatureExtractor(path_to_config)",
"def test_init(self):\n res = computer.Computer(1)\n exp = computer.Computer\n self.assertIsInstance(res, exp)",
"def setUpClass(cls):\n cls.inputs = cv.expression_data1\n cls.celltype_analyse = \"celltypetarget\"\n cls.replicate_suffix = \"_donor\"\n cls.algorithm = \"heuristic\"\n cls.k = 4\n cls.thresholds = (0.5, 0, 0) # act, inact, and sparseness, respectively\n cls.files_path = \"test\"",
"def setUp(self):\n self.cashFlowDate=Date(1,October,2018)\n self.fixingDate=Date(1,November,2018)\n self.foreignAmount=1000.0\n self.familyName=\"ECB\"\n self.fixingDays=2\n self.sourceCurrency=USDCurrency()\n self.targetCurrency=EURCurrency()\n self.fixingCalendar=UnitedStates()\n self.todayDate=Date(11, November, 2018)\n self.tsDayCounter=Actual360()\n self.flatForwardUSD=FlatForward(self.todayDate, 0.005, self.tsDayCounter)\n self.sourceYts=RelinkableYieldTermStructureHandle(self.flatForwardUSD)\n self.flatForwardEUR=FlatForward(self.todayDate, 0.03, self.tsDayCounter);\n self.targetYts=RelinkableYieldTermStructureHandle(self.flatForwardEUR)\n self.fxindex=FxIndex(self.familyName,self.fixingDays,self.sourceCurrency,self.targetCurrency,self.fixingCalendar,self.sourceYts,self.targetYts)\n self.fxlinkedcashflow=FXLinkedCashFlow(self.cashFlowDate,self.fixingDate,self.foreignAmount,self.fxindex)",
"def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)"
] | [
"0.64224786",
"0.60437495",
"0.6004442",
"0.5973904",
"0.5874628",
"0.5863269",
"0.5862954",
"0.5851496",
"0.5843871",
"0.58273435",
"0.58007246",
"0.5793445",
"0.5762569",
"0.57617235",
"0.57417697",
"0.571784",
"0.5715807",
"0.5695528",
"0.5693813",
"0.5684522",
"0.5677164",
"0.5675944",
"0.564229",
"0.5635543",
"0.56344175",
"0.56259227",
"0.5617797",
"0.5597922",
"0.55806696",
"0.5571718"
] | 0.83874583 | 0 |
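The test record above fixes a constructor contract (D must be a positive int, support_layer defaults to None, D_eta defaults to D) and four methods that must raise NotImplementedError. A minimal base-class sketch consistent with that contract is given below; it is a hypothetical reconstruction for illustration, not the library's actual ExponentialFamily, and the SupportLayer placeholder is an assumption.

```python
class SupportLayer:
    """Assumed placeholder for whatever layer type the real library accepts."""


class ExponentialFamily:
    """Hypothetical base class matching the behaviour exercised by the test above."""

    def __init__(self, D, support_layer=None):
        if not isinstance(D, int):
            raise TypeError("D must be an int.")
        if D < 1:
            raise ValueError("D must be a positive int.")
        if support_layer is not None and not (
            isinstance(support_layer, type) and issubclass(support_layer, SupportLayer)
        ):
            raise TypeError("support_layer must be None or a SupportLayer subclass.")
        self.D = D
        self.support_layer = support_layer
        self.D_eta = D  # natural-parameter dimension defaults to D

    # Family-specific pieces are left to subclasses, as the test expects.
    def sample_eta(self, N):
        raise NotImplementedError()

    def mu_to_eta(self, mu):
        raise NotImplementedError()

    def eta_to_mu(self, eta):
        raise NotImplementedError()

    def T(self, z):
        raise NotImplementedError()
```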
Return the printable length of the Entry's Text | def getTextLength(self):
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def length(self):\n return len(self.text)",
"def __len__(self):\n return len(self.spricht())",
"def width(self, text):\n return len(text) * (self.font_width + 1)",
"def getLength(self):\n return len(self.entries)",
"def getLength(self, text):\n\n return len(text[self.table_header[0]])",
"def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )",
"def LEN(text):\n return len(text)",
"def char_size(self):\n return len(self.id2char)",
"def _text_length(self, text):\n\n if isinstance(text, dict): # {key: value} case\n return len(next(iter(text.values())))\n elif not hasattr(text, '__len__'): # Object has no len() method\n return 1\n elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints\n return len(text)\n else:\n return sum([len(t) for t in text]) # Sum of length of individual strings",
"def compute_user_description_text_length(row):\n row[\"user_description_text_length\"] = len(row['user_description'])\n return row[\"user_description_text_length\"]",
"def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength",
"def get_string_length(self):\n return int(self.read('H')[0])",
"def _len(item):\n stripped = _strip_ansi(item)\n if wcwidth:\n return wcwidth.wcswidth(stripped)\n else:\n return len(stripped)",
"def total_length():\n return",
"def size(self):\n return len(self.chars)",
"def namelength(self):\n return self[\"namelength\"]",
"def visual_len(text) -> int:\n return len(text) if NO_COLOR else len(_remove_regex(\"\\033\\\\[[0-9]*m\", text))",
"def printed_length(string):\n # It returns the length of the printed string\n return len(remove_colors(string))",
"def title_len(self) -> int:\n return self.__title_len",
"def get_text_width(self, text: str) -> float:\n pass",
"def length(self):\n return self._info.length # pylint: disable=E1101",
"def __len__(self):\n # TODO: Is this method used?\n return self._info['length']",
"def size(self):\n return _(len(self._))",
"def __len__(self) -> int:\n return len(self.contents)",
"def __len__(self) -> int:\n return len(self.tab10)",
"def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length",
"def get_width(self):\n return \"%s\" % self.width",
"def _get_length(self):\n return self._length",
"def get_length(self):\n\n return self.length",
"def characters_left(self):\r\n return self.max_chars - len(self.variable.get())"
] | [
"0.69978696",
"0.6772337",
"0.6772269",
"0.6674462",
"0.66585314",
"0.6562858",
"0.64422786",
"0.63689303",
"0.63642585",
"0.63120514",
"0.6311637",
"0.6309694",
"0.6292428",
"0.6272306",
"0.6256533",
"0.62360454",
"0.6224102",
"0.62151045",
"0.6209286",
"0.6154031",
"0.6144847",
"0.61350745",
"0.613423",
"0.6101091",
"0.609945",
"0.6087122",
"0.6075593",
"0.60414",
"0.60293055",
"0.60083586"
] | 0.7582059 | 0 |
Helper function to generate the jitted lanczos function used in JaxBackend.eigsh_lanczos. The function `jax_lanczos` returned by this higher-order function has the following call signature: | def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:
@functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))
def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):
"""
Jitted lanczos routine.
Args:
matvec: A callable implementing the matrix-vector product of a
linear operator.
arguments: Arguments to `matvec` additional to an input vector.
`matvec` will be called as `matvec(init, *args)`.
init: An initial input state to `matvec`.
ncv: Number of krylov iterations (i.e. dimension of the Krylov space).
neig: Number of eigenvalue-eigenvector pairs to be computed.
landelta: Convergence parameter: if the norm of the current Lanczos vector
falls below `landelta`, iteration is stopped.
reortho: If `True`, reorthogonalize all krylov vectors at each step.
This should be used if `neig>1`.
Returns:
jax.numpy.ndarray: Eigenvalues
list: Eigenvectors
"""
def body_modified_gram_schmidt(i, vals):
vector, krylov_vectors = vals
v = krylov_vectors[i, :]
vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)
return [vector, krylov_vectors]
def body_lanczos(vals):
current_vector, krylov_vectors, vector_norms = vals[0:3]
diagonal_elements, matvec, args, _ = vals[3:7]
threshold, i, maxiteration = vals[7:]
norm = jax.numpy.linalg.norm(current_vector)
normalized_vector = current_vector / norm
normalized_vector, krylov_vectors = jax.lax.cond(
reortho, True,
lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,
[normalized_vector, krylov_vectors]),
False, lambda x: [normalized_vector, krylov_vectors])
Av = matvec(normalized_vector, *args)
diag_element = jax.numpy.vdot(normalized_vector, Av)
res = jax.numpy.reshape(
jax.numpy.ravel(Av) -
jax.numpy.ravel(normalized_vector) * diag_element -
krylov_vectors[i - 1] * norm, Av.shape)
krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],
jax.numpy.ravel(normalized_vector))
vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],
norm)
diagonal_elements = jax.ops.index_update(diagonal_elements,
jax.ops.index[i - 1],
diag_element)
return [
res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,
norm, threshold, i + 1, maxiteration
]
def cond_fun(vals):
_, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals
def check_thresh(check_vals):
val, thresh = check_vals
return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)
return jax.lax.cond(iteration <= maxiteration, [norm, threshold],
check_thresh, False, lambda x: x)
numel = jax.numpy.prod(init.shape)
krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)
norms = jax.numpy.zeros(ncv, dtype=init.dtype)
diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)
norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)
norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),
dtype=init.dtype)).dtype
initvals = [
init, krylov_vecs, norms, diag_elems, matvec, arguments,
norms_dtype.type(1.0), landelta, 1, ncv
]
output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)
final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output
krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],
jax.numpy.ravel(final_state))
A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(
norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)
eigvals, U = jax.numpy.linalg.eigh(A_tridiag)
eigvals = eigvals.astype(A_tridiag.dtype)
def body_vector(i, vals):
krv, unitary, states = vals
dim = unitary.shape[1]
n, m = jax.numpy.divmod(i, dim)
states = jax.ops.index_add(states, jax.ops.index[n, :],
krv[m + 1, :] * unitary[m, n])
return [krv, unitary, states]
state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)
_, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),
body_vector,
[krylov_vecs, U, state_vectors])
return jax.numpy.array(eigvals[0:neig]), [
jax.numpy.reshape(vectors[n, :], init.shape) /
jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)
]
return jax_lanczos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact",
"def lanczos(dx, width, cutoff, /):\n # Coefficients and initial stuff\n # n = (width/dx)//1 # convert window width from 'time units' to 'steps'\n # n = width//2\n # Convert alpha to wavenumber (new units are 'inverse timesteps')\n alpha = 1.0 / (cutoff / dx)\n n = width\n n = (n - 1) // 2 + 1\n tau = np.arange(1, n + 1) # lag time\n C0 = 2 * alpha # integral of cutoff-response function is alpha*pi/pi\n Ck = np.sin(2 * np.pi * alpha * tau) / (np.pi * tau)\n Cktilde = Ck * np.sin(np.pi * tau / n) / (np.pi * tau / n)\n\n # Return filter\n # Example: n = 9 returns 4 + 4 + 1 points\n order = n * 2 - 1\n print(f'Order-{order} Lanczos window')\n window = np.concatenate((np.flipud(Cktilde), np.array([C0]), Cktilde))\n return window[1:-1], 1",
"def optimisation_factory_Jzazbz() -> (\n Tuple[NDArrayFloat, Callable, Callable, Callable]\n):\n\n x_0 = as_float_array([1, 0, 0, 1, 0, 0])\n\n def objective_function(\n M: ArrayLike, RGB: ArrayLike, Jab: ArrayLike\n ) -> NDArrayFloat:\n \"\"\":math:`J_za_zb_z` colourspace based objective function.\"\"\"\n\n M = finaliser_function(M)\n\n XYZ_t = vector_dot(\n RGB_COLOURSPACE_ACES2065_1.matrix_RGB_to_XYZ, vector_dot(M, RGB)\n )\n Jab_t = XYZ_to_optimization_colour_model(XYZ_t)\n\n return as_float(np.sum(euclidean_distance(Jab, Jab_t)))\n\n def XYZ_to_optimization_colour_model(XYZ: ArrayLike) -> NDArrayFloat:\n \"\"\"*CIE XYZ* colourspace to :math:`J_za_zb_z` colourspace function.\"\"\"\n\n return XYZ_to_Jzazbz(XYZ)\n\n def finaliser_function(M: ArrayLike) -> NDArrayFloat:\n \"\"\"Finaliser function.\"\"\"\n\n return whitepoint_preserving_matrix(\n np.hstack([np.reshape(M, (3, 2)), zeros((3, 1))])\n )\n\n return (\n x_0,\n objective_function,\n XYZ_to_optimization_colour_model,\n finaliser_function,\n )",
"def jit(func):\n return func",
"def zonal( self, fields, fun ):\n raise NotImplementedError(\"zonal\")",
"def test_clz_u4(self):\n compiled = cuda.jit(\"void(int32[:], uint32)\")(simple_clz)\n ary = np.zeros(1, dtype=np.int32)\n compiled[1, 1](ary, 0x00100000)\n self.assertEquals(ary[0], 11)",
"def _vzlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._zlerchphi(z_, a) for z_ in z])",
"def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho",
"def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )",
"def _ltz(self):\n raise NotImplementedError(\"_ltz is not implemented\")",
"def lanczos_decomp(vector_prod_fn, scalar, n, k):\n Q = tf.zeros([n, 1])\n v = tf.random_uniform([n, 1])\n v = v / tf.norm(v)\n Q = tf.concat([Q, v], axis=1)\n\n # diagonals of the tridiagonal matrix\n beta = tf.constant(0.0, dtype=tf.float32, shape=[1])\n alpha = tf.constant(0.0, dtype=tf.float32, shape=[1])\n\n for i in range(k):\n v = vector_prod_fn(tf.reshape(Q[:, i+1], [n, 1])) - tf.scalar_mul(scalar, tf.reshape(Q[:, i+1], [n, 1]))\n v = tf.reshape(v, [n,])\n curr_alpha = tf.reshape(tf.reduce_sum(v * Q[:, i+1]), [1,])\n alpha = tf.concat([alpha, curr_alpha], axis=0)\n v = v-beta[-1]*Q[:, i]-alpha[-1]*Q[:, i+1]\n curr_beta = tf.reshape(tf.norm(v), [1,])\n beta = tf.concat([beta, curr_beta], axis=0)\n curr_norm = tf.reshape(v/(beta[-1]+1e-8), [n, 1])\n Q = tf.concat([Q, curr_norm], axis=1)\n\n alpha = tf.slice(alpha, begin=[1], size=[-1])\n beta = tf.slice(beta, begin=[1], size=[k-1])\n Q = tf.slice(Q, begin=[0, 1], size=[-1, k])\n return alpha, beta, Q",
"def zenazi(scx_l, scx_b, scy_l, scy_b, scz_l, scz_b, src_l, src_b):\n # Zenith is the distance from the optical axis (here z)\n costheta = GreatCircle(scz_l,scz_b,src_l,src_b) \n # Azimuth is the combination of the remaining two\n cosx = GreatCircle(scx_l,scx_b,src_l,src_b)\n cosy = GreatCircle(scy_l,scy_b,src_l,src_b)\n \n # check exceptions\n # maybe not for vectorisation\n \"\"\"\n if costheta.size == 1:\n if (costheta > 1.0):\n costheta = 1.0\n if (costheta < -1.0):\n costheta = -1.0\n else:\n costheta[costheta > 1.0] = 1.0\n costheta[costheta < -1.0] = -1.0\n \"\"\"\n # theta = zenith\n theta = np.rad2deg(np.arccos(costheta))\n # phi = azimuth\n phi = np.rad2deg(np.arctan2(cosy,cosx)) # TS January 14: you sure about that? changed y and x\n \n # make azimuth going from 0 to 360 deg\n if phi.size == 1:\n if (phi < 0):\n phi += 360\n else:\n phi[phi < 0] += 360\n \n return theta,phi",
"def return_lxx_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[k1*1*dt,0],[0,0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0,0],[0,k2*1*dt]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)",
"def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:\n\n arnoldi_fact = _generate_arnoldi_factorization(jax)\n\n # ######################################################\n # ####### NEW SORTING FUCTIONS INSERTED HERE #########\n # ######################################################\n @functools.partial(jax.jit, static_argnums=(1,))\n def LR_sort(evals, p):\n inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n @functools.partial(jax.jit, static_argnums=(1,))\n def LM_sort(evals, p):\n inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n # #######################################################\n # #######################################################\n # #######################################################\n @functools.partial(jax.jit, static_argnums=(4, 5, 6))\n def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):\n funs = [LR_sort, LM_sort]\n shifts, _ = funs[which](evals, p)\n # compress to k = numeig\n q = jax.numpy.zeros(Hm.shape[0])\n q = jax.ops.index_update(q, jax.ops.index[-1], 1)\n m = Hm.shape[0]\n\n for shift in shifts:\n Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))\n Hm = Qj.T.conj() @ Hm @ Qj\n Vm = Qj.T @ Vm\n q = q @ Qj\n\n fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]\n Vk = Vm[0:k, :]\n Hk = Hm[0:k, 0:k]\n H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)\n H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)\n Z = jax.numpy.linalg.norm(fk)\n v = fk / Z\n krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :],\n Vk)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v)\n Z = jax.numpy.linalg.norm(fk)\n #if fk is a zero-vector then arnoldi has exactly converged.\n #use small threshold to check this\n return krylov_vectors, H, fk, Z < res_thresh\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def update_data(Vm_tmp, Hm_tmp, numits):\n Vm = Vm_tmp[0:numits, :]\n Hm = Hm_tmp[0:numits, 0:numits]\n fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]\n return Vm, Hm, fm\n\n @functools.partial(jax.jit, static_argnums=(3,))\n def get_vectors(Vm, unitary, inds, numeig):\n\n def body_vector(i, vals):\n krv, unitary, states, inds = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m, :] * unitary[m, inds[n]])\n return [krv, unitary, states, inds]\n\n state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)\n _, _, state_vectors, _ = jax.lax.fori_loop(\n 0, numeig * Vm.shape[0], body_vector,\n [Vm, unitary, state_vectors, inds])\n state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)\n state_vectors = state_vectors / state_norms[:, None]\n return state_vectors\n\n\n def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"\n Implicitly restarted arnoldi factorization of `matvec`. The routine\n finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`\n by alternating between compression and re-expansion of an initial\n `num_krylov_vecs`-step Arnoldi factorization.\n\n Note: The caller has to ensure that the dtype of the return value\n of `matvec` matches the dtype of the initial state. 
Otherwise jax\n will raise a TypeError.\n\n Args:\n matvec: A callable representing the linear operator.\n args: Arguments to `matvec`. `matvec` is called with\n `matvec(x, *args)` with `x` the input array on which\n `matvec` should act.\n initial_state: An starting vector for the iteration.\n num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.\n numeig: The number of desired eigenvector-eigenvalue pairs.\n which: Which eigenvalues to target. Currently supported: `which = 'LR'`\n or `which = 'LM'`.\n eps: Convergence flag. If the norm of a krylov vector drops below `eps`\n the iteration is terminated.\n maxiter: Maximum number of (outer) iteration steps.\n Returns:\n eta, U: Two lists containing eigenvalues and eigenvectors.\n \"\"\"\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]\n\n return implicitly_restarted_arnoldi_method",
"def make_vector_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n laplace_r = make_laplace(bcs.extract_component(0))\n laplace_z = make_laplace(bcs.extract_component(1))\n laplace_phi = make_laplace(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def vector_laplace(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out\n\n return vector_laplace # type: ignore",
"def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T",
"def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2",
"def optimisation_factory_Oklab_15() -> (\n Tuple[NDArrayFloat, Callable, Callable, Callable]\n):\n\n x_0 = as_float_array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])\n\n def objective_function(\n M: ArrayLike, RGB: ArrayLike, Jab: ArrayLike\n ) -> NDArrayFloat:\n \"\"\"*Oklab* colourspace based objective function.\"\"\"\n\n M = finaliser_function(M)\n\n XYZ_t = np.transpose(\n np.dot(\n RGB_COLOURSPACE_ACES2065_1.matrix_RGB_to_XYZ,\n np.dot(\n M,\n np.transpose(\n polynomial_expansion_Finlayson2015(RGB, 2, True)\n ),\n ),\n )\n )\n\n Jab_t = XYZ_to_optimization_colour_model(XYZ_t)\n\n return as_float(np.sum(euclidean_distance(Jab, Jab_t)))\n\n def XYZ_to_optimization_colour_model(XYZ: ArrayLike) -> NDArrayFloat:\n \"\"\"*CIE XYZ* colourspace to *Oklab* colourspace function.\"\"\"\n\n return XYZ_to_Oklab(XYZ)\n\n def finaliser_function(M: ArrayLike) -> NDArrayFloat:\n \"\"\"Finaliser function.\"\"\"\n\n return whitepoint_preserving_matrix(\n np.hstack([np.reshape(M, (3, 5)), zeros((3, 1))])\n )\n\n return (\n x_0,\n objective_function,\n XYZ_to_optimization_colour_model,\n finaliser_function,\n )",
"def zzX_zz_LC(f):\n if poly_univariate_p(f):\n return poly_LC(f)\n else:\n return zzX_zz_LC(poly_LC(f))",
"def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator",
"def _get_jacobian(tris_pts):\n a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])\n b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])\n J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],\n [b[:, 0], b[:, 1]]])\n return J",
"def laplacian(f,dx,dy,dz,x=[],y=[],z=[],param=[],dim=[]):\n if not param:\n param = read_param(quiet=True)\n if not dim:\n dim = read_dim()\n if len(x) < 1:\n gd = read_grid(quiet=True)\n x = gd.x\n y = gd.y\n z = gd.z\n\n laplacian = N.empty(f.shape)\n laplacian = xder2(f,dx,x=x,y=y,z=z,param=param,dim=dim) +\\\n yder2(f,dy,x=x,y=y,z=z,param=param,dim=dim) +\\\n zder2(f,dz,x=x,y=y,z=z,param=param,dim=dim)\n\n if param.coord_system == 'cylindric':\n laplacian += xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)/x\n if param.coord_system == 'spherical':\n sin_y = N.sin(y)\n cos_y = N.cos(y)\n i_sin = N.where(N.abs(sin_y) < 1e-5)[0]\n if i_sin.size > 0:\n cos_y[i_sin] = 0.; sin_y[i_sin] = 1\n x_2, cotth = N.meshgrid(1./x**2, cos_y/sin_y)\n laplacian += 2*xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)/x +\\\n yder(f,dy,x=x,y=y,z=z,param=param,dim=dim)*x_2*cotth\n\n return laplacian",
"def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))",
"def blas_header_text():\r\n header = \"\"\"\r\n extern \"C\"\r\n {\r\n\r\n void xerbla_(char*, void *);\r\n\r\n /***********/\r\n /* Level 1 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void srot_(const int*, float *, const int*, float *, const int*, const float *, const float *);\r\n void srotg_(float *,float *,float *,float *); \r\n void srotm_( const int*, float *, const int*, float *, const int*, const float *);\r\n void srotmg_(float *,float *,float *,const float *, float *);\r\n void sswap_( const int*, float *, const int*, float *, const int*);\r\n void scopy_( const int*, const float *, const int*, float *, const int*);\r\n void saxpy_( const int*, const float *, const float *, const int*, float *, const int*);\r\n float sdot_(const int*, const float *, const int*, const float *, const int*);\r\n void sdot_sub_(const int*, const float *, const int*, const float *, const int*, float *);\r\n void sdsdot_sub_( const int*, const float *, const float *, const int*, const float *, const int*, float *);\r\n void sscal_( const int*, const float *, float *, const int*);\r\n void snrm2_sub_( const int*, const float *, const int*, float *);\r\n void sasum_sub_( const int*, const float *, const int*, float *);\r\n void isamax_sub_( const int*, const float * , const int*, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void drot_(const int*, double *, const int*, double *, const int*, const double *, const double *);\r\n void drotg_(double *,double *,double *,double *); \r\n void drotm_( const int*, double *, const int*, double *, const int*, const double *);\r\n void drotmg_(double *,double *,double *,const double *, double *);\r\n void dswap_( const int*, double *, const int*, double *, const int*);\r\n void dcopy_( const int*, const double *, const int*, double *, const int*);\r\n void daxpy_( const int*, const double *, const double *, const int*, double *, const int*);\r\n void dswap_( const int*, double *, const int*, double *, const int*);\r\n double ddot_(const int*, const double *, const int*, const double *, const int*);\r\n void dsdot_sub_(const int*, const float *, const int*, const float *, const int*, double *);\r\n void ddot_sub_( const int*, const double *, const int*, const double *, const int*, double *);\r\n void dscal_( const int*, const double *, double *, const int*);\r\n void dnrm2_sub_( const int*, const double *, const int*, double *);\r\n void dasum_sub_( const int*, const double *, const int*, double *);\r\n void idamax_sub_( const int*, const double * , const int*, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cswap_( const int*, void *, const int*, void *, const int*);\r\n void ccopy_( const int*, const void *, const int*, void *, const int*);\r\n void caxpy_( const int*, const void *, const void *, const int*, void *, const int*);\r\n void cswap_( const int*, void *, const int*, void *, const int*);\r\n void cdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void cdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void cscal_( const int*, const void *, void *, const int*);\r\n void icamax_sub_( const int*, const void *, const int*, const int*);\r\n void csscal_( const int*, const float *, void *, const int*);\r\n void scnrm2_sub_( const int*, const void *, const int*, float *);\r\n void scasum_sub_( const int*, const void *, const int*, float *);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zswap_( const int*, void *, const int*, void *, const 
int*);\r\n void zcopy_( const int*, const void *, const int*, void *, const int*);\r\n void zaxpy_( const int*, const void *, const void *, const int*, void *, const int*);\r\n void zswap_( const int*, void *, const int*, void *, const int*);\r\n void zdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void zdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void zdscal_( const int*, const double *, void *, const int*);\r\n void zscal_( const int*, const void *, void *, const int*);\r\n void dznrm2_sub_( const int*, const void *, const int*, double *);\r\n void dzasum_sub_( const int*, const void *, const int*, double *);\r\n void izamax_sub_( const int*, const void *, const int*, const int*);\r\n\r\n /***********/\r\n /* Level 2 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void sgemv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void sgbmv_(char*, const int*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssymv_(char*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssbmv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void sspmv_(char*, const int*, const float *, const float *, const float *, const int*, const float *, float *, const int*);\r\n void strmv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\r\n void stbmv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\r\n void strsv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\r\n void stbsv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\r\n void stpmv_( char*, char*, char*, const int*, const float *, float *, const int*);\r\n void stpsv_( char*, char*, char*, const int*, const float *, float *, const int*);\r\n void sger_( const int*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\r\n void ssyr_(char*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void sspr_(char*, const int*, const float *, const float *, const int*, float *); \r\n void sspr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *); \r\n void ssyr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void dgemv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dgbmv_(char*, const int*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsymv_(char*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsbmv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dspmv_(char*, const int*, const double *, const double *, const double *, const int*, const 
double *, double *, const int*);\r\n void dtrmv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\r\n void dtbmv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\r\n void dtrsv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\r\n void dtbsv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\r\n void dtpmv_( char*, char*, char*, const int*, const double *, double *, const int*);\r\n void dtpsv_( char*, char*, char*, const int*, const double *, double *, const int*);\r\n void dger_( const int*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\r\n void dsyr_(char*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void dspr_(char*, const int*, const double *, const double *, const int*, double *); \r\n void dspr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *); \r\n void dsyr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void cgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\r\n void ctrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ctbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ctpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\r\n void ctrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ctbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ctpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\r\n void cgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void cgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void cher_(char*, const int*, const float *, const void *, const int*, void *, const int*);\r\n void cher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void chpr_(char*, const int*, const float *, const void *, const int*, void *);\r\n void chpr2_(char*, const int*, const float *, const void *, const int*, const void *, const int*, void *);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, 
const int*, const void *, void *, const int*);\r\n void zhemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\r\n void ztrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ztbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ztpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\r\n void ztrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ztbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ztpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\r\n void zgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zher_(char*, const int*, const double *, const void *, const int*, void *, const int*);\r\n void zher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zhpr_(char*, const int*, const double *, const void *, const int*, void *);\r\n void zhpr2_(char*, const int*, const double *, const void *, const int*, const void *, const int*, void *);\r\n\r\n /***********/\r\n /* Level 3 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void sgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void ssyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void strmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void strsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void dgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void dsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dtrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void dtrsm_(char*, char*, char*, char*, 
const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void csymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void chemm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void csyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void cherk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void csyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void cher2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ctrmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void ctrsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zhemm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void zherk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void zsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zher2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void ztrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void ztrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n\r\n }\r\n \"\"\"\r\n\r\n if detect_macos_sdot_bug():\r\n if detect_macos_sdot_bug.fix_works:\r\n header += textwrap.dedent(\"\"\"\\\r\n extern \"C\" float cblas_sdot(int, float*, int, float*, int);\r\n static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\r\n {\r\n return cblas_sdot(*Nx, x, *Sx, y, *Sy);\r\n }\r\n \"\"\")\r\n else:\r\n # Make sure the buggy version of sdot_ is never used\r\n header += textwrap.dedent(\"\"\"\\\r\n static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\r\n {\r\n fprintf(stderr,\r\n \"FATAL: The implementation of BLAS SDOT \"\r\n \"routine in your system has a bug that \"\r\n \"makes 
it return wrong results.\\\\n\"\r\n \"Please contact [email protected].\\\\n\"\r\n \"You can work around this bug by using a \"\r\n \"different BLAS library, or disabling BLAS\\\\n\");\r\n assert(0);\r\n }\r\n \"\"\")\r\n\r\n return header",
"def calculate_jacobian(robot_position, landmark_pos):\n\n return None",
"def generate_raw_decomposition(alphabeta, lanczos_iterations=None):\n # extract matrix elements\n alpha, beta = alphabeta\n\n # trim vectors\n if (lanczos_iterations is not None):\n (alpha, beta) = (alpha[:lanczos_iterations],beta[:lanczos_iterations-1])\n\n # generate Lanczos decomposition\n eigvals, eigvecs = linalg.eigh_tridiagonal(alpha, beta)\n raw_decomposition = [\n (eigval,eigvecs[0, i]**2)\n for i, eigval in enumerate(eigvals)\n ]\n\n return raw_decomposition",
"def isYZPlanar(points=[]):\n return isCardinalPlanar(\"yz\",points)",
"def _TODOStepsScipy(z, nstep, refr, Fin):\n\n if Fin._curvature != 0.0:\n raise ValueError('Cannot operate on spherical coords.'\n + 'Use Convert() first')\n Fout = Field.copy(Fin)\n N = Fout.N\n lam = Fout.lam\n size = Fout.siz\n dtype = Fout._dtype\n \n legacy = True\n if legacy:\n Pi = 3.141592654 #to compare Cpp results accurately\n else:\n Pi = _np.pi\n K = 2.*Pi/lam\n z = z/2.\n Pi4lz = 4.*Pi/lam/z\n imPi4lz = 1j * Pi4lz\n \n delta = size/(N-1.) #dx\n delta2 = delta*delta\n \n n = 100\n c = 1\n # n = N\n # c = delta**2\n def f(u, ):\n return u**3\n \n def f_prime(u):\n return 3 * u**2\n \n def fun(u, n, f, f_prime, c, **kwargs):\n v = _np.zeros((n + 2, n + 2))\n u = u.reshape((n, n))\n v[1:-1, 1:-1] = u\n y = v[:-2, 1:-1] + v[2:, 1:-1] + v[1:-1, :-2] + v[1:-1, 2:] - 4 * u + c * f(u)\n return y.ravel()\n\n def compute_jac_indices(n):\n i = _np.arange(n)\n jj, ii = _np.meshgrid(i, i)\n \n ii = ii.ravel()\n jj = jj.ravel()\n \n ij = _np.arange(n**2)\n \n jac_rows = [ij]\n jac_cols = [ij]\n \n mask = ii > 0\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask - n)\n \n mask = ii < n - 1\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask + n)\n \n mask = jj > 0\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask - 1)\n \n mask = jj < n - 1\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask + 1)\n \n return _np.hstack(jac_rows), _np.hstack(jac_cols)\n jac_rows, jac_cols = compute_jac_indices(N)\n # u0 = np.ones(n**2) * 0.5\n u0 = Fin.field.ravel() #initial guess is old field\n \n def jac(u, n, f, f_prime, c, jac_rows=None, jac_cols=None):\n jac_values = _np.ones_like(jac_cols, dtype=float)\n jac_values[:n**2] = -4 + c * f_prime(u)\n return coo_matrix((jac_values, (jac_rows, jac_cols)),\n shape=(n**2, n**2))\n \n res_1 = least_squares(fun, u0.real, jac=jac, gtol=1e-3,\n args=(N, f, f_prime, c),\n kwargs={'jac_rows': jac_rows,\n 'jac_cols': jac_cols},\n verbose=0)\n # print(res_1)\n Fout.field = res_1.x.reshape((N, N))\n Fout._IsGauss=False\n return Fout",
"def build_jacobian(l_comp, R_comp, l_vect, R_vect, B_vect):\r\n l_len = numpy.sqrt((l_vect * l_vect).sum(-1))\r\n R_len = numpy.sqrt((R_vect * R_vect).sum(-1))\r\n B_len = numpy.sqrt((B_vect * B_vect).sum(-1))\r\n # Empty 3x3 jacobian matrix\r\n jacob = numpy.zeros((B_vect.shape[-1], B_vect.shape[-1]), B_vect.dtype)\r\n\r\n # This is in the space with a standard basis along the \"l\", \"R\" and \"B\" axes\r\n jacob[1, 2] = -B_len / R_len\r\n jacob[2, 0] = l_comp\r\n jacob[2, 1] = R_comp\r\n\r\n # Transform the Jacobian to main space\r\n xform = numpy.stack((\r\n l_vect / l_len,\r\n R_vect / R_len,\r\n B_vect / B_len\r\n )).T\r\n xform_inv = numpy.linalg.inv(xform)\r\n return numpy.matmul(xform, numpy.matmul(jacob.T, xform_inv)).T",
"def njit(func):\n return func"
] | [
"0.61316943",
"0.5759136",
"0.5484182",
"0.53870416",
"0.5306601",
"0.5300693",
"0.52789545",
"0.52645034",
"0.525278",
"0.5248642",
"0.52068466",
"0.5197051",
"0.5172847",
"0.5171192",
"0.5147542",
"0.5092073",
"0.5066736",
"0.5065881",
"0.50415236",
"0.5041109",
"0.50320095",
"0.50298345",
"0.5008514",
"0.5001094",
"0.50006133",
"0.49993464",
"0.49896526",
"0.49810082",
"0.49807104",
"0.4973545"
] | 0.8053774 | 0 |
Helper function to create a jitted arnoldi factorization. The function returns a function `_arnoldi_fact` which performs an m-step arnoldi factorization. `_arnoldi_fact` computes an m-step arnoldi factorization of an input callable `matvec`, with m = min(`it`,`num_krylov_vecs`). `_arnoldi_fact` will do at most `num_krylov_vecs` steps. `_arnoldi_fact` returns arrays `kv` and `H` which satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and | def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:
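  # `jax` is passed in explicitly rather than imported, so the inner functions
  # below are jitted against that specific module instance; `functools`,
  # `types` and `Callable` are assumed to be imported at the module level of
  # the surrounding file.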
@jax.jit
def modified_gram_schmidt_step_arnoldi(j, vals):
"""
Single step of a modified gram-schmidt orthogonalization.
Args:
j: Integer value denoting the vector to be orthogonalized.
vals: A list of variables:
`vector`: The current vector to be orthogonalized
to all previous ones
`krylov_vectors`: jax.array of collected krylov vectors
`n`: integer denoting the column-position of the overlap
<`krylov_vector`|`vector`> within `H`.
Returns:
updated vals.
"""
vector, krylov_vectors, n, H = vals
v = krylov_vectors[j, :]
h = jax.numpy.vdot(v, vector)
H = jax.ops.index_update(H, jax.ops.index[j, n], h)
vector = vector - h * jax.numpy.reshape(v, vector.shape)
return [vector, krylov_vectors, n, H]
@functools.partial(jax.jit, static_argnums=(5, 6, 7))
def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,
eps):
"""
Compute an m-step arnoldi factorization of `matvec`, with
m = min(`it`,`num_krylov_vecs`). The factorization will
do at most `num_krylov_vecs` steps. The returned arrays
`kv` and `H` will satisfy the Arnoldi recurrence relation
```
matrix @ Vm - Vm @ Hm - fm * em = 0
```
with `matrix` the matrix representation of `matvec` and
`Vm = jax.numpy.transpose(kv[:it, :])`,
    `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1], 1)`
and `em` a cartesian basis vector of shape `(1, kv.shape[1])`
with `em[0, -1] == 1` and 0 elsewhere.
Note that the caller is responsible for dtype consistency between
the inputs, i.e. dtypes between all input arrays have to match.
Args:
matvec: The matrix vector product.
args: List of arguments to `matvec`.
v0: Initial state to `matvec`.
krylov_vectors: An array for storing the krylov vectors. The individual
vectors are stored as columns.
The shape of `krylov_vecs` has to be
(num_krylov_vecs + 1, np.ravel(v0).shape[0]).
H: Matrix of overlaps. The shape has to be
(num_krylov_vecs + 1,num_krylov_vecs + 1).
start: Integer denoting the start position where the first
produced krylov_vector should be inserted into `krylov_vectors`
num_krylov_vecs: Number of krylov iterations, should be identical to
        `krylov_vectors.shape[0] - 1`
eps: Convergence parameter. Iteration is terminated if the norm of a
krylov-vector falls below `eps`.
Returns:
kv: An array of krylov vectors
H: A matrix of overlaps
it: The number of performed iterations.
"""
Z = jax.numpy.linalg.norm(v0)
v = v0 / Z
krylov_vectors = jax.ops.index_update(krylov_vectors,
jax.ops.index[start, :],
jax.numpy.ravel(v))
H = jax.lax.cond(
start > 0, start,
lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,
lambda x: H)
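    # On a restart (start > 0) the norm Z of the new starting vector is the
    # residual of the previous factorization and is written into the
    # subdiagonal entry H[start, start - 1]; on a fresh start H is left
    # unchanged. The five-argument, operand-style jax.lax.cond used here is
    # the signature of the older jax releases this snippet was written for.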
# body of the arnoldi iteration
def body(vals):
krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals
Av = matvec(vector, *args)
initial_vals = [Av, krylov_vectors, i, H]
Av, krylov_vectors, _, H = jax.lax.fori_loop(
0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)
norm = jax.numpy.linalg.norm(Av)
Av /= norm
H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)
krylov_vectors = jax.ops.index_update(krylov_vectors,
jax.ops.index[i + 1, :],
jax.numpy.ravel(Av))
return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]
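    # Stopping criterion for the while_loop below: keep iterating while fewer
    # than `num_krylov_vecs` vectors have been produced and the norm of the
    # current Krylov vector is still above `eps`.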
def cond_fun(vals):
# Continue loop while iteration < num_krylov_vecs and norm > eps
_, _, _, _, norm, _, iteration, _ = vals
counter_done = (iteration >= num_krylov_vecs)
norm_not_too_small = norm > eps
continue_iteration = jax.lax.cond(counter_done,
_, lambda x: False,
_, lambda x: norm_not_too_small)
return continue_iteration
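    # Seed the loop with a norm slightly above `eps` so the first iteration
    # always runs; `start` is used as the initial iteration counter, so a
    # restarted factorization resumes at column `start`.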
initial_norm = v.real.dtype.type(1.0+eps)
initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,
num_krylov_vecs]
final_values = jax.lax.while_loop(cond_fun, body, initial_values)
kvfinal, Hfinal, _, _, norm, _, it, _ = final_values
return kvfinal, Hfinal, it, norm < eps
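  # Hypothetical usage sketch (illustrative names, mirrors how the
  # implicitly-restarted driver in this file calls the routine; relies on the
  # old jax.ops API era this snippet was written for):
  #
  #   arnoldi = _generate_arnoldi_factorization(jax)
  #   dim, m = 64, 20
  #   A = jax.numpy.array(np.random.randn(dim, dim))
  #   matvec = lambda x: A @ x
  #   kv0 = jax.numpy.zeros((m + 1, dim), dtype=A.dtype)
  #   H0 = jax.numpy.zeros((m + 1, m + 1), dtype=A.dtype)
  #   v0 = jax.numpy.array(np.random.randn(dim))
  #   kv, H, it, converged = arnoldi(matvec, [], v0, kv0, H0, 0, m, 1e-8)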
return _arnoldi_fact | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps",
"def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]",
"def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:\n\n arnoldi_fact = _generate_arnoldi_factorization(jax)\n\n # ######################################################\n # ####### NEW SORTING FUCTIONS INSERTED HERE #########\n # ######################################################\n @functools.partial(jax.jit, static_argnums=(1,))\n def LR_sort(evals, p):\n inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n @functools.partial(jax.jit, static_argnums=(1,))\n def LM_sort(evals, p):\n inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n # #######################################################\n # #######################################################\n # #######################################################\n @functools.partial(jax.jit, static_argnums=(4, 5, 6))\n def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):\n funs = [LR_sort, LM_sort]\n shifts, _ = funs[which](evals, p)\n # compress to k = numeig\n q = jax.numpy.zeros(Hm.shape[0])\n q = jax.ops.index_update(q, jax.ops.index[-1], 1)\n m = Hm.shape[0]\n\n for shift in shifts:\n Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))\n Hm = Qj.T.conj() @ Hm @ Qj\n Vm = Qj.T @ Vm\n q = q @ Qj\n\n fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]\n Vk = Vm[0:k, :]\n Hk = Hm[0:k, 0:k]\n H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)\n H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)\n Z = jax.numpy.linalg.norm(fk)\n v = fk / Z\n krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :],\n Vk)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v)\n Z = jax.numpy.linalg.norm(fk)\n #if fk is a zero-vector then arnoldi has exactly converged.\n #use small threshold to check this\n return krylov_vectors, H, fk, Z < res_thresh\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def update_data(Vm_tmp, Hm_tmp, numits):\n Vm = Vm_tmp[0:numits, :]\n Hm = Hm_tmp[0:numits, 0:numits]\n fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]\n return Vm, Hm, fm\n\n @functools.partial(jax.jit, static_argnums=(3,))\n def get_vectors(Vm, unitary, inds, numeig):\n\n def body_vector(i, vals):\n krv, unitary, states, inds = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m, :] * unitary[m, inds[n]])\n return [krv, unitary, states, inds]\n\n state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)\n _, _, state_vectors, _ = jax.lax.fori_loop(\n 0, numeig * Vm.shape[0], body_vector,\n [Vm, unitary, state_vectors, inds])\n state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)\n state_vectors = state_vectors / state_norms[:, None]\n return state_vectors\n\n\n def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"\n Implicitly restarted arnoldi factorization of `matvec`. The routine\n finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`\n by alternating between compression and re-expansion of an initial\n `num_krylov_vecs`-step Arnoldi factorization.\n\n Note: The caller has to ensure that the dtype of the return value\n of `matvec` matches the dtype of the initial state. 
Otherwise jax\n will raise a TypeError.\n\n Args:\n matvec: A callable representing the linear operator.\n args: Arguments to `matvec`. `matvec` is called with\n `matvec(x, *args)` with `x` the input array on which\n `matvec` should act.\n initial_state: An starting vector for the iteration.\n num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.\n numeig: The number of desired eigenvector-eigenvalue pairs.\n which: Which eigenvalues to target. Currently supported: `which = 'LR'`\n or `which = 'LM'`.\n eps: Convergence flag. If the norm of a krylov vector drops below `eps`\n the iteration is terminated.\n maxiter: Maximum number of (outer) iteration steps.\n Returns:\n eta, U: Two lists containing eigenvalues and eigenvectors.\n \"\"\"\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]\n\n return implicitly_restarted_arnoldi_method",
"def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = 
eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos",
"def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q > 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y",
"def svm_admm(X, y, mylambda=1., rho=1., rel_par=1., QUIET = False, MAX_ITER = 200, ABSTOL = 1e-6, RELTOL = 1e-2):\n if not QUIET:\n tic = time.time()\n m, n = X.shape \n y_raveld = y.ravel() \n # A is a matrix given by [-y_j*x_j -y_j]\n A = - np.dot(np.diag(y_raveld), np.concatenate((X, np.ones((m, 1))), axis = 1))\n\n #Data preprocessing\n m, n = A.shape\n \n #ADMM solver\n x = np.zeros((n, N))\n z = np.zeros((n, N))\n u = np.zeros((n, N))\n\n if not QUIET:\n print('\\n%3s\\t%10s\\t%10s\\t%10s\\t%10s\\t%10s' %('iter',\n 'r np.linalg.norm', \n 'eps pri', \n 's np.linalg.norm', \n 'eps dual', \n 'objective'))\n\n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n\n for k in range(MAX_ITER):\n # x-update \n for i in range(N):\n A_temp = A[i * num_per_batch: (i + 1) * num_per_batch, :]\n y_temp = y[i * num_per_batch: (i + 1) * num_per_batch, :]\n #\n # temp1 = -z[:, i] + u[:, i]\n # fun = lambda x: np.sum(np.maximum(np.dot(A_temp, x.reshape((n, 1))) + 1, np.zeros((num_per_batch, 1)))) + \\\n # rho/2. * np.dot(x + temp1, x + temp1)\n # # np.random.uniform(-1, 1, (n,1))\n # result = scipy.optimize.minimize(fun, 0.1 * np.ones((n, 1)), tol = 1e-8, method = 'Nelder-Mead')\n # x_temp = result.x\n #\n x_var = Variable(n)\n constraints = []\n objective = Minimize(sum_entries(pos( A_temp * x_var + 1)) + rho/2. * sum_squares((x_var - z[:, i] + u[:, i])))\n prob = Problem(objective, constraints)\n result = prob.solve()\n x_temp = x_var.value\n\n x_temp = x_temp.reshape((x_temp.shape[0], 1))\n x[:, i] = x_temp.ravel()\n\n xave = np.mean(x, axis = 1)\n\n # z-update\n zold = np.copy(z)\n x_hat = rel_par * x + (1. - rel_par) * zold\n z = N * rho/(1./mylambda + N * rho) * np.mean(x_hat + u, axis = 1)\n z = z.reshape((z.shape[0], 1))\n z = np.dot(z, np.ones((1, N))) # N columns of the same values\n\n # u-update\n u = u + x_hat - z\n\n # diagnostics, reporting, termination checks\n h['objval'][k] = myobjective(A, mylambda, x, z)\n h['r_norm'][k] = np.linalg.norm(x - z)\n h['s_norm'][k] = np.linalg.norm(rho * (z - zold))\n h['eps_pri'][k] = np.sqrt(n) * ABSTOL+ RELTOL * np.maximum(np.linalg.norm(x), np.linalg.norm(-z))\n h['eps_dual'][k] = np.sqrt(n) * ABSTOL + RELTOL * np.linalg.norm(rho * u)\n if not QUIET:\n print('%4d\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.2f' %(k + 1,\\\n h['r_norm'][k],\\\n h['eps_pri'][k],\\\n h['s_norm'][k],\\\n h['eps_dual'][k],\\\n h['objval'][k]))\n\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n\n if not QUIET:\n toc = time.time()-tic\n print(\"\\nElapsed time is %.2f seconds\"%toc)\n\n return z, h",
"def vec_factored_rolling(decays: jnp.ndarray) -> _InitUpdate:\n return _vmap_accumulator(factored_rolling, decays)",
"def lanczos_decomp(vector_prod_fn, scalar, n, k):\n Q = tf.zeros([n, 1])\n v = tf.random_uniform([n, 1])\n v = v / tf.norm(v)\n Q = tf.concat([Q, v], axis=1)\n\n # diagonals of the tridiagonal matrix\n beta = tf.constant(0.0, dtype=tf.float32, shape=[1])\n alpha = tf.constant(0.0, dtype=tf.float32, shape=[1])\n\n for i in range(k):\n v = vector_prod_fn(tf.reshape(Q[:, i+1], [n, 1])) - tf.scalar_mul(scalar, tf.reshape(Q[:, i+1], [n, 1]))\n v = tf.reshape(v, [n,])\n curr_alpha = tf.reshape(tf.reduce_sum(v * Q[:, i+1]), [1,])\n alpha = tf.concat([alpha, curr_alpha], axis=0)\n v = v-beta[-1]*Q[:, i]-alpha[-1]*Q[:, i+1]\n curr_beta = tf.reshape(tf.norm(v), [1,])\n beta = tf.concat([beta, curr_beta], axis=0)\n curr_norm = tf.reshape(v/(beta[-1]+1e-8), [n, 1])\n Q = tf.concat([Q, curr_norm], axis=1)\n\n alpha = tf.slice(alpha, begin=[1], size=[-1])\n beta = tf.slice(beta, begin=[1], size=[k-1])\n Q = tf.slice(Q, begin=[0, 1], size=[-1, k])\n return alpha, beta, Q",
"def Avv_func(f):\n\n def Avv(x, v):\n def F(s):\n return f(x + v * s)\n\n return jacfwd(jacfwd(F))(0.0)\n\n return Avv",
"def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)",
"def factor_mat(all_dat, f_num, iterations, regularization):\n\n\t# get # of users and # of items\n\t[u_num, i_num] = all_dat.shape\n\n\t# init user factors and item factors with random values\n\tu_fac = np.matrix(np.random.rand(u_num, f_num))\t# MxF\n\ti_fac = np.matrix(np.random.rand(i_num, f_num))\t# NxF\n\n\t# calculate the preference matrix\n\tpreference = cal_preference(all_dat)\n\n\t# calculate the confidence matrix\n\tconfidence = cal_confidence(all_dat)\n\t\n\t# recalculate the user factors and item factors using the alternating least square method\n\tfor itr in range(iterations):\n\t\tu_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization)\n\t\t#print itr, \"u_fac\"\n\t\ti_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization)\n\t\t#print itr, \"i_fac\"\n\t\n\t# save the output\n\tdf = pd.DataFrame(u_fac)\n\tdf.to_csv(\"tmp/u_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\tdf = pd.DataFrame(i_fac.T)\n\tdf.to_csv(\"tmp/i_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\n\t# an MxF user factor matrix and an FxN item factor matrix\n\treturn [u_fac, i_fac.T]",
"def mylinearsvm(beta, lambd, x, y, step_size_init, eps=0.0000001, max_iter=100):\n theta = beta\n t = step_size_init\n grad_beta = grad(beta, lambd, x, y)\n beta_vals = [beta]\n objs = [obj(beta, lambd, x, y)]\n iter = 0\n while np.linalg.norm(grad_beta) > eps and iter < max_iter: \n # THE CODE BELOW SO IT USES BACKTRACKING LINE SEARCH INSTEAD OF A CONSTANT STEP SIZE\n t = backtracking(beta, lambd=lambd, x=x, y=y, step_size=t)\n # THE CODE BELOW USES UPDATING THETA FOR BETA OPTIMAZATION\n beta = theta - t*grad_beta\n theta = beta + (iter/(iter+3))*(beta - beta_vals[-1])\n obj_val = obj(beta,lambd, x, y)\n beta_vals.append(beta)\n objs.append(obj_val)\n grad_beta = grad(theta, lambd, x, y)\n iter += 1\n \n return np.array(beta_vals), np.array(objs)",
"def mylinearsvm(lambdat, eta_init, maxiter, X, y):\n d = np.size(X, 1)\n beta_init = np.zeros(d)\n theta_init = np.zeros(d)\n betas, objs = fast_grad(beta_init, theta_init, lambdat, eta_init, maxiter,X=X,y=y)\n return betas, objs",
"def _fd_matrix(step_ratio, parity, nterms):\n _assert(0 <= parity <= 6,\n 'Parity must be 0, 1, 2, 3, 4, 5 or 6! ({0:d})'.format(parity))\n step = [1, 2, 2, 4, 4, 4, 4][parity]\n inv_sr = 1.0 / step_ratio\n offset = [1, 1, 2, 2, 4, 1, 3][parity]\n c0 = [1.0, 1.0, 1.0, 2.0, 24.0, 1.0, 6.0][parity]\n c = c0 / \\\n special.factorial(np.arange(offset, step * nterms + offset, step))\n [i, j] = np.ogrid[0:nterms, 0:nterms]\n return np.atleast_2d(c[j] * inv_sr ** (i * (step * j + offset)))",
"def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V",
"def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)",
"def projective_factorization(x, max_iterations=1):\n\n n_views = len(x)\n n_points = x[0].shape[1]\n\n iterations = 0\n\n #lambda matrix, approximate depths\n l = np.ones((n_views, n_points))\n\n #normalization matrices\n norm_matrices = []\n\n # normalize coordinates\n xn = np.zeros((3*n_views, n_points))\n for i in range(n_views):\n\n #find normalization matrix for projections i\n x_norm, T = normalize_points(x[i], is_homogeneous=True)\n xn[3*i:3*(i+1), :] = x_norm\n norm_matrices.append(T)\n\n while iterations < max_iterations:\n # normalize the lambda matrix\n lr_norm = norm(l, axis=1)\n ln = l / lr_norm[:, np.newaxis]\n lc_norm = norm(ln, axis=0)\n ln /= lc_norm\n\n # repeat the lambdas\n ln = np.repeat(ln, 3, axis=0)\n\n #build the factorization matrix\n fact_matrix = ln*xn\n\n u, d, vh = svd(fact_matrix)\n\n print(d[3] / d[4])\n d = d[:4]/d[0]\n\n # from the svd decomposition we can find the projections and 3d points\n p_matrices = u[:, :4]\n x_3d = np.dot(np.diag(d), vh[:4, :])\n\n iterations += 1\n if iterations != max_iterations:\n\n w_matrix = np.dot(p_matrices, x_3d)\n\n for i in range(n_views):\n l[i, :] = w_matrix[3*i+2, :]\n\n cameras = []\n\n for i in range(n_views):\n # denormalize camera matrices\n c_matrix = np.dot(inv(norm_matrices[i]), p_matrices[3*i:3*(i+1), :])\n\n cameras.append(c_matrix)\n\n return cameras, x_3d",
"def nonnegative_tensor_factorization(X, r, method='anls_bpp',\n tol=1e-4, stop_criterion=1,\n min_iter=20, max_iter=200, max_time=1e6,\n init=None, orderWays=None):\n\n nWay = len(X.shape)\n\n if orderWays is None:\n orderWays = np.arange(nWay)\n\n # set initial values\n if init is not None:\n F_cell = init\n else:\n Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]\n F_cell = Finit\n\n grad = getGradient(X, F_cell, nWay, r)\n\n nr_X = X.norm()\n nr_grad_all = np.sqrt(np.sum(np.linalg.norm(grad[i], 'fro') ** 2\n for i in range(nWay)))\n\n if method == \"anls_bpp\":\n method = anls_bpp()\n elif method == \"anls_asgroup\":\n method = anls_asgroup()\n else:\n raise Exception(\"Unknown method\")\n\n # Execute initializer\n F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays)\n\n tStart = time.time()\n\n if stop_criterion == 2:\n F_kten = ktensor(F_cell)\n rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n\n # main iterations\n for iteration in range(max_iter):\n cntu = True\n\n F_cell, FF_init = method.iterSolver(X, F_cell,\n FF_init, nWay, r, orderWays)\n F_kten = ktensor(F_cell)\n\n if iteration >= min_iter:\n\n if time.time() - tStart > max_time:\n cntu = False\n\n else:\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n if SC_PGRAD < tol:\n cntu = False\n\n elif stop_criterion == 2:\n prev_rel_Error = rel_Error\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n SC_DIFF = np.abs(prev_rel_Error - rel_Error)\n if SC_DIFF < tol:\n cntu = False\n else:\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n if rel_Error < 1:\n cntu = False\n\n if not cntu:\n break\n\n return F_kten",
"def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError",
"def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out",
"def _get_mult_function_runtime_sparse(k_list, l_list, m_list, mult_table_vals, n_dims):\n @numba.njit\n def mv_mult(value, other_value):\n output = np.zeros(n_dims)\n for ind, k in enumerate(k_list):\n v_val = value[k]\n if v_val != 0.0:\n m = m_list[ind]\n ov_val = other_value[m]\n if ov_val != 0.0:\n l = l_list[ind]\n output[l] += v_val * mult_table_vals[ind] * ov_val\n return output\n\n return mv_mult",
"def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper",
"def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper",
"def getLinearizedMatrices(model_type: ModelType, operating_point, Vf_op, Vb_op):\n\n p_op, e_op, lamb_op, dp_op, de_op, dlamb_op = operating_point\n\n # Vf_op, Vb_op = compute_feed_forward_flatness(e_and_derivatives, lambda_and_derivatives)\n Vs_op = Vf_op + Vb_op\n Vd_op = Vf_op - Vb_op\n\n if model_type == ModelType.EASY:\n A = np.array([[0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0],\n [-L3 * Vs_op * sin(p_op) / Je_static, -L2 * sin(e_op) / Je_static, 0, 0, 0, 0],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, 0, 0, 0, 0, 0]])\n elif model_type == ModelType.FRICTION:\n A = np.array([[0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, -mc.d_p / Jp_static, 0, 0],\n [-L3 * Vs_op * sin(p_op) / Je_static, -L2 * sin(e_op) / Je_static, 0, 0, -mc.d_e / Je_static, 0],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, -L4 * Vs_op * sin(p_op) * sin(e_op) / Jl_static, 0, 0, 0, -mc.d_l / Jl_static]])\n elif model_type == ModelType.CENTRIPETAL:\n A = np.array([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1],\n [-(de_op ** 2 - dlamb_op ** 2 * cos(e_op) ** 2) * sin(p_op) ** 2 + (de_op ** 2 - dlamb_op ** 2 * cos(e_op) ** 2) * cos(p_op) ** 2, 2 * dlamb_op ** 2 * sin(p_op) * sin(e_op) * cos(p_op) * cos(e_op), 0, -mc.d_p / Jp_static, 2 * de_op * sin(p_op) * cos(p_op), -2 * dlamb_op * sin(p_op) * cos(p_op) * cos(e_op) ** 2],\n [-L3 * Vs_op * sin(p_op) / Je_static, dlamb_op ** 2 * sin(e_op) ** 2 - dlamb_op ** 2 * cos(e_op) ** 2 - L2 * sin(e_op) / Je_static, 0, 0, -mc.d_e / Je_static, -2 * dlamb_op * sin(e_op) * cos(e_op)],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, -L4 * Vs_op * sin(p_op) * sin(e_op) / Jl_static, 0, 0, 0, -mc.d_l / Jl_static]])\n\n B = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [L1 / Jp_static, -L1 / Jp_static],\n [L3 / Je_static * cos(p_op), L3 / Je_static * cos(p_op)],\n [L4 * sin(p_op) * cos(e_op) / Jl_static, L4 * sin(p_op) * cos(e_op) / Jl_static]])\n\n return A, B, Vf_op, Vb_op",
"def force_list_lookup(X, V, lt, iparams, blist, sp):\n N = len(X)\n force_cube = np.zeros((N, N, 3))\n cell = sp.L*np.eye(3)\n inv_cell = np.linalg.pinv(cell)\n for i in range(N):\n for j in range(i):\n if lt[i, j]:\n dr = X[i] - X[j] # rij = ri - rj\n G = np.dot(inv_cell, dr)\n G_n = G - np.round(G)\n dr_n = np.dot(cell, G_n)\n v_ij = V[i] - V[j] # vij = vi - vj\n force_cube[i, j, :] = \\\n F_tot(dr_n, v_ij, iparams[(blist[i], blist[j])], sp)\n \n force_cube -= np.transpose(force_cube, (1, 0, 2))\n return np.sum(force_cube, axis=1)",
"def SVM_train(Ktrain,y,lbda_vec):\r\n n = Ktrain.shape[0]\r\n for idx, lbda in enumerate(lbda_vec): \r\n C = 1/(2*lbda*n)\r\n P = matrix(Ktrain, tc=\"d\")\r\n q = - matrix(y,tc=\"d\")\r\n G = matrix( np.concatenate( (np.diagflat(y) , -np.diagflat(y) ), axis=0 ),tc=\"d\" )\r\n h1 = C * np.ones((n,1))\r\n h2 = np.zeros((n,1)) \r\n h = matrix(np.concatenate((h1,h2),axis=0))\r\n\r\n solvers.options['show_progress'] = False\r\n \r\n sol = solvers.qp(P,q,G,h) \r\n a = np.asarray(sol['x'])\r\n\r\n #alpha is sparse\r\n a[np.where(np.abs(a) < 1e-4)] = 0\r\n y_svm = np.dot(Ktrain,a)\r\n\r\n print(\"Précision pour lambda = \" + str(lbda) + \" :\", accuracy(y_svm,y))",
"def beta_A_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n beta_A = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.beta_A_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n beta_A[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n return beta_A",
"def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T",
"def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma",
"def als(user_ids : numpy.ndarray, item_ids : numpy.ndarray,\n ratings : numpy.ndarray, num_item_factors : int,\n num_users: int, num_items : int, min_r_decrease=0.01,\n max_iterations=200, algorithm=1):\n # allocate \"user_factors\" and \"item_factors\"\n num_user_factors = num_item_factors + 1\n user_factors = numpy.random.uniform(-1, 1, num_users * num_user_factors)\n item_factors = numpy.random.uniform(-1, 1, num_items * num_item_factors)\n\n # argument construction\n user_ids_ptr = user_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n item_ids_ptr = item_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n ratings_length = len(ratings)\n ratings_ptr = ratings.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n user_factors_length = len(user_factors)\n user_factors_ptr = user_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n item_factors_length = len(item_factors)\n item_factors_ptr = item_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n iterations = _dll.als_from_python(\n user_ids_ptr, item_ids_ptr, ratings_length, ratings_ptr,\n num_item_factors, user_factors_length, user_factors_ptr,\n item_factors_length, item_factors_ptr, ctypes.c_double(min_r_decrease),\n max_iterations, algorithm)\n\n return user_factors, item_factors, iterations"
] | [
"0.7236",
"0.69279444",
"0.60326505",
"0.5435932",
"0.53381103",
"0.5186424",
"0.50212055",
"0.49177843",
"0.4913222",
"0.48340198",
"0.47836974",
"0.4736391",
"0.47337383",
"0.47162333",
"0.467649",
"0.46651557",
"0.46549806",
"0.46410066",
"0.4637888",
"0.4630094",
"0.46174133",
"0.45679438",
"0.45679438",
"0.45600614",
"0.45520654",
"0.4549737",
"0.45299256",
"0.45068708",
"0.4501668",
"0.4493616"
] | 0.79677343 | 0 |
Compute an m-step arnoldi factorization of `matvec`, with m = min(`it`, `num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and | def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,
eps):
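  # normalize the starting vector and store it as the krylov vector at row `start`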
Z = jax.numpy.linalg.norm(v0)
v = v0 / Z
krylov_vectors = jax.ops.index_update(krylov_vectors,
jax.ops.index[start, :],
jax.numpy.ravel(v))
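  # on a restart (start > 0) the norm of the residual is written to the
  # subdiagonal entry H[start, start - 1] so the factorization is extended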
H = jax.lax.cond(
start > 0, start,
lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,
lambda x: H)
# body of the arnoldi iteration
def body(vals):
krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals
Av = matvec(vector, *args)
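    # orthogonalize Av against the first i + 1 krylov vectors with modified
    # Gram-Schmidt; the overlaps are written into column i of H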
initial_vals = [Av, krylov_vectors, i, H]
Av, krylov_vectors, _, H = jax.lax.fori_loop(
0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)
norm = jax.numpy.linalg.norm(Av)
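    # normalize the new krylov vector and record its norm on the subdiagonal of H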
Av /= norm
H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)
krylov_vectors = jax.ops.index_update(krylov_vectors,
jax.ops.index[i + 1, :],
jax.numpy.ravel(Av))
return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]
def cond_fun(vals):
# Continue loop while iteration < num_krylov_vecs and norm > eps
_, _, _, _, norm, _, iteration, _ = vals
counter_done = (iteration >= num_krylov_vecs)
norm_not_too_small = norm > eps
continue_iteration = jax.lax.cond(counter_done,
_, lambda x: False,
_, lambda x: norm_not_too_small)
return continue_iteration
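  # seed the loop with a norm larger than `eps` so the body runs at least once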
initial_norm = v.real.dtype.type(1.0+eps)
initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,
num_krylov_vecs]
final_values = jax.lax.while_loop(cond_fun, body, initial_values)
kvfinal, Hfinal, _, _, norm, _, it, _ = final_values
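  # `norm < eps` flags early termination, i.e. an (approximately) invariant subspace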
return kvfinal, Hfinal, it, norm < eps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]",
"def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out",
"def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact",
"def calc_kmatrix_magnetic_psi(kvec, mlat, eps=1e-11):\n # First use mlat to create (angs, num_neis, bls, tvals, ons)\n #\n # angs : list\n # each row represents a site in the lattice. Each entry in the row represents the angles to that site's\n # neighbors\n # num_nei : list or array (num_sites x num_sites)\n # Tells how many neighbors of on each kind of sublattice. For example a honeycomb lattice would be\n # num_nei = [[0,3], [3,0]] because each point has 3 neighbors of the other lattice type.\n # bls : len(angs) x float array or int\n # bondlengths, with dimensions equal to dimensions of angs.\n # default value is an int, -1, indicating that all bond lengths are 1\n # tvals : len(angs) x 1 float array or int\n # dimension equal to number of different kinds of springs in unit cell x 1. represents omega_k\n # ons : array (dimension = num_sites per unit cell)\n # represents omega_g\n xy = mlat.lattice.xy\n NL, KL = mlat.NL, mlat.KL\n num_sites, NN = np.shape(NL)\n Omg, OmK = mlat.Omg, mlat.OmK\n PVx, PVy = mlat.PVx, mlat.PVy\n if PVx is None or PVy is None:\n PVx = np.zeros_like(NL, dtype=float)\n PVy = np.zeros_like(NL, dtype=float)\n\n # num_sites is the total number of particles\n mm = np.zeros([2 * num_sites, 2 * num_sites], dtype='complex128')\n\n # checking\n # print 'np.shape(Omg) = ', np.shape(Omg)\n # print 'np.shape(NL) = ', np.shape(NL)\n # print 'np.shape(PVx) = ', np.shape(PVx)\n\n # Go through each site and fill in rows i and NP + i for that site (psi_L and psi_R)\n kk = 0\n for ii in mlat.inner_indices:\n # grav frequency for this particle (note the difference in indexing is due to inner/outer split)\n omg = Omg[kk]\n\n # pinning/gravitational matrix -- note: will divide later by factor of -2\n mm[ii, ii] += -2. * omg\n mm[num_sites + ii, num_sites + ii] += 2. * omg\n\n for nn in range(NN):\n # the index of the gyroscope i is connected to (particle j)\n ni = NL[ii, nn]\n # true connection?\n k = KL[ii, nn]\n # spring frequency for this connection\n omk = OmK[ii, nn]\n\n if abs(k) > eps:\n # Compute the vector connecting site ii to site ni\n # We index PVx as [i,nn] since it is the same shape as NL (and corresponds to its indexing)\n diffx = xy[ni, 0] - xy[ii, 0] + PVx[ii, nn]\n diffy = xy[ni, 1] - xy[ii, 1] + PVy[ii, nn]\n alphaij = np.arctan2(diffy, diffx)\n\n rij_mag = np.sqrt(diffx ** 2 + diffy ** 2)\n # print 'rij mag', rij_mag\n if rij_mag < eps:\n raise RuntimeError('Distance between connected sites is very near zero (less than epsilon)!')\n rij_mag = 1\n\n # get the magnitude of l, the length of the pendulum, wrt unit length\n als = rij_mag ** 2 * (mlat.lp['aoverl']) ** 2\n\n # These are Nash SI eqn S6, multiplied by (l^2/I\\omega)\n fpara_p = - omk * (1 - (1. / 12.) * als) / rij_mag ** 5\n fpara_q = omk * (1 + (1. / 6.) * als) / rij_mag ** 5\n fperp_p = omk * 0.25 * (1 + (1. / 3.) * als) / rij_mag ** 5\n fperp_q = -omk * 0.25 * (1 + (1. / 3.) * als) / rij_mag ** 5\n\n omk_i_plus = fpara_p + fperp_p\n omk_i_minus = fpara_p - fperp_p\n omk_j_plus = fpara_q + fperp_q\n omk_j_minus = fpara_q - fperp_q\n\n # Form kfactor\n if np.abs(PVx[ii, nn]) > eps or np.abs(PVy[ii, nn]) > eps:\n kfactor = np.exp(1j * (PVx[ii, nn] * kvec[0] + PVy[ii, nn] * kvec[1]))\n else:\n kfactor = 1.0\n\n # Create phase factors\n expi2t = np.exp(1j * 2. * alphaij)\n exp_negi2t = np.exp(-1j * 2. 
* alphaij)\n\n # (psi_L psi_L components)\n # add top left chunk: -/+1/2 Omk, note: will divide by -2 later\n mm[ii, ii] += omk_i_plus\n if ni in mlat.inner_indices:\n mm[ii, ni] += -omk_j_plus * kfactor\n\n # (psi_L psi_R components) top right chunk\n mm[ii, ii + num_sites] += omk_i_minus * expi2t\n if ni in mlat.inner_indices:\n mm[ii, ni + num_sites] += -omk_j_minus * expi2t * kfactor\n\n # (psi_R psi_L components) bottom left chunk\n mm[ii + num_sites, ii] += -omk_i_minus * exp_negi2t\n if ni in mlat.inner_indices:\n mm[ii + num_sites, ni] += omk_j_minus * exp_negi2t * kfactor\n\n # (psi_R psi_R components) bottom right chunk\n mm[ii + num_sites, ii + num_sites] += -omk_i_plus\n if ni in mlat.inner_indices:\n mm[ii + num_sites, ni + num_sites] += omk_j_plus * kfactor\n\n kk += 1\n\n return 0.5 * mm * (-1j)",
"def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod",
"def sparse_expectation(mat, vec):\n return np.vdot(vec, mat.dot(vec)).real",
"def evolve(self, k_vec, Nt,**kwargs):\n \n M_eff = np.eye((self.Nd), dtype=complex) # aux matrix\n T = 1.\n for it in range(Nt):\n \n # update the Hamiltonian for time-inteval\n self.updateH(k_vec, it)\n\n # return eigenenergies and vectors\n E_k, U = lg.eig(self.H_kc) \n\n # U^-1 * exp(H_d) U\n U_inv = lg.inv(U)\n\n # construct a digonal matrix out of a vector\n M1 = (np.exp(-1.j*E_k*T) * U_inv.T).T\n\n #MM = np.dot(U_inv,np.dot(H_M, U))\n MM = np.dot(U,M1)\n M_eff = np.dot(M_eff,MM)\n # end of loop\n Ek, Uk = lg.eig( M_eff )\n idx = (np.log(Ek).imag).argsort()\n Efl_k = np.log(Ek).imag[idx]\n Ufl_k = Uk[idx]\n return Efl_k, Ufl_k",
"def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs",
"def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x",
"def _matvec(x):\n return _normal_matvec(matvec, x)",
"def eigsh(A, M = None, k = 6, sigma = None, which = 'LM', v0=None,\n ncv = None, maxiter = None, tol = 0., return_eigenvectors = True,\n Minv = None, OPinv = None, mode = 'normal'):\n if M is not None:\n raise NotImplementedError(\"M is not currently supported!\")\n if v0 is not None:\n raise NotImplementedError(\"v0 is not currently supported!\")\n if ncv is not None:\n raise NotImplementedError(\"ncv is not currently supported!\")\n if Minv is not None:\n raise NotImplementedError(\"Minv is not currently supported!\")\n if OPinv is not None:\n raise NotImplementedError(\"OPinv is not currently supported!\")\n inp_data = FrovedisFeatureData(A, dense_kind='rowmajor')\n X = inp_data.get()\n x_dtype = inp_data.get_dtype()\n x_itype = inp_data.get_itype()\n dense = inp_data.is_dense()\n nrows = inp_data.numRows()\n ncols = inp_data.numCols()\n\n if nrows != ncols:\n raise ValueError('expected squared symmetric matrix (shape=%s)' % (inp_data.shape,))\n if k <= 0:\n raise ValueError('k must be greater than 0.')\n if k >= nrows:\n raise ValueError('k must be less than or equal to N for N * N square matrix.')\n if sigma is not None and not dense:\n raise ValueError('currently sigma is only supported for dense matrices.')\n if sigma is None:\n sigma = np.finfo(np.float32).max\n\n if which not in ['LM', 'SM', 'LA', 'SA', 'BE']:\n raise ValueError('which must be one of LM, SM, LA, SA, or BE')\n if mode in ['buckling', 'cayley']:\n raise ValueError('currenly normal mode is only supported!')\n if maxiter is None:\n maxiter = 10 * nrows\n wantEv = return_eigenvectors\n (host, port) = FrovedisServer.getServerInstance()\n res = rpclib.compute_eigsh(host, port, X.get(),\n k, which.encode('ascii'),\n sigma, maxiter, wantEv,\n tol, x_dtype,\n x_itype, dense)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n sptr = res[\"eigenval\"]\n uptr = res[\"eigenvec\"]\n m_m = res['m']\n k_k = res['k']\n eigval = FrovedisVector({'dptr' : sptr, 'size' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n if wantEv:\n eigvec = FrovedisDenseMatrix('C', {'dptr' : uptr, 'nrow' : m_m, 'ncol' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n return eigval, eigvec\n else:\n return eigval",
"def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H",
"def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x",
"def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = 
eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos",
"def get_eigvals_eigvects(\n num_layers,\n numeric_matrices_eV_over_angsquared,\n layer_mass_amu,\n use_banded_algorithm=False,\n):\n # Based on the units in input, and indicating with:\n # - [hbar omega] the numeric value for the frequency in meV => hbar omega = [hbar omega] * meV\n # - [K] the numeric value of K in eV/ang^2\n # - [m] the layer mass in amu\n # we have (we omit the sign, and for units considerations we 'drop' U):\n # omega^2 = K / m =>\n # (hbar omega)^2 = hbar^2 * K / m =>\n # [hbar omega]^2 * meV^2 = hbar^2 * [K] / [m] * eV/ang^2 / amu = [K] / [m] * hbar^2 * eV/ang^2 / amu =>\n # [hbar omega]^2 = = [K] / [m] * ( hbar^2 * eV/ang^2 / amu / meV^2 )\n # so that the conversion factor is the last bracketed term:\n # conversion_factor = hbar^2 * eV / (angstrom^2 * amu * meV^2)\n conversion_factor = 4180.15925\n # NOTE: for simplicity, the conversion is applied at the very end\n\n if use_banded_algorithm:\n # 3 blocks (below, same layer, and above) of size 3 => total width of 9\n # Since we only store the upper part, we only need a width of 4 (diagonal + 3 superdiagonals)\n K_matrix = np.zeros((4, num_layers * 3))\n else:\n K_matrix = np.zeros((num_layers * 3, num_layers * 3))\n\n # Note: I construct -K, actually\n for block_idx in range(num_layers):\n # Interaction with upper layer\n if block_idx < num_layers - 1: # Not in the last layer\n current_block = np.array(\n numeric_matrices_eV_over_angsquared[\n block_idx % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx + 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n # Interaction with lower layer\n if block_idx > 0: # Not in the first layer\n previous_block = np.array(\n numeric_matrices_eV_over_angsquared[\n (block_idx - 1) % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx - 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n\n # We want to get the eigenvalues of omega^2 U = - 1/M_layer K U\n K_matrix /= layer_mass_amu\n\n # Get frequencies (eigvals) and eigenvectors (for mode analysis)\n if use_banded_algorithm:\n eigvals, eigvects = scipy.linalg.eig_banded(K_matrix, lower=False)\n else:\n eigvals, eigvects = np.linalg.eigh(K_matrix)\n\n eigvals *= conversion_factor\n\n ## The first three should be acoustic i.e. almost zero; the rest should be positive\n ## I don't check as depending on the units it's hard to define a correct absolute energy\n # assert np.sum(np.abs(eigvals[:3])) < 1.0e-8\n\n # Remove the first three acoustic modes\n return eigvals[3:], eigvects[:, 3:]",
"def FV_moVMF(xx, vmf):\n \n # Attributes of the moVMF.\n #mean_dir = vmf.cluster_centers_ # Shape: (K, d)\n kappa = vmf.concentrations_ # Shape: (K, )\n weights = vmf.weights_ # Shape: (K, )\n n_comps = vmf.n_clusters # Integer scalar\n \n # Encoded document.\n xx = np.atleast_2d(xx) # Shape: (T, d) \n xx = normalize(xx) # Normalize input data\n T = xx.shape[0] # Doc. length\n d = xx.shape[1] # Dimensionality of word/feat. vectors\n \n # Array to store the result.\n out = np.zeros((n_comps, d), dtype=np.float32) # Shape: (K, d)\n \n # Posterior probabilities.\n probs = vmf.log_likelihood(xx) # Shape: (T, K)\n \n # Vectorization of the sum over t of `gamma_t(i)*x_t`.\n probs_xx = np.dot(probs, xx) # Shape: (K, d)\n \n # Derivatives with respect to the mean directions.\n d_mean = d * probs_xx # Shape: (K, d)\n \n # Normalization.\n eps = 1e-6 # Avoids dividing by 0\n np.divide(d_mean, (kappa.reshape((n_comps, 1)) + eps), out=d_mean)\n \n out = d_mean / (weights.reshape((n_comps, 1)) + eps)\n \n return out.flatten()",
"def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K",
"def SVM_train(Ktrain,y,lbda_vec):\r\n n = Ktrain.shape[0]\r\n for idx, lbda in enumerate(lbda_vec): \r\n C = 1/(2*lbda*n)\r\n P = matrix(Ktrain, tc=\"d\")\r\n q = - matrix(y,tc=\"d\")\r\n G = matrix( np.concatenate( (np.diagflat(y) , -np.diagflat(y) ), axis=0 ),tc=\"d\" )\r\n h1 = C * np.ones((n,1))\r\n h2 = np.zeros((n,1)) \r\n h = matrix(np.concatenate((h1,h2),axis=0))\r\n\r\n solvers.options['show_progress'] = False\r\n \r\n sol = solvers.qp(P,q,G,h) \r\n a = np.asarray(sol['x'])\r\n\r\n #alpha is sparse\r\n a[np.where(np.abs(a) < 1e-4)] = 0\r\n y_svm = np.dot(Ktrain,a)\r\n\r\n print(\"Précision pour lambda = \" + str(lbda) + \" :\", accuracy(y_svm,y))",
"def get_vf_matrix(self, geom_dict, view_matrix, obstr_matrix, list_pvrow):\n n_all_surfaces = view_matrix.shape[0]\n view_factors = np.zeros((n_all_surfaces, n_all_surfaces), dtype=float)\n\n # --- First deal with finite surfaces from the registry, and treat only\n # half of the views because symmetry will be used next\n n_finite_surfaces = n_all_surfaces - 1 # no sky\n view_matrix_upper_finite_surfaces = np.triu(\n view_matrix[:n_finite_surfaces, :n_finite_surfaces])\n indices_views_finite = np.where(view_matrix_upper_finite_surfaces)\n\n n_views = len(indices_views_finite[0])\n geometries = list(geom_dict.values())\n for i in range(n_views):\n idx = (indices_views_finite[0][i], indices_views_finite[1][i])\n view = self.mapper.reverse_view[view_matrix[idx]]\n line_i = geometries[idx[0]]\n line_j = geometries[idx[1]]\n obstr_index = obstr_matrix[idx]\n if obstr_index is not None:\n obstructing_pvrow = list_pvrow[obstr_matrix[idx]]\n else:\n obstructing_pvrow = None\n # The following line takes the most time to execute (looped)\n view_factors[idx] = self.mapper.function_mapping[view](\n line_i, line_j, obstructing_pvrow)\n\n # Use the reciprocity property of view factors to speed up the\n # vfactor calculation: A_1 * F_1-2 = A_2 * F_2-1 ==> symmetric matrx\n areas = np.array([surf.length for surf in geometries])\n matrix_areas = np.diag(areas)\n matrix_areas_inv = np.diag(1. / areas)\n\n upper_matrix_reciprocity = np.dot(matrix_areas,\n view_factors[:n_finite_surfaces,\n :n_finite_surfaces])\n\n total_matrix_reciprocity = (upper_matrix_reciprocity +\n upper_matrix_reciprocity.T)\n finite_vf_matrix = np.dot(matrix_areas_inv, total_matrix_reciprocity)\n view_factors[:n_finite_surfaces, :n_finite_surfaces] = finite_vf_matrix\n\n # --- Then do the calculations for the sky, which is the remaining\n # portion of the hemisphere\n view_factors[:-1, -1] = 1. - np.sum(view_factors[:-1, :-1], axis=1)\n return view_factors",
"def sparse_matlab(i, j, v, m, n):\n return csr_matrix((v, (i, j)), shape=(m, n))",
"def _safe_inv22_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n\n # We set delta_inv to 0. in case of a rank deficient matrix ; a\n # rank-deficient input matrix *M* will lead to a null matrix in output\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n if np.all(rank2):\n # Normal 'optimized' flow.\n delta_inv = 1./delta\n else:\n # 'Pathologic' flow.\n delta_inv = np.zeros(M.shape[0])\n delta_inv[rank2] = 1./delta[rank2]\n\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv",
"def minkowskiArrayDot(X, vec):\n MDP_max = -(1 + 1e-10)\n k = X.shape[1]\n vec = vec.reshape((k, -1))\n mod = np.ones(vec.shape)\n mod[-1] = -1\n MDP = np.matmul(X, vec*mod)\n #MDP[MDP > MDP_max] = MDP_max\n return MDP",
"def get_leftLaInv(k_list, l_list, m_list, mult_table_vals, n_dims, gradeList):\n\n identity = np.zeros((n_dims,))\n identity[gradeList.index(0)] = 1\n\n @numba.njit\n def leftLaInvJIT(value):\n intermed = np.zeros((n_dims, n_dims))\n for test_ind, i in enumerate(k_list):\n j = l_list[test_ind]\n k = m_list[test_ind]\n intermed[i, j] += mult_table_vals[test_ind] * value[k]\n intermed = np.transpose(intermed)\n if abs(linalg.det(intermed)) < _eps:\n raise ValueError(\"multivector has no left-inverse\")\n sol = linalg.solve(intermed, identity)\n return sol\n\n return leftLaInvJIT",
"def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)",
"def solve_eq(xVec):\n \n PSI = xVec[0:vecLen] \n Cxx = xVec[1*vecLen:2*vecLen] \n Cyy = xVec[2*vecLen:3*vecLen] \n Cxy = xVec[3*vecLen:4*vecLen]\n\n\n # Useful Vectors\n Txx = oneOverWi * Cxx \n Txx[N*M] -= oneOverWi\n Tyy = oneOverWi * Cyy \n Tyy[N*M] -= oneOverWi\n Txy = oneOverWi * Cxy\n\n U = + dot(MDY, PSI)\n V = - dot(MDX, PSI)\n LAPLACPSI = dot(LAPLAC, PSI)\n\n # Useful Operators\n MMU = tsm.c_prod_mat(U)\n MMV = tsm.c_prod_mat(V)\n VGRAD = dot(MMU,MDX) + dot(MMV,MDY)\n MMDXU = tsm.c_prod_mat(dot(MDX, U))\n MMDXV = tsm.c_prod_mat(dot(MDX, V))\n MMDYU = tsm.c_prod_mat(dot(MDY, U))\n MMDYV = tsm.c_prod_mat(dot(MDY, V))\n\n MMDXPSI = tsm.c_prod_mat(dot(MDX, LAPLACPSI))\n MMDXCXX = tsm.c_prod_mat(dot(MDX, Cxx))\n MMDXCYY = tsm.c_prod_mat(dot(MDX, Cyy))\n MMDXCXY = tsm.c_prod_mat(dot(MDX, Cxy))\n\n #######calculate the Residuals########\n\n residualsVec = zeros((4*vecLen), dtype='complex')\n\n #####psi\n residualsVec[0:vecLen] = - Re*dot(MMU, dot(MDX, LAPLACPSI)) \\\n - Re*dot(MMV, dot(MDY, LAPLACPSI)) \\\n + beta*dot(BIHARM, PSI) \\\n - (1.-beta)*(dot(MDXX, Txy) + dot(MDXY, (Tyy - Txx)) \\\n - dot(MDYY, Txy))\n\n #####xx\n residualsVec[vecLen:2*vecLen] = - dot(VGRAD, Cxx) \\\n + 2.*dot(MMDXU, Cxx) \\\n + 2.*dot(MMDYU, Cxy) - Txx\n\n #####yy\n residualsVec[2*vecLen:3*vecLen] = - dot(VGRAD, Cyy) \\\n + 2.*dot(MMDXV, Cxy) \\\n + 2.*dot(MMDYV, Cyy) - Tyy\n\n #####xy\n residualsVec[3*vecLen:4*vecLen] = - dot(VGRAD, Cxy) \\\n + dot(MMDXV, Cxx) + dot(MMDYU, Cyy)\\\n - Txy\n\n #####psi0\n residualsVec[N*M:(N+1)*M] = - Re*dot(VGRAD, U)[N*M:(N+1)*M] \\\n + beta*dot(MDYYY, PSI)[N*M:(N+1)*M] \\\n + (1.-beta)*dot(MDY,Txy)[N*M:(N+1)*M]\n # set the pressure gradient (pressure driven flow)\n # residualsVec[N*M] += 2.0\n\n # set the forcing on the zeroth mode for non pressure driven flow.\n residualsVec[N*M:(N+1)*M] += forcingVec\n\n\n ##### Apply boundary conditions to residuals vector\n\n # dxPsi = 0 \n for k in range (2*N+1): \n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-2] = dot((k-N)*kx*BTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-1] = dot((k-N)*kx*BBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi(+-1) = 0 \n for k in range (2*N+1):\n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-4] = dot(DERIVTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-3] = dot(DERIVBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi0(+-1) = +-1\n residualsVec[N*M + M-3] = dot(DERIVTOP, PSI[N*M:(N+1)*M]) - 1.\n residualsVec[N*M + M-2] = dot(DERIVBOT, PSI[N*M:(N+1)*M]) + 1.\n\n # Psi0(-1) = 0\n residualsVec[N*M + M-1] = dot(BBOT, (PSI[N*M:(N+1)*M]))\n\n return (residualsVec)",
"def matTimesVec(M, x):\n return [dot(m, x) for m in M]",
"def one_step(self):\r\n assert (self.uv_vol is not None)\r\n assert (self.guv_vol is not None)\r\n assert (self.uv_bound is not None)\r\n assert (self.vf_vect_bound is not None)\r\n assert (self.vF_vect_vol is not None)\r\n # Shape checks\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])\r\n assert (self.uv_vol.shape == self.guv_vol.shape)\r\n assert (self.uv_vol.shape == self.uv_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])\r\n \r\n if self.step == 0:\r\n self.check_k_matrix_stability()\r\n # print(\"Epsilon is :\"+str(self.Epsilon))\r\n # print(\"Beta is :\"+str(self.Beta))\r\n\r\n # Form \"Stiffness\" matrix:\r\n K = self.make_k_matrix()\r\n # Form \"Force\" vector: \r\n f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound\r\n\r\n # print(\"FORCE VECTOR:\")\r\n # print(f)\r\n # print(\"STIFFNESS MATRIX\")\r\n # print(K)\r\n # print(\"UV_VOL\")\r\n # print(self.uv_vol)\r\n # print(\"EPSILON * GUV_VOL\")\r\n # print(self.Epsilon * self.guv_vol)\r\n # print(\"UV_BOUND * COEFF\")\r\n # print((self.Epsilon / self.Beta) * self.uv_bound)\r\n sol = scipy_sparse_linsolve(K, f)\r\n # print(\"SOLUTION\")\r\n # print(sol)\r\n return sol",
"def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. / tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv",
"def _matvec(self, x):\n \n x = x.reshape((self.NH,))\n #\n # Compute kinetic energy operator\n #\n tx = self.KEO @ x \n \n # \n # Compute potential energy operator\n #\n xquad = self.basis.fbrToQuad(x,axis = 0) # xquad has shape (Nq,)\n vx = self.basis.quadToFbr(self.V * xquad) # vx has shape (NH,)\n \n return tx + vx",
"def knn(self,query_vec,k, stdev=False):\n\n sims = []\n if k > len(self.terms):\n k = len(self.terms)\n sims = np.matmul(self.vectors, query_vec.vector)\n if stdev:\n sims = zscore(sims)\n indices = np.argpartition(sims, -k)[-k:]\n indices = sorted(indices, key=lambda i: sims[i], reverse=True)\n results = []\n for index in indices:\n results.append([sims[index], self.terms[index]])\n return results"
] | [
"0.57746553",
"0.57497203",
"0.5428247",
"0.5418458",
"0.5324983",
"0.5313086",
"0.5289994",
"0.52694213",
"0.5208599",
"0.5204189",
"0.5199041",
"0.5128154",
"0.5054749",
"0.5047108",
"0.49921957",
"0.4964914",
"0.49533275",
"0.49489313",
"0.4900625",
"0.48871598",
"0.48866433",
"0.48843616",
"0.48802063",
"0.48596802",
"0.4856164",
"0.48491302",
"0.48322314",
"0.48235464",
"0.48173493",
"0.4811363"
] | 0.6143391 | 0 |
Implicitly restarted Arnoldi factorization of `matvec`. The routine finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec` by alternating between compression and re-expansion of an initial `num_krylov_vecs`-step Arnoldi factorization. | def implicitly_restarted_arnoldi_method(
matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,
res_thresh) -> Tuple[List[Tensor], List[Tensor]]:
N = np.prod(initial_state.shape)
p = num_krylov_vecs - numeig
num_krylov_vecs = np.min([num_krylov_vecs, N])
if (p <= 1) and (num_krylov_vecs < N):
raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <"
f" `num_krylov_vecs` <= N={N},"
f" `num_krylov_vecs`={num_krylov_vecs}")
dtype = initial_state.dtype
# initialize arrays
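  # krylov_vectors holds one Krylov vector per row; H is the
  # (num_krylov_vecs + 1, num_krylov_vecs) Hessenberg matrix of overlaps
  # from the Arnoldi recurrence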
krylov_vectors = jax.numpy.zeros(
(num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),
dtype=dtype)
H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)
# perform initial arnoldi factorization
Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,
initial_state,
krylov_vectors, H, 0,
num_krylov_vecs, eps)
# obtain an m-step arnoldi factorization
Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)
it = 0
if which == 'LR':
_which = 0
elif which == 'LM':
_which = 1
else:
raise ValueError(f"which = {which} not implemented")
# make sure the dtypes are matching
if maxiter > 0:
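    # promote real dtypes to complex: the Ritz values of the (generally
    # non-hermitian) Hm, used as shifts during the restart, can be complex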
if Vm.dtype == np.float64:
dtype = np.complex128
elif Vm.dtype == np.float32:
dtype = np.complex64
elif Vm.dtype == np.complex128:
dtype = Vm.dtype
elif Vm.dtype == np.complex64:
dtype = Vm.dtype
else:
raise TypeError(f'dtype {Vm.dtype} not supported')
Vm = Vm.astype(dtype)
Hm = Hm.astype(dtype)
fm = fm.astype(dtype)
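  # implicit restart loop: compute the Ritz values of Hm, compress the
  # factorization from num_krylov_vecs down to numeig steps with shifted QR,
  # then re-expand it back to num_krylov_vecs steps by a fresh Arnoldi run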
while (it < maxiter) and (not converged):
evals, _ = jax.numpy.linalg.eig(Hm)
krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,
p, _which, res_thresh)
if converged:
break
v0 = jax.numpy.reshape(fk, initial_state.shape)
# restart
Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,
krylov_vectors, H, numeig,
num_krylov_vecs, eps)
Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)
it += 1
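  # diagonalize the final Hm and map the selected Ritz vectors back to the
  # full space through the Krylov basis Vm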
ev_, U_ = np.linalg.eig(np.array(Hm))
eigvals = jax.numpy.array(ev_)
U = jax.numpy.array(U_)
_, inds = LR_sort(eigvals, _which)
vectors = get_vectors(Vm, U, inds, numeig)
return eigvals[inds[0:numeig]], [
jax.numpy.reshape(vectors[n, :], initial_state.shape)
for n in range(numeig)
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps",
"def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact",
"def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x",
"def eigenalgo(self, accuracy: float = 0, cap: int = 50000, version: str = \"Givens\", not_skip: bool = True):\n j, temps, verify_accuracy = 0, 0, np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n if version == \"Gram-Schmidt\":\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.gram_schmidt_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Givens\":\n verify_accuracy = np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.givens_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Rayleigh\":\n not_sing, diff, cond, j = True, accuracy + 1, True, 0\n temps = time()\n while cond: # Stop condition, all eigenvalues must be different\n while diff > accuracy and j < cap and not_sing:\n j += 1\n self.rvap, self.vep, diff, not_sing = self.rayleigh_iteration(self.rvap, self.vep)\n\n cond = False\n if j < cap:\n self.calc, first, not_sing = np.zeros(self.N, dtype=bool), True, True\n for i in range(self.N):\n if np.sum(np.less(np.abs(self.rvap - self.rvap[i]), 10 ** -6)) != 1:\n self.rvap[i + 1:] += self.memorize[i]\n if first:\n self.memorize[i] += 0.5\n self.vep[i + 1:, i + 1:] = np.eye(self.N - i - 1)\n first, cond, diff = False, True, accuracy + 1\n self.calc[i + 1:] = 1\n temps = time() - temps\n return self.rvap, self.vep, diff, j, temps\n\n else:\n print(\"Please select an appropriate value for the version parameter\")\n\n temps = time() - temps\n diff = np.max(abs(self.vap[verify_accuracy]))\n return np.diag(self.vap), self.vep, diff, j, temps",
"def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs",
"def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)",
"def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V",
"def poweig(A, x0, maxiter = 100, ztol= 1.0e-5, mode= 0, teststeps=1):\n m = len(A)\n xi = x0[:] \n \n for n in range(maxiter):\n # matrix vector multiplication.\n xim1 = xi[:]\n for i in range(m):\n xi[i] = 0.0\n for j in range(m):\n xi[i] += A[i][j] * xim1[j]\n print n, xi\n if mode == 0:\n vlen = sqrt(sum([xi[k]**2 for k in range(m)]))\n xi = [xi[k] /vlen for k in range(m)]\n elif mode == 1:\n for k in range(m-1, -1, -1):\n c = abs(xi[k])\n if c > 1.0e-5:\n xi = [xi[k] /c for k in range(m)]\n break\n # early termination test.\n if n % teststeps == 0:\n S = sum([xi[k]-xim1[k] for k in range(m)])\n if abs(S) < ztol:\n break\n #print n, xi\n # Compute Rayleigh quotient.\n numer = sum([xi[k] * xim1[k] for k in range(m)])\n denom = sum([xim1[k]**2 for k in range(m)])\n xlambda = numer/denom\n return xlambda, xi",
"def eigenvects(mat):\n # Check if symbols are present\n if hasSymbols(mat):\n return mat.eigenvects()\n # Purely numeric matrix\n newMat = recursiveEvaluate(mat.as_mutable())\n return newMat.eigenvects()",
"def solve_for_eigenvectors(matrix, num, mode=\"general\"):\n\n # Construct a sparse matrix\n if mode == \"general\":\n return linalg.eigs(matrix, num)\n\n if mode == \"symmetric\":\n return linalg.eigsh(matrix, num)",
"def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]",
"def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,\n random_state=None):\n\n from scipy.sparse import csc_matrix\n from scipy.linalg import LinAlgError\n\n random_state = check_random_state(random_state)\n\n vectors = as_float_array(vectors, copy=copy)\n\n eps = np.finfo(float).eps\n n_samples, n_components = vectors.shape\n\n # Normalize the eigenvectors to an equal length of a vector of ones.\n # Reorient the eigenvectors to point in the negative direction with respect\n # to the first element. This may have to do with constraining the\n # eigenvectors to lie in a specific quadrant to make the discretization\n # search easier.\n norm_ones = np.sqrt(n_samples)\n for i in range(vectors.shape[1]):\n vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \\\n * norm_ones\n if vectors[0, i] != 0:\n vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])\n\n # Normalize the rows of the eigenvectors. Samples should lie on the unit\n # hypersphere centered at the origin. This transforms the samples in the\n # embedding space to the space of partition matrices.\n vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]\n\n svd_restarts = 0\n has_converged = False\n\n # If there is an exception we try to randomize and rerun SVD again\n # do this max_svd_restarts times.\n while (svd_restarts < max_svd_restarts) and not has_converged:\n\n # Initialize first column of rotation matrix with a row of the\n # eigenvectors\n rotation = np.zeros((n_components, n_components))\n rotation[:, 0] = vectors[random_state.randint(n_samples), :].T\n\n # To initialize the rest of the rotation matrix, find the rows\n # of the eigenvectors that are as orthogonal to each other as\n # possible\n c = np.zeros(n_samples)\n for j in range(1, n_components):\n # Accumulate c to ensure row is as orthogonal as possible to\n # previous picks as well as current one\n c += np.abs(np.dot(vectors, rotation[:, j - 1]))\n rotation[:, j] = vectors[c.argmin(), :].T\n\n last_objective_value = 0.0\n n_iter = 0\n\n while not has_converged:\n n_iter += 1\n\n t_discrete = np.dot(vectors, rotation)\n\n labels = t_discrete.argmax(axis=1)\n vectors_discrete = csc_matrix(\n (np.ones(len(labels)), (np.arange(0, n_samples), labels)),\n shape=(n_samples, n_components))\n\n t_svd = vectors_discrete.T * vectors\n\n try:\n U, S, Vh = np.linalg.svd(t_svd)\n svd_restarts += 1\n except LinAlgError:\n print(\"SVD did not converge, randomizing and trying again\")\n break\n\n ncut_value = 2.0 * (n_samples - S.sum())\n if ((abs(ncut_value - last_objective_value) < eps) or\n (n_iter > n_iter_max)):\n has_converged = True\n else:\n # otherwise calculate rotation and continue\n last_objective_value = ncut_value\n rotation = np.dot(Vh.T, U.T)\n\n if not has_converged:\n raise LinAlgError('SVD did not converge')\n return labels",
"def test_inverse_eigenvectors_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n rv, h, rv_inv = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(rv.dot(rv_inv), np.identity(*h.shape), atol=1e-14)",
"def test_eigsum_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n _, h, _ = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(np.sum(h), np.trace(g0_inv_full))",
"def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out",
"def posdef_eig(mat):\n return posdef_eig_functions[POSDEF_EIG_METHOD](mat)",
"def get_eigvals_eigvects(\n num_layers,\n numeric_matrices_eV_over_angsquared,\n layer_mass_amu,\n use_banded_algorithm=False,\n):\n # Based on the units in input, and indicating with:\n # - [hbar omega] the numeric value for the frequency in meV => hbar omega = [hbar omega] * meV\n # - [K] the numeric value of K in eV/ang^2\n # - [m] the layer mass in amu\n # we have (we omit the sign, and for units considerations we 'drop' U):\n # omega^2 = K / m =>\n # (hbar omega)^2 = hbar^2 * K / m =>\n # [hbar omega]^2 * meV^2 = hbar^2 * [K] / [m] * eV/ang^2 / amu = [K] / [m] * hbar^2 * eV/ang^2 / amu =>\n # [hbar omega]^2 = = [K] / [m] * ( hbar^2 * eV/ang^2 / amu / meV^2 )\n # so that the conversion factor is the last bracketed term:\n # conversion_factor = hbar^2 * eV / (angstrom^2 * amu * meV^2)\n conversion_factor = 4180.15925\n # NOTE: for simplicity, the conversion is applied at the very end\n\n if use_banded_algorithm:\n # 3 blocks (below, same layer, and above) of size 3 => total width of 9\n # Since we only store the upper part, we only need a width of 4 (diagonal + 3 superdiagonals)\n K_matrix = np.zeros((4, num_layers * 3))\n else:\n K_matrix = np.zeros((num_layers * 3, num_layers * 3))\n\n # Note: I construct -K, actually\n for block_idx in range(num_layers):\n # Interaction with upper layer\n if block_idx < num_layers - 1: # Not in the last layer\n current_block = np.array(\n numeric_matrices_eV_over_angsquared[\n block_idx % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx + 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n # Interaction with lower layer\n if block_idx > 0: # Not in the first layer\n previous_block = np.array(\n numeric_matrices_eV_over_angsquared[\n (block_idx - 1) % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx - 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n\n # We want to get the eigenvalues of omega^2 U = - 1/M_layer K U\n K_matrix /= layer_mass_amu\n\n # Get frequencies (eigvals) and eigenvectors (for mode analysis)\n if use_banded_algorithm:\n eigvals, eigvects = scipy.linalg.eig_banded(K_matrix, lower=False)\n else:\n eigvals, eigvects = np.linalg.eigh(K_matrix)\n\n eigvals *= conversion_factor\n\n ## The first three should be acoustic i.e. almost zero; the rest should be positive\n ## I don't check as depending on the units it's hard to define a correct absolute energy\n # assert np.sum(np.abs(eigvals[:3])) < 1.0e-8\n\n # Remove the first three acoustic modes\n return eigvals[3:], eigvects[:, 3:]",
"def eigen_vector_i_all(self):\n return self._eig_vec",
"def gauss_seidel_solver(self, mat, rhs):\n x = np.zeros_like(rhs)\n for it_count in range(1, self.iterations_number):\n x_new = np.zeros_like(x)\n if self.verbose > 1:\n print(\"Iteration {0}: {1}\".format(it_count, x))\n for i in range(mat.shape[0]):\n s1 = np.dot(mat[i, :i], x_new[:i])\n s2 = np.dot(mat[i, i + 1:], x[i + 1:])\n x_new[i] = (rhs[i] - s1 - s2) / mat[i, i]\n if np.allclose(x, x_new, rtol=1e-8):\n break\n x = x_new\n return x",
"def power_iteration(X):\n #X, languages=prepare_data_matrix()\n M=X\n M=M-np.mean(M, axis=0)\n M=np.cov(M, rowvar=False) #the covariance matrix, size 100x100\n x=np.ones(len(M)) #a random starting vector composed of 100 ones, it only cant be of all zeros\n difference=np.ones(len(x))\n\n #print(np.linalg.norm(difference))\n while np.linalg.norm(difference) >= 10**-5: #we iterate until the difference between the previous and the new x is really small, lets say 10^-5\n #print(x.T.shape)\n oldx=x\n z=M.dot((x.T))\n x=z.T\n x=x/np.linalg.norm(x)\n difference=np.linalg.norm(oldx-x)\n #the x that we get at the end of this loop is our eigenvector\n\n #print(x.dot(M).shape)\n #print(x.shape)\n y=(x.dot(M)).dot(x.T) #y is the corresponding eigenvalue to the eigenvector x\n \n return x, y",
"def truncated_svd(A,k=None):",
"def eig_faces(u_mat, nmode, dim):\n n = int(nmode)\n nparray = np.zeros(np.size(u_mat[:,0]))\n for i in range(n):\n nparray = nparray + u_mat[:,i]\n \n nparray = np.reshape(nparray,dim)\n return(nparray)",
"def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)",
"def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass",
"def eigen(X):\n\n symmetric = np.alltrue(np.isclose(X - X.T, np.zeros(n)))\n small = max(X.shape) <= 11\n\n if symmetric:\n return jacobi(X)\n elif small:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm3(X, maxiter=maxiter)\n else:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm2(X, maxiter=maxiter)",
"def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # SVD Factorization\n U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n\n # Remove dimensions related with very small singular values\n U = U[:, s > tol]\n Vt = Vt[s > tol, :]\n s = s[s > tol]\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(z)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n z = U.dot(aux2)\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = V 1/s U.T x\n aux1 = U.T.dot(x)\n aux2 = 1/s*aux1\n z = Vt.T.dot(aux2)\n return z\n\n return null_space, least_squares, row_space",
"def eigs(self,num_eigvals,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num)\n eigvals, eigvecs = eigsh(ham,k=num_eigvals*num_sites,which='SM')\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(num_eigvals):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs",
"def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod",
"def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = 
eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos",
"def main():\n print 'Running the power method...'\n dim = input('Give the dimension : ')\n nbit = input('How many iterations ? ')\n j = complex(0, 1)\n rnd = np.random.normal(0, 1, (dim, dim)) \\\n + np.random.normal(0, 1, (dim, dim))*j\n nbs = np.random.normal(0, 1, (dim, 1)) \\\n + np.random.normal(0, 1, (dim, 1))*j\n rndmat = np.matrix(rnd)\n rndvec = np.matrix(nbs)\n eigmax = power_method(rndmat, rndvec, nbit)\n check(rndmat, eigmax)"
] | [
"0.61382663",
"0.5593468",
"0.55151695",
"0.5378377",
"0.5306997",
"0.53011614",
"0.5231085",
"0.5190737",
"0.5158738",
"0.5093091",
"0.5071404",
"0.50447",
"0.50041604",
"0.49797606",
"0.49743566",
"0.49714258",
"0.49670354",
"0.49553815",
"0.4946754",
"0.49402496",
"0.4939668",
"0.49330664",
"0.4910911",
"0.48993132",
"0.48936844",
"0.48664683",
"0.48374194",
"0.4837403",
"0.48193133",
"0.48023084"
] | 0.67910457 | 0 |
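A minimal usage sketch of the routine documented in the record above. Everything below is an assumption for illustration: the 8x8 test operator, the parameter values, the dense cross-check, and the availability of the record's helpers (arnoldi_fact, shifted_QR, update_data, LR_sort, get_vectors) in scope; it also assumes a JAX version that still provides jax.ops.index_update, which the record's code relies on.

# Illustrative only -- names and values below are assumptions, not part of the record.
import numpy as np
import jax.numpy as jnp

A = jnp.array(np.random.randn(8, 8))      # small dense test operator (assumption)

def matvec(x, mat):
    # the linear map whose dominant eigenpairs we want
    return mat @ x

eigvals, eigvecs = implicitly_restarted_arnoldi_method(
    matvec, [A], jnp.ones(8, dtype=A.dtype),
    num_krylov_vecs=6, numeig=2, which='LM',
    eps=1e-8, maxiter=20, res_thresh=1e-8)

# Cross-check the two largest-magnitude eigenvalues against a dense solver.
dense = np.linalg.eigvals(np.array(A))
dense = dense[np.argsort(-np.abs(dense))][:2]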
Solve A x = b for x using the m-restarted GMRES method. This is intended to be called via jax_backend.gmres. Given a linear mapping with (n x n) matrix representation A = A_mv(*A_args), gmres_m solves Ax = b (1) where x and b are length-n vectors, using the method of Generalized Minimum RESiduals with M iterations per restart (GMRES_M). | def gmres_m(A_mv: Callable, A_args: Sequence,
b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,
atol: float, num_krylov_vectors: int,
maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:
num_krylov_vectors = min(num_krylov_vectors, b.size)
x = x0
b_norm = jnp.linalg.norm(b)
tol = max(tol * b_norm, atol)
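  # run num_krylov_vectors-step GMRES cycles (restarts) until the residual
  # norm beta drops below tol or maxiter restarts have been performed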
for n_iter in range(maxiter):
done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,
b_norm)
if done:
break
return x, beta, n_iter, done | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gmres_wrapper(jax: types.ModuleType):\n jnp = jax.numpy\n\n def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n \"\"\"\n Solve A x = b for x using the m-restarted GMRES method. This is\n intended to be called via jax_backend.gmres.\n\n Given a linear mapping with (n x n) matrix representation\n A = A_mv(*A_args) gmres_m solves\n Ax = b (1)\n where x and b are length-n vectors, using the method of\n Generalized Minimum RESiduals with M iterations per restart (GMRES_M).\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x0: Initial guess solution.\n tol, atol: Solution tolerance to achieve,\n norm(residual) <= max(tol * norm(b), atol).\n tol is also used to set the threshold at which the Arnoldi factorization\n terminates.\n num_krylov_vectors: Size of the Krylov space to build at each restart.\n maxiter: The Krylov space will be repeatedly rebuilt up to this many\n times.\n Returns:\n x: The approximate solution.\n beta: Norm of the residual at termination.\n n_iter: Number of iterations at termination.\n converged: Whether the desired tolerance was achieved.\n \"\"\"\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done\n\n def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n \"\"\"\n A single restart of GMRES.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The `b` in `A @ x = b`.\n x: Initial guess solution.\n tol: Solution tolerance to achieve,\n num_krylov_vectors : Size of the Krylov space to build.\n Returns:\n done: Whether convergence was achieved.\n beta: Magnitude of residual (i.e. 
the error estimate).\n x: The approximate solution.\n \"\"\"\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x\n\n @jax.jit\n def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n \"\"\"\n Computes the residual vector r and its norm, beta, which is minimized by\n GMRES.\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and\n v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x: Initial guess solution.\n Returns:\n r: The residual vector.\n beta: Its magnitude.\n \"\"\"\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta\n\n def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n \"\"\"\n Updates the solution in response to the information computed by the\n main GMRES loop.\n\n Args:\n k: The final iteration which was reached by GMRES before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: The R factor in H = QR where H is the Arnoldi overlap matrix.\n beta_vec: Stores the Givens factors used to map H into QR.\n x0: The initial guess solution.\n Returns:\n x: The updated solution.\n \"\"\"\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Builds the Arnoldi decomposition of (A, v), where v is the normalized\n residual of the current solution estimate. The decomposition is\n returned as V, R, where V is the usual matrix of Krylov vectors and\n R is the upper triangular matrix in H = QR, with H the usual matrix\n of overlaps.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n n_kry: Size of the Krylov space to build; this is called\n num_krylov_vectors in higher level code.\n x0: Guess solution.\n r: Residual vector.\n beta: Magnitude of r.\n tol: Solution tolerance to achieve.\n b_norm: Magnitude of b in Ax = b.\n Returns:\n k: Counts the number of iterations before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: From H = QR where H is the Arnoldi matrix of overlaps.\n beta_vec: Stores Q implicitly as Givens factors.\n \"\"\"\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. 
Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)\n\n VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray,\n float, jax.ShapedArray]\n ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int]\n GmresCarryType = Tuple[VarType, ConstType]\n\n @jax.jit\n def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n \"\"\"\n This function dictates whether the main GMRES while loop will proceed.\n It is equivalent to:\n if k < n_kry and err > tol:\n return True\n else:\n return False\n where k, n_kry, err, and tol are unpacked from gmres_carry.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n (bool): Whether to continue iterating.\n \"\"\"\n gmres_constants, gmres_variables = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.\n\n @jax.jit\n def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n \"\"\"\n Performs a single iteration of gmres_krylov. 
See that function for a more\n detailed description.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n gmres_carry: The updated gmres_carry.\n \"\"\"\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)\n\n @jax.jit\n def _gs_step(r: jax.ShapedArray,\n v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs one iteration of the stabilized Gram-Schmidt procedure, with\n r to be orthonormalized against {v} = {v_0, v_1, ...}.\n\n Args:\n r: The new vector which is not in the initially orthonormal set.\n v_i: The i'th vector in that set.\n Returns:\n r_i: The updated r which is now orthonormal with v_i.\n h_i: The overlap of r with v_i.\n \"\"\"\n h_i = jnp.vdot(v_i, r)\n r_i = r - h_i * v_i\n return r_i, h_i\n\n @jax.jit\n def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs the kth iteration of the Arnoldi reduction procedure.\n Args:\n k: The current iteration.\n A_mv, A_args: A function A_mv(v, *A_args) performing a linear\n transformation on v.\n V: A matrix of size (n, K + 1), K > k such that each column in\n V[n, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes.\n H: A matrix of size (K, K), K > k with H[:, k] all zeroes.\n Returns:\n V, H: With their k'th columns respectively filled in by a new\n orthogonalized Krylov vector and new overlaps.\n \"\"\"\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H\n\n####################################################################\n# GIVENS ROTATIONS\n####################################################################\n @jax.jit\n def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n \"\"\"\n Successively applies each of the rotations stored in givens to H_col.\n\n Args:\n H_col : The vector to be rotated.\n givens: 2 x K, K > k matrix of rotation factors.\n k : Iteration number.\n Returns:\n H_col : The rotated vector.\n \"\"\"\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, 
jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col\n\n @jax.jit\n def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Applies the Givens rotations stored in the vectors cs and sn to the vector\n H_col. Then constructs a new Givens rotation that eliminates H_col's\n k'th element, yielding the corresponding column of the R in H's QR\n decomposition. Returns the new column of R along with the new Givens\n factors.\n\n Args:\n H_col : The column of H to be rotated.\n givens: A matrix representing the cosine and sine factors of the\n previous GMRES Givens rotations, in that order\n (i.e. givens[0, :] -> the cos factor).\n k : Iteration number.\n Returns:\n R_col : The column of R obtained by transforming H_col.\n givens_k: The new elements of givens that zeroed out the k+1'th element\n of H_col.\n \"\"\"\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens\n\n @jax.jit\n def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n \"\"\"\n Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta)\n so that [cs -sn] @ [v1] = [r]\n [sn cs] [v2] [0]\n Args:\n v1, v2: The scalars.\n Returns:\n cs, sn: The rotation factors.\n \"\"\"\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn\n\n fnames = [\n \"gmres_m\", \"gmres_residual\", \"gmres_krylov\", \"gs_step\",\n \"kth_arnoldi_step\", \"givens_rotation\"\n ]\n functions = [\n gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step,\n givens_rotation\n ]\n\n class Functions:\n\n def __init__(self, fun_dict):\n self.dict = fun_dict\n\n def __getattr__(self, name):\n return self.dict[name]\n\n return Functions(dict(zip(fnames, functions)))",
"def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,\n callback=None, atol=None, callback_type=None):\n A, M, x, b = _make_system(A, M, x0, b)\n matvec = A.matvec\n psolve = M.matvec\n\n n = A.shape[0]\n if n == 0:\n return cupy.empty_like(b), 0\n b_norm = cupy.linalg.norm(b)\n if b_norm == 0:\n return b, 0\n if atol is None:\n atol = tol * float(b_norm)\n else:\n atol = max(float(atol), tol * float(b_norm))\n if maxiter is None:\n maxiter = n * 10\n if restart is None:\n restart = 20\n restart = min(restart, n)\n if callback_type is None:\n callback_type = 'pr_norm'\n if callback_type not in ('x', 'pr_norm'):\n raise ValueError('Unknown callback_type: {}'.format(callback_type))\n if callback is None:\n callback_type = None\n\n V = cupy.empty((n, restart), dtype=A.dtype, order='F')\n H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')\n e = numpy.zeros((restart+1,), dtype=A.dtype)\n\n compute_hu = _make_compute_hu(V)\n\n iters = 0\n while True:\n mx = psolve(x)\n r = b - matvec(mx)\n r_norm = cublas.nrm2(r)\n if callback_type == 'x':\n callback(mx)\n elif callback_type == 'pr_norm' and iters > 0:\n callback(r_norm / b_norm)\n if r_norm <= atol or iters >= maxiter:\n break\n v = r / r_norm\n V[:, 0] = v\n e[0] = r_norm\n\n # Arnoldi iteration\n for j in range(restart):\n z = psolve(v)\n u = matvec(z)\n H[:j+1, j], u = compute_hu(u, j)\n cublas.nrm2(u, out=H[j+1, j])\n if j+1 < restart:\n v = u / H[j+1, j]\n V[:, j+1] = v\n\n # Note: The least-square solution to equation Hy = e is computed on CPU\n # because it is faster if tha matrix size is small.\n ret = numpy.linalg.lstsq(cupy.asnumpy(H), e)\n y = cupy.array(ret[0])\n x += V @ y\n iters += restart\n\n info = 0\n if iters == maxiter and not (r_norm <= atol):\n info = iters\n return mx, info",
"def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x",
"def solve_gmres(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n tol: float = 1e-5,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol, **kwargs)[0]",
"def GMRES_1(A, b, x0, max_iterations=50):\n\n last_x = x0\n curr_x = last_x\n last_r = b - A @ x0\n curr_iter = 0\n residual_queue = []\n while curr_iter < max_iterations:\n Ar = A @ last_r\n alpha = (last_r.transpose() @ Ar) / (Ar.transpose() @ Ar)\n curr_x = last_x + alpha * last_r\n curr_r = last_r - alpha * Ar\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", \"GMRES(1)\")\n last_x = curr_x\n last_r = curr_r\n curr_iter += 1\n print(\"Number of Iterations: \" + str(curr_iter))\n\n return curr_x",
"def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta",
"def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return",
"def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)",
"def tt_gmres_leftprecond(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True, preconOp=None, adaptiveTolerance=True):\n\n def calc_solution():\n x = pitts_py.TensorTrain_double(b.dimensions())\n x.setZero()\n nrm_x = 0\n for i in range(len(y)):\n nrm_x = pitts_py.axpby(y[i], V[i], nrm_x, x, eps)\n return x, nrm_x\n\n def residual_error(x, nrm_x):\n #print(\"TT-GMRES: solution max rank %d\" % np.max(x.getTTranks()))\n # calculate real residual\n r = pitts_py.TensorTrain_double(b.dimensions())\n r_nrm = nrm_x * AOp(x, r, eps/10, maxRank=9999)\n if preconOp is not None:\n r_nrm = pitts_py.axpby(orig_nrm_b, orig_b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/orig_nrm_b) )\n else:\n r_nrm = pitts_py.axpby(nrm_b, b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/nrm_b) )\n return r_nrm\n\n if verbose:\n if preconOp is None:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n else:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"precond direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n\n # assumes b is normalized and nrm_b is the desired rhs norm\n\n # left-preconditioning, transform RHS\n if preconOp is not None:\n orig_b = b\n orig_nrm_b = nrm_b\n b = pitts_py.TensorTrain_double(orig_b.dimensions())\n nrm_b = nrm_b * preconOp.apply(orig_b, b, eps / 10, 9999)\n nrm_b = nrm_b * pitts_py.normalize(b, eps/10, 9999)\n\n # define initial subspace\n beta = nrm_b\n curr_beta = beta\n V = [b]\n m = maxIter\n H = np.zeros((m + 1, m), order='F')\n\n if preconOp is not None:\n z = pitts_py.TensorTrain_double(b.dimensions())\n\n if verbose:\n #print(\"TT-GMRES: initial residual norm: %g, max. rank: %d\" % (beta, np.max(b.getTTranks())))\n if preconOp is None:\n print(0, 1, 1, np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n #print(\"TT-GMRES: un-preconditioned RHS max. rank: %d\" % np.max(orig_b.getTTranks()))\n else:\n print(0, 1, 1, np.max(orig_b.getTTranks()), np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n\n for j in range(m):\n if adaptiveTolerance:\n delta = eps / (curr_beta / beta) / (1.2 * m)\n else:\n delta = eps\n w = pitts_py.TensorTrain_double(b.dimensions())\n\n if preconOp is not None:\n z_nrm = AOp(V[j], z, delta, 9999)#, (j+1)*rank_b)\n w_nrm = z_nrm * preconOp.apply(z, w, delta, 9999)#, (j+2)*rank_b)\n else:\n w_nrm = AOp(V[j], w, delta, 9999)#, (j+2)*rank_b)\n\n if preconOp is not None:\n rank_z = np.max(z.getTTranks())\n rank_w = np.max(w.getTTranks())\n\n H[:j+2,j] = w_nrm * tt_pivmgs(V, w, delta, maxRank=9999)\n\n rank_vj = np.max(w.getTTranks())\n\n Hj = H[:j+2,:j+1]\n betae = np.zeros(j+2)\n betae[0] = beta\n # solving Hj * y = beta e_1\n y, curr_beta, rank, s = np.linalg.lstsq(Hj, betae, rcond=None)\n curr_beta = np.sqrt(curr_beta[0]) if curr_beta.size > 0 else 0\n if verbose:\n #print(\"TT-GMRES: LSTSQ residual norm: %g \" % (curr_beta / beta) )\n x, nrm_x = calc_solution()\n r_nrm = residual_error(x, nrm_x)\n rank_x = np.max(x.getTTranks())\n if preconOp is None:\n print(j+1, curr_beta/beta, r_nrm / nrm_b, rank_w, rank_vj, rank_x)\n else:\n print(j+1, curr_beta/beta, r_nrm / orig_nrm_b, rank_w, rank_z, rank_vj, rank_x)\n if curr_beta / beta <= eps:\n break\n\n if not verbose:\n x, nrm_x = calc_solution()\n return x, nrm_x",
"def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)",
"def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x",
"def minmap_newton(A, b, x, max_iter=0, tol_rel=0.00001, tol_abs=np.finfo(np.float64).eps*10, profile=True):\n\n # Human readable dictionary of exit messages\n msg = {1 : 'preprocessing', # flag = 1\n 2 : 'iterating', # flag = 2\n 3 : 'relative', # flag = 3\n 4 : 'absolute', # flag = 4\n 5 : 'stagnation', # flag = 5\n 6 : 'local minima', # flag = 6\n 7 : 'nondescent', # flag = 7\n 8 : 'maxlimit', # flag = 8\n }\n\n # We use N as a reference for the usage of asserts throughout the\n # program. We assume that everything is column vector of shape\n # (N,1) if not the asserts will catch them.\n N = np.size(b)\n flag = 1\n\n assert x.shape == (N,1), 'x0 is not a column vector, it has shape: ' + repr(x.shape)\n assert A.shape == (N,N), 'A is not a square matrix, it has shape: ' + repr(A.shape)\n assert b.shape == (N,1), 'b is not a column vector, it has shape: ' + repr(b.shape)\n\n if max_iter == 0:\n max_iter = np.floor(N/2.0)\n\n # Ensure sane values\n max_iter = max(max_iter,1)\n # Rest of the value should be sane\n\n ##### Magic constants #####\n h = 1e-7\n alpha = 0.5\n beta = 0.001\n gamma = 1e-28\n\n eps = np.finfo(np.float64).eps\n rho = np.finfo(np.float64).eps\n gmres_tol = 10*eps\n\n ##### Values needed file iterating #####\n convergence = np.zeros(max_iter+1)\n\n # Should use np.infty.\n err = 1e20\n iterate = 1\n flag = 2\n\n while iterate <= max_iter:\n y = np.dot(A,x) + b\n assert y.shape == (N,1), 'y is not a column vector, it has shape: ' + repr(y.shape)\n assert np.all(np.isreal(y)), 'y is not real'\n # Calculate the minimum map column vector.\n H = minmap(x,y)\n assert H.shape == (N,1), 'H is not a column vector, it has shape: ' + repr(H.shape)\n assert np.all(np.isreal(H)), 'H is not real'\n old_err = err\n # Calculate merit value, error\n err = 0.5*np.dot(H.T,H)\n assert err.shape == (1,1), 'err is not a scalar, it has shape: ' + repr(err.shape)\n assert np.isreal(err), 'err is not real'\n\n if profile:\n convergence[iterate-1] = err\n\n ##### Test the stopping criterias used #####\n rel_err = np.abs(err-old_err)/np.abs(old_err)\n\n if rel_err < tol_rel:\n flag = 3\n break\n\n if err < tol_abs:\n flag = 4\n break\n\n ##### Solving the Newton system\n restart = min(N, 20) # Number of iterates done before Restart\n # for GMRES should restart\n S = np.where(y < x)\n J = np.identity(N)\n J[S,:] = A[S,:]\n dx = np.zeros((N,1))\n dx = gmres(J, (-H), tol=gmres_tol, restart=restart)[0].reshape(N,1)\n\n assert dx.shape == (N,1), 'dx is not a column vector, it has shape: ' + repr(dx.shape)\n assert np.all(np.isreal(dx)), 'dx is not real'\n\n nabla_H = np.dot(H.T, J)\n # Ensure nabla_H is a column vector\n nabla_H = nabla_H.reshape(N,1)\n assert nabla_H.shape == (N,1), 'nabla_H is not a column vector, it has shape: ' + repr(nabla_H.shape)\n assert np.all(np.isreal(nabla_H)), 'nabla_H is not real'\n\n # Tests whether the search direction is below machine\n # precision.\n if np.max(np.abs(dx)) < eps:\n flag = 5\n print \"*** Search direction below machine precision at iterate \" + repr(iterate) + \", choosing gradient as search direction.\"\n dx = -nabla_H\n\n # Test whether we are stuck in a local minima\n if np.linalg.norm(nabla_H) < tol_abs:\n flag = 6\n break\n\n # Test whether our direction is a sufficient descent direction\n if np.dot(nabla_H.T,dx) > -rho*(np.dot(dx.T, dx)):\n # Otherwise we should try gradient direction instead.\n print \"*** Non descend direction at iterate \" + repr(iterate) + \", choosing gradient as search direction.\"\n dx = -nabla_H\n\n ##### Armijo 
backtracking combined with a projected line-search #####\n tau = 1.0\n f_0 = err\n grad_f = beta*np.dot(nabla_H.T,dx)\n\n x_k = x[:]\n assert x_k.shape == (N,1), 'x_k is not a column vector, it has shape: ' + repr(x_k)\n assert np.all(np.isreal(x_k)), 'x_k is not real'\n \n # Perform backtracking line search\n while True:\n x_k = np.maximum(0, x + dx*tau)\n assert x_k.shape == (N,1), 'x_k is not a column vector, it has shape: ' + repr(x_k.shape)\n assert np.all(np.isreal(x_k)), 'x_k is not real'\n y_k = np.dot(A,x_k)+b\n assert y_k.shape == (N,1), 'y_k is not a column vector, it has shape: ' + repr(y_k.shape)\n assert np.all(np.isreal(y_k)), 'y_k is not real'\n H_k = minmap(y_k,x_k)\n assert H_k.shape == (N,1), 'H_k is not a column vector, it has shape: ' + repr(H_k.shape)\n assert np.all(np.isreal(H_k)), 'H_k is not real'\n f_k = 0.5*(np.dot(H_k.T,H_k))\n # Test Armijo condition for sufficient decrease\n if f_k <= f_0 + tau*grad_f:\n break\n # Test whether the stepsize has become too small\n if tau*tau < gamma:\n break\n tau *= alpha\n\n # Update iterate with result from line search.\n x = x_k\n assert x.shape == (N,1), 'x is not a column vector, it has shape: ' + repr(x.shape)\n assert np.all(np.isreal(x)), 'x is not real.'\n\n # Increment iterate\n iterate += 1\n\n if iterate >= max_iter:\n iterate -= 1\n flag = 8\n\n return (x, err, iterate, flag, convergence[:iterate], msg[flag])",
"def _newtons_method_gmres_action(f, initial_guess, max_iter=50, tol=1e-12):\n\n output_dim = len(f(initial_guess))\n \n @np.vectorize\n def sum_values(dictionary):\n return sum(dictionary.values())\n \n def create_action(x0):\n \n def L_fun(x):\n \"\"\"\n Action\n Returns J_f(x0)*x by setting the values of 'x' as the initial derivatives for the variables in x0.\n \"\"\"\n \n f_x0 = f(ad.create_vector('x0', x0, seed_vector=x));\n f_x0 = np.array(f_x0) #ensure that f_x0 is np.array\n action = sum_values(ad.get_deriv(f_x0))\n return action\n \n L = LinearOperator(shape=(output_dim, len(x0)), matvec=L_fun)\n \n return L\n \n x0 = initial_guess\n for iter_num in range(max_iter):\n L = create_action(x0)\n b = -1 * np.array(f(x0))\n if len(x0) == 1:\n b = np.array([b])\n step, _ = gmres(L, b, tol = tol, atol = 'legacy')\n xnext = x0 + step \n if np.all(np.abs(xnext - x0) < tol):\n return (xnext, iter_num + 1);\n x0 = xnext\n \n raise RuntimeError(\"Failed to converge after {0} iterations, value is {1}\".format(max_iter, x0) );",
"def mr(A, n_iterations, stop=False):\n assert len(A.sizes) == 2\n assert A.sizes[0] == A.sizes[1]\n M = A.same_shape()\n n = A.sizes[0]\n @for_range(n)\n def _(i):\n e = sfix.Array(n)\n e.assign_all(0)\n e[i] = 1\n M[i] = solve_linear(A, e, n_iterations, stop=stop)\n return M.transpose()",
"def linear_least_squares(a, b, residuals=False):\n if type(a) != np.ndarray or not a.flags[\"C_CONTIGUOUS\"]:\n main_warning(\n \"Matrix a is not a C-contiguous numpy array. The solver will create a copy, which will result\"\n + \" in increased memory usage.\"\n )\n\n a = np.asarray(a, order=\"c\")\n i = dgemm(alpha=1.0, a=a.T, b=a.T, trans_b=True)\n x = np.linalg.solve(i, dgemm(alpha=1.0, a=a.T, b=b))\n\n if residuals:\n return x, np.linalg.norm(np.dot(a, x) - b)\n else:\n return x",
"def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return",
"def solve_matrix(M, b):\n\n try:\n x = np.linalg.solve(M, b)\n except np.LinAlgError:\n print(\"ERR: Matrix is singular\")\n return None\n\n if not np.allclose(np.dot(M, x), b):\n print(\"ERR: Matrix is inconsistent (most likely with the independent sources)\")\n return None\n \n return x",
"def cg_solve_jax(A,\n b,\n x_0=None,\n cg_iters=10,\n cg_residual_tol=1e-20,\n damping=1e-4):\n x = jnp.zeros_like(b) if x_0 is None else x_0\n if x_0 is not None:\n hvp_x0 = jnp.dot(A, x)\n\n r = b.copy() if x_0 is None else b-hvp_x0\n p = r.copy()\n rdotr = p.dot(r)\n\n for i in range(cg_iters):\n hvp_p = jnp.dot(A, p)\n z = hvp_p\n\n v = rdotr / p.dot(z)\n x += v * p\n r -= v * z\n\n s = r\n newrdotr = s.dot(r)\n mu = newrdotr / rdotr\n\n p = s + mu * p\n rdotr = newrdotr\n\n if rdotr < cg_residual_tol:\n break\n return x",
"def _lin_solve(b, x, x0, a, c, iterations, n):\n c_recip = 1 / c\n for k in range(0, iterations):\n for m in range(1, n - 1):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, m, n)] = (x0[index_of(i, j, m, n)] + a * (x[index_of(i + 1, j, m, n)]\n + x[index_of(i - 1, j, m, n)]\n + x[index_of(i, j + 1, m, n)]\n + x[index_of(i, j - 1, m, n)]\n + x[index_of(i, j, m + 1, n)]\n + x[index_of(i, j, m - 1, n)]\n )) * c_recip\n _set_bounds(b, x, n)",
"def linearRegression(A, b):\n m = Model()\n m.setParam('OutputFlag', False)\n\n n = len(A) # number of rows in A\n d = len(A[0]) # number of columns in A\n assert n == len(b) # make sure the shape of matrix is correct\n\n # x is of size d\n x = m.addVars(d, name='x')\n # ** is not supported by gurobi!\n square = lambda _: _ * _\n # \\sum_i (A[i] * x - b[i])^2\n m.setObjective(sum(square(sum(A[i][j] * x[j] for j in xrange(d)) - b[i])\n for i in xrange(n)), GRB.MINIMIZE)\n m.optimize()\n\n return [x[_].X for _ in xrange(d)]",
"def linear_least_squares(M, v):\n \n B = copy(M)\n [m,n] = shape(B)\n if rank(B) != min(m,n):\n print('Warning: can not be solved since the rank of the matrix is not its maximum value')\n return nan\n else:\n \n A = copy(M)\n At = transpose(M)\n b = copy(v)\n b = transpose(b)\n \n AtA = dot(At, A)\n Atb = transpose(dot(At, b))\n print(AtA, Atb)\n \n x = gauss_elimination(AtA, Atb)\n print('x*:')\n return x",
"def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,\n atol=None):\n A, M, x, b = _make_system(A, M, x0, b)\n matvec = A.matvec\n psolve = M.matvec\n\n n = A.shape[0]\n if maxiter is None:\n maxiter = n * 10\n if n == 0:\n return cupy.empty_like(b), 0\n b_norm = cupy.linalg.norm(b)\n if b_norm == 0:\n return b, 0\n if atol is None:\n atol = tol * float(b_norm)\n else:\n atol = max(float(atol), tol * float(b_norm))\n\n r = b - matvec(x)\n iters = 0\n rho = 0\n while iters < maxiter:\n z = psolve(r)\n rho1 = rho\n rho = cublas.dotc(r, z)\n if iters == 0:\n p = z\n else:\n beta = rho / rho1\n p = z + beta * p\n q = matvec(p)\n alpha = rho / cublas.dotc(p, q)\n x = x + alpha * p\n r = r - alpha * q\n iters += 1\n if callback is not None:\n callback(x)\n resid = cublas.nrm2(r)\n if resid <= atol:\n break\n\n info = 0\n if iters == maxiter and not (resid <= atol):\n info = iters\n\n return x, info",
"def _gmres(self, super_operator, super_rhs, tol):\n return login_gmres(\n super_operator, super_rhs, tol,\n return_residuals=True,\n **SOLVER_OPTIONS\n )",
"def simpleDemo(verbose=False):\n N = 100\n u_true = np.array([np.sin(x / 10.0) for x in np.linspace(0, 20, N)])\n A = openmg.operators.poisson(N, sparse=True)\n b = openmg.tools.flexibleMmult(A, u_true)\n params = {'problemShape': (N,), 'gridLevels': 3, 'cycles': 10,\n 'iterations': 2, 'verbose': verbose, 'dense': True,\n 'threshold': 1e-2, 'giveInfo': True}\n u_mg, infoDict = openmg.mgSolve(A, b, params)\n if verbose:\n print \"info:\"\n print infoDict\n \n ## if verbose==True, output will look something like this:\n # Generating restriction matrices; dense=True\n # Generating coefficient matrices; dense=True ... made 3 A matrices\n # calling amg_cycle at level 0\n # calling amg_cycle at level 1\n # direct solving at level 2\n # Residual norm from cycle 1 is 0.805398.\n # cycle 1 < cycles 10\n # calling amg_cycle at level 0\n # calling amg_cycle at level 1\n # direct solving at level 2\n # Residual norm from cycle 2 is 0.107866.\n # cycle 2 < cycles 10\n # calling amg_cycle at level 0\n # calling amg_cycle at level 1\n # direct solving at level 2\n # Residual norm from cycle 3 is 0.018650.\n # cycle 3 < cycles 10\n # calling amg_cycle at level 0\n # calling amg_cycle at level 1\n # direct solving at level 2\n # Residual norm from cycle 4 is 0.003405.\n # Returning mgSolve after 4 cycle(s) with norm 0.003405\n # info:\n # {'norm': 0.0034051536498270769, 'cycle': 4} \n return u_mg",
"def _gmres(self, super_operator, super_rhs, tol):\n sol, solve_info, residuals = linalg.gmres(\n super_operator, super_rhs,\n tol=tol,\n use_strong_form=True,\n return_residuals=True,\n **SOLVER_OPTIONS\n )\n return sol, solve_info, residuals",
"def PCG(A, b, x0, M_inv, eps=0.01, imax=50):\n i = 0\n x = x0\n # residue\n r = b - A @ x\n # step in the direction of residue\n d = M_inv @ r\n # initial delta^2\n delta_new = np.dot(r,d)\n delta_0 = delta_new\n while i < i_max and delta_new > eps**2 * delta_0:\n alpha = delta_new / np.einsum('i,ij,j', d,A,d)\n x += alpha * d\n if i % 50 == 0:\n r = b - A@x\n else:\n r -= alpha*q\n s = M_inv @ r\n delta_old = delta_new\n delta_new = np.dot(r, s)\n beta = delta_new / delta_old\n d = s + beta*d\n i += 1\n return x",
"def solve(self, A, B):\n d = tf.matrix_diag_part(A)\n D = tf.reshape(tf.matrix_diag(d), tf.shape(A))\n R = A - D\n\n iD = tf.reshape(tf.matrix_diag(1.0 / d), tf.shape(A))\n\n X = tf.zeros_like(B)\n for _ in range(self.nb_iterations):\n T = tf.einsum('bmn,bno->bmo', R, X)\n S = B - T\n X = tf.einsum('bmn,bno->bmo', iD, S)\n return tf.reshape(X, tf.shape(B))",
"def SolveAndCorrect(M, b):\n\tXMatrix = Solve(M, b)\n\tXMatrix, Qc = MatrixCorrections(XMatrix)\n\treturn XMatrix, Qc",
"def _General_Iterative_Method(A, b, x0, M, N, max_iterations=200, epsilon=1e-2, w=1.0, method=\"General Iterative\"):\n\n residual_queue = []\n convergences_queue = []\n last_x = x0\n M_inverse = np.linalg.inv(M)\n curr_iter = 0\n while curr_iter < max_iterations:\n curr_x = (1 - w) * last_x + (w * M_inverse) @ (b - N @ last_x)\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n convergences_queue.append(np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(A @ last_x - b, 2))\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if c < epsilon or curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", method, w)\n print_graph(convergences_queue, curr_iter, \"convergence rate\", method, w)\n print(\"Number of Iterations: \" + str(curr_iter))\n return curr_x\n last_x = curr_x\n curr_iter += 1\n return \"failed\"",
"def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution"
] | [
"0.8164",
"0.72392",
"0.7206821",
"0.68150455",
"0.678389",
"0.6487322",
"0.63835514",
"0.624065",
"0.598083",
"0.5946647",
"0.5763055",
"0.56070817",
"0.55459785",
"0.55261856",
"0.55072945",
"0.54999584",
"0.54536366",
"0.53874725",
"0.5362347",
"0.5356209",
"0.5320891",
"0.52675056",
"0.52610177",
"0.5245691",
"0.5201263",
"0.517293",
"0.5085363",
"0.5069066",
"0.50317764",
"0.50265664"
] | 0.7316262 | 1 |
Computes the residual vector r and its norm, beta, which is minimized by GMRES. | def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,
x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:
r = b - A_mv(x, *A_args)
beta = jnp.linalg.norm(r)
return r, beta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R",
"def get_residual(self, beta: ndarray) -> ndarray:\n return self.data.weight*(self.data.obs -\n self.fevar.mapping(beta))",
"def beta_r(r):\n return 0.",
"def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale\n if self.regularizationLambda > 0:\n r2 -= self.regularizationLambda*np.dot(self.x,self.x)\n return r2",
"def beta_r(r):\n return 1.",
"def beta_r(r, beta):\n return beta",
"def beta_r(r, r_ani):\n return 1./2 * r / (r + r_ani)",
"def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x",
"def beta_r(r, r_ani):\n return r**2/(r_ani**2 + r**2)",
"def residuals(p, r, theta):\n return r - f(theta, p)",
"def residual(var, matrix, RHSvector):\n from fipy.tools.numerix import array, LINFnorm\n \n Lx = matrix * array(var)\n return LINFnorm(Lx - RHSvector)",
"def residuals(self, b):\n x, y = self.xvals, self.yvals\n return self._numexpr(x, *b) - y",
"def wR(r, rc):\n nr = norm_numba(r)\n return (1 - nr / rc) if nr / rc < 1.0 else 0.0",
"def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return",
"def probaR(self, r):\n\n if r == 0.:\n return self.__alpha0 + self.__beta + self.__eta / 2.\n\n if r == 1.:\n return self.__alpha1 + self.__beta + self.__eta / 2.\n\n return self.__eta * (3./2. + r - r*r)",
"def beta_r(r, r_ani, beta_inf):\n return beta_inf * r**2/(r_ani**2 + r**2)",
"def m_beta(r, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 3./2, 3 * beta / 2, 5./2, -(r_x / r_c)**2))\n\n m = 4./3 * np.pi * rho_0 * r**3 * spec.hyp2f1(\n 3./2, 3 * beta / 2, 5./2, -(r/r_c)**2)\n\n return m",
"def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x",
"def _solver_dirty(X, R, coef_shared_, coef_specific_, Ls, alpha, beta,\n max_iter, tol, positive):\n n_tasks = len(X)\n n_samples, n_features = X[0].shape\n theta = coef_shared_ + coef_specific_\n alpha *= n_samples\n beta *= n_samples\n\n # dg = 1.\n for i in range(max_iter):\n w_max = 0.0\n d_w_max = 0.0\n for j in range(n_features):\n if Ls[j] == 0.:\n continue\n # compute residual\n grad = np.zeros(n_tasks)\n tmp1 = np.zeros(n_tasks)\n tmp2 = np.zeros(n_tasks)\n\n normtmp = 0.\n for t in range(n_tasks):\n for n in range(n_samples):\n grad[t] += X[t, n, j] * R[t, n]\n grad[t] /= Ls[j]\n tmp1[t] = grad[t] + coef_shared_[j, t]\n tmp2[t] = grad[t] + coef_specific_[j, t]\n\n normtmp += tmp1[t] ** 2\n\n normtmp = np.sqrt(normtmp)\n\n # l2 thresholding\n\n thresholdl2 = 0.\n if normtmp:\n thresholdl2 = max(1. - alpha / (Ls[j] * normtmp), 0.)\n tmp1 *= thresholdl2\n thresholdl1 = beta / Ls[j]\n tmp2 = np.sign(tmp2) * np.maximum(np.abs(tmp2) - thresholdl1, 0.)\n if positive:\n tmp2 = np.maximum(tmp2, 0.)\n tmp1 = np.maximum(tmp1, 0.)\n new_theta = tmp1 + tmp2\n if theta[j].any():\n for t in range(n_tasks):\n R[t] += X[t, :, j] * theta[j, t]\n\n d_w_j = np.abs(theta[j] - new_theta).max()\n d_w_max = max(d_w_max, d_w_j)\n w_max = max(w_max, np.abs(tmp1 + tmp2).max())\n coef_shared_[j] = tmp1\n coef_specific_[j] = tmp2\n theta[j] = new_theta\n\n if theta[j].any():\n for t in range(n_tasks):\n R[t] -= X[t, :, j] * theta[j, t]\n\n if (w_max == 0.0 or d_w_max / w_max < tol):\n break\n\n return coef_shared_, coef_specific_, R, i",
"def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)",
"def beta_model(r, s0, rc, beta, c):\n return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c",
"def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ",
"def sigma_beta_rmax(R, r_max, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (\n 4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 1.5, 1.5 * beta, 2.5, -(r_x / r_c)**2\n )\n )\n\n prefactor = 2 * r_c * rho_0\n sigma = prefactor * (\n ((r_max / r_c)**2 - (R / r_c)**2)**0.5\n / (1 + (R / r_c)**2)**(1.5 * beta)\n * spec.hyp2f1(\n 0.5, 1.5 * beta, 1.5,\n -(((r_max / r_c)**2 - (R/r_c)**2) / (1 + (R / r_c)**2))\n )\n ).real\n\n return sigma.astype(float)",
"def sigma_beta(R, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 3./2, 3. * beta / 2, 5./2, -(r_x / r_c)**2))\n\n prefactor = np.pi**0.5 * r_c * rho_0\n sigma = prefactor * (\n (((R/r_c)**2 + 1)**(0.5 - 3 * beta / 2) *\n spec.gamma(3 * beta / 2 - 0.5)) / spec.gamma(3 * beta / 2))\n\n return sigma",
"def beta_r(self, r, **kwargs):\n return self._model.beta_r(r, **kwargs)",
"def __call__(self,r):\n return self._n0 * np.power(r / self._r0, self._beta)",
"def residuals(self, ts, rvs, p):\n\n if p.npl == 0:\n return rvs\n else:\n rvmodel = np.sum(rv.rv_model(ts,p), axis=0)\n return rvs - rvmodel",
"def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)",
"def residual(self, x, y, num_targets):\n \n x = x/sum(x) # normalize weights\n\n # RUN IM-SRG(2)\n ref = self._refs.T.dot(x)\n main(self._n_holes,self._n_particles, \n g=self._g_val, \n pb=self._pb_val, \n ref=ref, \n verbose=0, \n generator=self._generator,\n output_root = self._coeffs_root)\n\n # LOAD EVOLVED COEFFICIENTS\n H0B, H1B, H2B, eta1B_vac, eta2B_vac = pickle.load(open(self._coeffs_root+'/vac_coeffs_evolved.p', 'rb'))\n\n # PERFORM FULL CI AND GET EIGENVALUES\n hme = pyci.matrix(self._n_holes,self._n_particles, H0B, H1B, H2B, H2B, imsrg=True)\n ev_eigs = np.linalg.eigvalsh(hme)\n\n #return np.sqrt(np.mean((ev_eigs-y)**2))\n #return abs(ev_eigs[0:num_targets] - y[0:num_targets])\n #return abs(ev_eigs[1] - y[1])\n #return abs(ev_eigs[0] - y[0])\n return np.sqrt(0.80*(ev_eigs[0]-y[0])**2 + 0.20/35*((ev_eigs[1::]-y[1::]).T.dot(ev_eigs[1::]-y[1::])))",
"def residual(t, x, xdot, result):\n result[0] = x[2]-xdot[0]\n result[1] = x[3]-xdot[1]\n result[2] = -xdot[2]+x[4]*x[0]/m\n result[3] = -xdot[3]+x[4]*x[1]/m-g\n result[4] = x[2]**2 + x[3]**2 \\\n + (x[0]**2 + x[1]**2)/m*x[4] - x[1] * g\n print(result)"
] | [
"0.69030577",
"0.68404454",
"0.6596661",
"0.6463848",
"0.6344658",
"0.63295376",
"0.6283406",
"0.6267384",
"0.6263794",
"0.62514615",
"0.6221709",
"0.6146919",
"0.61445266",
"0.6131107",
"0.6113608",
"0.61097836",
"0.6103144",
"0.6083334",
"0.6065997",
"0.6033683",
"0.60207003",
"0.60178405",
"0.5999418",
"0.5984028",
"0.5963782",
"0.5936815",
"0.5921717",
"0.59005046",
"0.58798075",
"0.5872653"
] | 0.68639606 | 1 |
Performs a single iteration of gmres_krylov. See that function for a more detailed description. | def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:
gmres_variables, gmres_constants = gmres_carry
k, V, R, beta_vec, err, givens = gmres_variables
tol, A_mv, A_args, b_norm, _ = gmres_constants
V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)
R_col, givens = apply_givens_rotation(H[:, k], givens, k)
R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])
# Update the residual vector.
cs, sn = givens[:, k] * beta_vec[k]
beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)
beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)
err = jnp.abs(sn) / b_norm
gmres_variables = (k + 1, V, R, beta_vec, err, givens)
return (gmres_variables, gmres_constants) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)",
"def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n gmres_constants, gmres_variables = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.",
"def run(self):\n i = 0\n try:\n for i in range(0, self._iters):\n if self._verbose:\n print(\" Inner CG Iteration \" + repr(i))\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k, alpha_k,\n self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n if self._verbose:\n print(\"Converged at Iteration \" + str(i) + \".\")\n self.converged = True\n self.iteration = i+1\n return\n\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n self._rho_k = rho_k_plus_1\n\n if self._verbose >= 3:\n print(\" Residual=\" + repr(rho_k_t))\n except KeyboardInterrupt:\n raise\n finally:\n self.iteration = i+1",
"def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done",
"def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)",
"def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x",
"def main():\n feature_extraction_model = \"HOG\"\n dimension_reduction_model = \"PCA\"\n k_value = get_input_k(\"k\")\n K_value = get_input_k(\"K\")\n folder = get_input_folder(\"Folder\")\n dim_k_value = 40\n\n query_images = get_input_image_list(folder)\n start = time.time()\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value, folder_metadata=folder,\n metadata_collection=\"labelled\")\n obj_feat = dim_red.get_object_feature_matrix()\n features_list = np.array(obj_feat['featureVector'].tolist())\n images_list = np.array(obj_feat['imageId'])\n cos_sim = cosine_similarity(features_list)\n\n sim_graph = sim_graph_from_sim_max(cos_sim, images_list, k_value)\n results = ppr(sim_graph, images_list, query_images)\n results = results[:K_value]\n\n print(\"Top {} images from Personalized page Rank are:\".format(K_value))\n for r in results:\n r[\"path\"] = os.path.abspath(os.path.join(folder, r['imageId']))\n print(r)\n\n query_images_list = [os.path.abspath(os.path.join(folder, img)) for img in query_images]\n title = {\"Model\": \"Personalized Page Rank\", \"k\": k_value, \"K\": K_value}\n show_images_ppr(query_images_list, title, results)\n print(\"Execution time: {} seconds\".format(time.time() - start))",
"def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return",
"def grid_search(train_labels: str, \n test_labels: str, \n output:str, \n res:tuple=(120, 160), \n lazy:bool=True, \n batch_size:int=16, \n epochs:int=20):\n\n # Data\n print(\"=> Loading data.\")\n train = FLIRDataset(train_labels, res=res, batch_size=batch_size)\n test = FLIRDataset(test_labels, res=res, batch_size=batch_size)\n\n # In eager loading mode, train on everything.\n if not lazy:\n X_train, y_train = train.get_all()\n X_test, y_test = test.get_all()\n X_train = np.concatenate([X_train, X_test], axis=0)\n y_train = np.concatenate([y_train, y_test], axis=0)\n\n\n def net(x, num_classes=1):\n x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)\n x = K.layers.Flatten()(x)\n x = K.layers.Dense(num_classes, activation=\"softmax\")(x)\n return x\n\n print(\"\\n=> Training model.\")\n input_tensor = K.layers.Input((160, 120, 1))\n output_tensor = net(input_tensor, num_classes=train.num_classes())\n model = K.Model(input_tensor, output_tensor)\n\n model.compile(optimizer=\"sgd\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\n # Train model\n if lazy:\n model.fit(x=train, \n epochs=epochs, \n validation_data=train, \n verbose=2)\n else:\n model.fit(x=X_train, \n y=y_train, \n epochs=epochs, \n batch_size=batch_size, \n verbose=2)\n\n # Save weights\n model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\"))",
"def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return",
"def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )",
"def eval_one_iteration(sess, model, cv_num_batch, iteration):\n counter = 0\n cv_g_mse_loss = 0.0\n cv_g_l2_loss = 0.0\n cv_g_loss = 0.0\n for batch in range(int(cv_num_batch/FLAGS.num_gpu)):\n g_mse_losses, g_l2_losses, \\\n g_losses = sess.run([model.g_mse_losses,\n model.g_l2_losses,\n model.g_losses])\n g_mse_loss = np.mean(g_mse_losses)\n g_l2_loss = np.mean(g_l2_losses)\n g_loss = np.mean(g_losses)\n counter += FLAGS.num_gpu\n cv_g_mse_loss += g_mse_loss\n cv_g_l2_loss += g_l2_loss\n cv_g_loss += g_loss\n\n _summaries = sess.run(model.summaries)\n model.writer.add_summary(_summaries, iteration * cv_num_batch)\n\n cv_g_mse_loss = cv_g_mse_loss / counter * FLAGS.num_gpu\n cv_g_l2_loss = cv_g_l2_loss / counter * FLAGS.num_gpu\n cv_g_loss = cv_g_loss / counter * FLAGS.num_gpu\n\n return cv_g_mse_loss, cv_g_l2_loss, cv_g_loss",
"def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results",
"def compute(self, X, Y, n):\n inner_cv = KFold(5, shuffle=True, random_state=1673)\n\n print('-> grid searching and cross validation ...')\n for training, validation, j in self._k_fold_cross_validation(X, 5, n):\n\n x, y, valid_x, valid_y = X.loc[training, :], Y[training], X.loc[validation, :], Y[validation]\n x_features, valid_features = self.sat_features.loc[training, :], self.sat_features.loc[validation, :]\n\n if 'kNN' in self.model_list:\n parameters = {'n_neighbors': range(1, 18, 2)}\n model = KNeighborsRegressor(weights='distance')\n self.kNN = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.kNN.fit(x, y).predict(valid_x)\n self.results['kNN'].append(list(res))\n self.scores['kNN'].append(R2(valid_y, res))\n\n if 'Kriging' in self.model_list:\n parameters = {\"kernel\": [RBF(l) for l in [[1, 1]]]}\n model = GaussianProcessRegressor(alpha=0.1, n_restarts_optimizer=0)\n self.Kriging = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.Kriging.fit(x, y).predict(valid_x)\n self.results['Kriging'].append(list(res))\n self.scores['Kriging'].append(R2(valid_y, res))\n\n if 'RmSense' in self.model_list:\n parameters = {\"alpha\": [0.001, 0.01, 0.1, 1, 10, 100, 1000]}\n model = Ridge()\n self.RmSense = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n #print('INFO: best alpha - ', self.RmSense.fit(x_features, y).best_params_)\n\n res = self.RmSense.fit(x_features, y).predict(valid_features)\n self.results['RmSense'].append(list(res))\n self.scores['RmSense'].append(R2(valid_y, res))\n\n if 'Ensamble' in self.model_list:\n res = (self.RmSense.predict(valid_features) + self.kNN.predict(valid_x)) / 2.\n self.results['Ensamble'].append(list(res))\n self.scores['Ensamble'].append(R2(valid_y, res))\n\n for m in self.model_list:\n print('score {}: {}'.format(m, np.mean(self.scores[m])))",
"def get_all_results(pred_root, meta_results):\r\n results_all = {}\r\n for key in tqdm(meta_results, desc='Generating results ..'):\r\n persons = meta_results[key]\r\n\r\n global_seg = cv2.imread(pred_root + 'global_seg/{}.png'.format(key),\r\n cv2.IMREAD_UNCHANGED)\r\n global_tag = cv2.imread(pred_root + 'global_tag/{}.png'.format(key),\r\n cv2.IMREAD_UNCHANGED)\r\n\r\n results = {}\r\n dets, masks = [], []\r\n for p_id, score in persons:\r\n mask = (global_tag == p_id)\r\n if np.sum(mask) == 0:\r\n continue\r\n seg = mask * global_seg\r\n ys, xs = np.where(mask > 0)\r\n x1, y1, x2, y2 = xs.min(), ys.min(), xs.max(), ys.max()\r\n dets.append((x1, y1, x2, y2, score))\r\n masks.append(seg)\r\n\r\n # Reuiqred Field of each result: a list of masks,\r\n # each is a multi-class masks for one person.\r\n # It can also be sparsified to\r\n # [scipy.sparse.csr_matrix(mask) for mask in masks]\r\n # to save memory cost\r\n results['MASKS'] = masks if not Sparse \\\r\n else [scipy.sparse.csr_matrix(m) for m in masks]\r\n # Reuiqred Field of each result,\r\n # a list of detections corresponding to results['MASKS'].\r\n results['DETS'] = dets\r\n\r\n if cache_pkl:\r\n results_cache_add = cache_pkl_path + key + '.pklz'\r\n pickle.dump(results, gzip.open(results_cache_add, 'w'))\r\n results_all[key] = results_cache_add\r\n else:\r\n results_all[key] = results\r\n\r\n if PLOT:\r\n import pylab as plt\r\n plt.figure('seg')\r\n plt.imshow(global_seg)\r\n print('Seg unique:' + str(np.unique(global_seg)))\r\n plt.figure('tag')\r\n plt.imshow(global_tag)\r\n print('Tag unique:' + str(np.unique(global_tag)))\r\n plt.show()\r\n\r\n return results_all",
"def solve(self):\n\n # Set up display header if verbose operation enabled\n if self.opt['Verbose']:\n hdr = 'Itn DFidX PriResX DuaResX DFidG' + \\\n ' ResG '\n print(hdr)\n print('-' * len(hdr))\n\n # Main iteration loop\n for n in range(self.opt['MaxMainIter']):\n\n # At start of 2nd iteration, set the numbers of inner\n # iterations for the X and G solvers from the options\n # object for the outer solver\n if n == 1:\n self.slvX.opt['MaxMainIter'] = self.opt['XslvIter']\n self.slvG.opt['MaxMainIter'] = self.opt['GslvIter']\n\n # Run the configured number of iterations of the X (CSC)\n # solver and assign the result to X\n self.X = self.slvX.solve()\n\n # Compute the sum of the subpixel shifts of X\n Xhs = np.sum(fftconv(self.H, self.X.squeeze(), axes=(0, 1)),\n axis=-1)\n\n # Set the convolution kernel in the deconvolution solver\n # to the sum of the subpixel shifts of X\n self.slvG.setG(Xhs)\n # Run the configured number of iterations of the G\n # (deconvolution) solver and crop the result to obtain the\n # updated g\n self.g = self.slvG.solve()[0:self.gshp[0], 0:self.gshp[1]]\n\n # Construct a new dictionary for the X (CSC) solver from\n # the updated psf g\n self.D, self.dn = self.getD(self.g)\n self.slvX.setdict(self.D[..., np.newaxis, np.newaxis, :])\n\n # Display iteration statistics if verbose operation enabled\n if self.opt['Verbose']:\n itsX = self.slvX.getitstat()\n itsG = self.slvG.getitstat()\n fmt = '%3d %.3e %.3e %.3e %.3e %.3e'\n tpl = (n, itsX.DFid[-1], itsX.PrimalRsdl[-1],\n itsX.DualRsdl[-1], itsG.DFid[-1], itsG.Rsdl[-1])\n print(fmt % tpl)\n\n # Return the (normalised) psf estimate g\n return self.g / np.linalg.norm(self.g)",
"def iterate_over_hkl_compute(self, max_hkl=6):\n \n # r will contain the return value, an array with rows that contain:\n # h, k, l, qhkl, qhkl_vector\n r = []\n \n for h in range(-max_hkl,max_hkl+1):\n for k in range(-max_hkl,max_hkl+1):\n for l in range(-max_hkl,max_hkl+1):\n \n # Don't put a reflection at origin\n if not (h==0 and k==0 and l==0):\n qhkl, qhkl_vector = self.q_hkl_exp(h,k,l)\n r.append( [ h, k, l, qhkl, qhkl_vector ] )\n \n return r",
"def inner_loop(model, optim, img, rays_o, rays_d, bound, num_samples, raybatch_size, inner_steps):\n pixels = img.reshape(-1, 3)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(inner_steps):\n indices = torch.randint(num_rays, size=[raybatch_size])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()",
"def loop(self, *, l_img=True, r_img=False, depth_map=False, depth_map_img=False, point_cloud=False,\n\t\t\t\t\tod_bbox=False, od_img=False, ss_pred=False, ss_img=False,\n\t\t\t\t\tdist_to_col=False, dist_to_col_img=False,\n\t\t\t\t\tis_close=False, min_dist=False, is_close_simple=False, min_dist_simple=False):\n\t\tis_c = is_close or min_dist\n\t\tis_c_s = is_close_simple or min_dist_simple\n\t\td2c = dist_to_col or dist_to_col_img or is_c\n\t\tod = od_bbox or od_img or d2c\n\t\tss = ss_pred or ss_img\n\t\t\n\t\truntime_parameters = sl.RuntimeParameters()\n\n\t\twhile True:\n\t\t\tcache = []\n\t\t\tif self.zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:\n\t\t\t\tif l_img or od or ss or d2c:\n\t\t\t\t\t_l_img = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_image(_l_img, sl.VIEW.LEFT)\n\t\t\t\t\tif l_img:\n\t\t\t\t\t\tcache.append(_l_img.get_data())\n\n\t\t\t\tif r_img:\n\t\t\t\t\t_r_img = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_image(_r_img, sl.VIEW.RIGHT)\n\t\t\t\t\tcache.append(_r_img.get_data())\n\n\t\t\t\tif depth_map or is_c_s:\n\t\t\t\t\t_depth_map = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_measure(_depth_map, sl.MEASURE.DEPTH)\n\t\t\t\t\tcache.append(_depth_map.get_data())\t\t\n\n\t\t\t\tif depth_map_img:\n\t\t\t\t\t_depth_map_img = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_image(_depth_map_img, sl.VIEW.DEPTH)\n\t\t\t\t\tcache.append(_depth_map_img.get_data())\n\n\t\t\t\tif point_cloud or d2c:\n\t\t\t\t\t_point_cloud = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_measure(_point_cloud, sl.MEASURE.XYZRGBA)\n\t\t\t\t\tif point_cloud:\n\t\t\t\t\t\tcache.append(_point_cloud.get_data())\n\n\t\t\t\tif od:\n\t\t\t\t\t_od_bbox = self.object_detection(_l_img, return_image=od_img)\n\t\t\t\t\tif od_img:\n\t\t\t\t\t\t_od_bbox, _od_img = _od_bbox\n\t\t\t\t\tif od_bbox:\n\t\t\t\t\t\tcache.append(_od_bbox)\n\t\t\t\t\tif od_img:\n\t\t\t\t\t\tcache.append(_od_img)\n\n\t\t\t\tif ss:\n\t\t\t\t\t_ss_pred = self.semantic_segmentation(_l_img, return_image=ss_img)\n\t\t\t\t\tif ss_img:\n\t\t\t\t\t\t_ss_pred, _ss_img = _ss_pred\n\t\t\t\t\tif ss_pred:\n\t\t\t\t\t\tcache.append(_ss_pred)\n\t\t\t\t\tif ss_img:\n\t\t\t\t\t\tcache.append(_ss_img)\n\n\t\t\t\tif d2c:\n\t\t\t\t\t_dist_to_col = self.distance_to_collision(_od_bbox, _point_cloud, image=_l_img, return_image=dist_to_col_img)\n\t\t\t\t\tif dist_to_col_img:\n\t\t\t\t\t\t_dist_to_col, _dist_to_col_img = _dist_to_col\n\t\t\t\t\tif dist_to_col:\n\t\t\t\t\t\tcache.append(_dist_to_col)\n\t\t\t\t\tif dist_to_col_img:\n\t\t\t\t\t\tcache.append(_dist_to_col_img)\n\n\t\t\t\tif is_c:\n\t\t\t\t\t_is_close = self.is_close_to_collision(_od_bbox, _dist_to_col, return_min_dist=min_dist)\n\t\t\t\t\tif min_dist:\n\t\t\t\t\t\t_is_close, _min_dist = _is_close\n\t\t\t\t\tif is_close:\n\t\t\t\t\t\tcache.append(_is_close)\n\t\t\t\t\tif min_dist:\n\t\t\t\t\t\tcache.append(_min_dist)\n\n\t\t\t\tif is_c_s:\n\t\t\t\t\t_is_close = self.is_close_to_collision_simple(_depth_map, return_min_dist=min_dist_simple)\n\t\t\t\t\tif min_dist_simple:\n\t\t\t\t\t\t_is_close, _min_dist = _is_close\n\t\t\t\t\tif is_close_simple:\n\t\t\t\t\t\tcache.append(_is_close)\n\t\t\t\t\tif min_dist_simple:\n\t\t\t\t\t\tcache.append(_min_dist)\n\n\t\t\tif cache:\n\t\t\t\tyield cache\n\t\t\telse:\n\t\t\t\traise StopIteration\n\n\t\t\t# use 'q' to quit the loop\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\traise StopIteration",
"def main(logger, resultsDict):\n\n print(\"=\" * 30)\n print(\"Main function of overlayMasks.\")\n print(\"=\" * 30)\n\n # Get parameters from .json files.\n full_img_dir = config_overlay[\"full_img_dir\"]\n y_true_dir = config_overlay[\"y_true_dir\"]\n y_pred_dir = config_overlay[\"y_pred_dir\"]\n extension = config_overlay[\"extension\"]\n target_size = (config_overlay[\"target_size\"], config_overlay[\"target_size\"])\n save_maskoverlay_dir = config_overlay[\"save_maskoverlay_dir\"]\n save_fulloverlay_dir = config_overlay[\"save_fulloverlay_dir\"]\n\n # ------------\n\n # Get paths.\n full_img_paths_list = []\n y_true_paths_list = []\n y_pred_paths_list = []\n\n for full in os.listdir(full_img_dir):\n if full.endswith(extension):\n full_img_paths_list.append(os.path.join(full_img_dir, full))\n\n for full in os.listdir(y_true_dir):\n if full.endswith(extension):\n y_true_paths_list.append(os.path.join(y_true_dir, full))\n\n for full in os.listdir(y_pred_dir):\n if full.endswith(extension):\n y_pred_paths_list.append(os.path.join(y_pred_dir, full))\n\n full_img_paths_list.sort()\n y_true_paths_list.sort()\n y_pred_paths_list.sort()\n\n # ------------\n\n # Load full_img.\n full_img_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in full_img_paths_list\n ]\n\n # Load y_true masks.\n y_true_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_true_paths_list\n ]\n\n # Load y_pred masks.\n y_pred_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_pred_paths_list\n ]\n\n print(full_img_arrays[0].min(), full_img_arrays[0].max())\n print(y_true_arrays[0].min(), y_true_arrays[0].max())\n print(y_pred_arrays[0].min(), y_pred_arrays[0].max())\n\n # ------------\n\n # Stack to create RGB version of grayscale images.\n full_img_rgb = [np.stack([img, img, img], axis=-1) for img in full_img_arrays]\n\n # Green true mask. Note OpenCV uses BGR.\n y_true_rgb = [\n np.stack([np.zeros_like(img), img, np.zeros_like(img)], axis=-1)\n for img in y_true_arrays\n ]\n\n # Red predicted mask. Note OpenCV uses BGR.\n y_pred_rgb = [\n np.stack([np.zeros_like(img), np.zeros_like(img), img], axis=-1)\n for img in y_pred_arrays\n ]\n\n # ------------\n\n for i in range(len(full_img_rgb)):\n\n # First overlay true and predicted masks.\n overlay_masks = cv2.addWeighted(\n src1=y_true_rgb[i], alpha=0.5, src2=y_pred_rgb[i], beta=1, gamma=0\n )\n\n # Then overlay full_img and masks.\n overlay_all = cv2.addWeighted(\n src1=full_img_rgb[i], alpha=1, src2=overlay_masks, beta=0.5, gamma=0\n )\n\n # Save.\n\n # Get patient ID from y_true masks.\n filename = os.path.basename(y_true_paths_list[i])\n filename_split = filename.split(\"_\")\n patientID = \"_\".join([filename_split[i] for i in range(4)])\n\n masks_filename = patientID + \"___MasksOverlay.png\"\n all_filename = patientID + \"___AllOverlay.png\"\n\n save_path_masks = os.path.join(save_maskoverlay_dir, masks_filename)\n save_path_all = os.path.join(save_fulloverlay_dir, all_filename)\n\n print(save_path_masks)\n print(save_path_all)\n\n cv2.imwrite(filename=save_path_masks, img=overlay_masks)\n cv2.imwrite(filename=save_path_all, img=overlay_all)",
"def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()",
"def run_kohonen(data, size_k: int=6, sigma: float=2.0, eta: int=0.9, \n tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #convergence criteria\n eps = 1E-6\n eps_2 = 0.1\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n \n print('start iteration')\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n som_step(centers, data[int(i),:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n \n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n \n print('Total iteration : {}'.format(t))\n return centers, error[1:]",
"def getResults(solver, minBit, maxBit, saveFile, noResults):\n\n for k in range(minBit, maxBit + 1, 2):\n for i in range(noResults):\n\n keys = generate_RSA.KeyGen(k) # initialise keys\n keys.generateKeys() # generate keys\n\n solver.setN(keys.n) # setup solver\n solver.setE(keys.e)\n\n solver.solve() # solve problem\n\n if solver.d == keys.d: # if we got it right\n resTime = resTime_C # update correct dictionaries\n resCount = resCount_C\n resSpace = resSpace_C\n else:\n resTime = resTime_W # else update wrong dictionaries\n resCount = resCount_W\n resSpace = resSpace_W\n\n if k not in resTime: # if we've not yet had a result for k\n resTime[k] = [solver.time, 1] # then set\n resSpace[k] = [solver.space, 1] # then set\n resCount[k] = [solver.count, 1]\n else:\n oldT, oldC = resTime[k] # keeps a running average\n newC = oldC + 1 # increment count\n newT = ((oldT * oldC) + solver.time) / newC # get new averagae\n resTime[k] = [newT, newC] # without storing all variables\n\n oldS, oldC = resSpace[k] # keeps a running average\n newS = ((oldS * oldC) + solver.space) / newC\n resSpace[k] = [newS, newC] # without storing all variables\n\n oldCount, oldC = resCount[k] # keeps a running average\n newCount = ((oldCount * oldC) + solver.count) / newC\n resCount[k] = [newCount, newC] # without storing all variables\n\n if i % 10 == 0:\n saveResults(saveFile) # every ten results save again",
"def kmeans_004():\n crops = [200] # Should probably also add 250\n scales = [30, 50] # Scaling is probably the most important part here\n\n scores = []\n for s in scales:\n crop = 200\n n_centroids = 1600\n n_patches = 400000\n # rf_size = int(round(s * .2))\n rf_size = 10\n logger.info(\"Training with crop {}, scale {}, patch size {}, patches {}, centroids {}\".format(crop, s, rf_size, n_patches, n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n # spherical generator\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_004_scale_{}_rf_{}'.format(s, rf_size),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n logger.info(\"Images ndarray shape: {}\".format(images.shape))\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_004_scale_{}_rf_{}.npy'.format(s, rf_size), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n logger.info(\"Train X ndarray shape: {}\".format(train_x.shape))\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n scores.append((s, wrapper.cv_scores))\n del wrapper\n gc.collect()",
"def __call__(self, img_ori, objs, **kvs):\n # Crop image, forward to get the param\n param_lst = []\n roi_box_lst = []\n\n crop_policy = kvs.get('crop_policy', 'box')\n for obj in objs:\n if crop_policy == 'box':\n # by face box\n roi_box = parse_roi_box_from_bbox(obj)\n elif crop_policy == 'landmark':\n # by landmarks\n roi_box = parse_roi_box_from_landmark(obj)\n else:\n raise ValueError(f'Unknown crop policy {crop_policy}')\n\n roi_box_lst.append(roi_box)\n self.img_crop = crop_img(img_ori, roi_box)\n img = cv2.resize(self.img_crop, dsize=(self.size, self.size), interpolation=cv2.INTER_LINEAR)\n inp = self.transform(img).unsqueeze(0)\n\n if self.gpu_mode:\n inp = inp.cuda(device=self.gpu_id)\n\n # if kvs.get('timer_flag', False):\n if True:\n end = time.time()\n param = self.model(inp)\n elapse = f'Inference: {(time.time() - end) * 1000:.1f}ms'\n print(elapse)\n else:\n param = self.model(inp)\n\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n param = param * self.param_std + self.param_mean # re-scale\n # print('output', param)\n param_lst.append(param)\n\n return param_lst, roi_box_lst",
"def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.array(pts3d_for_pnp)\n # pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n # print(pts3d_for_pnp)\n # print(pts2d_for_pnp.shape)\n num_pts = len(pts3d_for_pnp)\n print(num_pts)\n highest_inliers = 0\n for j in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n # print(\"pts\",pts3)\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n # print('rvec:', rvec,'\\n\\ntvec:', tvec)\n print(\"avg\",avg_err)\n print(\"inlier\",perc_inliers)\n return R, tvec",
"def voc_pred_process(pred_data, val_cls, recs):\n num_classes = config.num_classes\n cls_img_ids = {}\n cls_bboxes = {}\n cls_scores = {}\n classes = {}\n cls_npos = {}\n for cls in val_cls:\n if cls == 'background':\n continue\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == cls]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n cls_npos[cls] = npos\n classes[cls] = class_recs\n cls_img_ids[cls] = []\n cls_bboxes[cls] = []\n cls_scores[cls] = []\n\n for sample in pred_data:\n pred_boxes = sample['boxes']\n box_scores = sample['box_scores']\n img_id = sample['img_id']\n h, w = sample['image_shape']\n\n final_boxes = []\n final_label = []\n final_score = []\n\n for c in range(1, num_classes):\n class_box_scores = box_scores[:, c]\n score_mask = class_box_scores > config.min_score\n class_box_scores = class_box_scores[score_mask]\n class_boxes = pred_boxes[score_mask] * [h, w, h, w]\n\n if score_mask.any():\n nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, config.max_boxes)\n class_boxes = class_boxes[nms_index]\n class_box_scores = class_box_scores[nms_index]\n\n final_boxes += class_boxes.tolist()\n final_score += class_box_scores.tolist()\n final_label += [c] * len(class_box_scores)\n\n for loc, label, score in zip(final_boxes, final_label, final_score):\n cls_img_ids[val_cls[label]].append(img_id)\n cls_bboxes[val_cls[label]].append([loc[1], loc[0], loc[3], loc[2]])\n cls_scores[val_cls[label]].append(score)\n return classes, cls_img_ids, cls_bboxes, cls_scores, cls_npos",
"def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)",
"def kmeans_006():\n n_centroids_vals = [1000, 2000, 2500, 3000]\n scores = []\n\n for n_centroids in n_centroids_vals:\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n logger.info(\"Training with n_centroids {}\".format(n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_006_centroids_{}'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_006_centroids_{}.npy'.format(n_centroids), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_centroids, wrapper.cv_scores)\n logger.info(\"Scores: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()",
"def run(self):\n self.errList = []\n self.initialize()\n\n for i in range(self.maxiter):\n # update U\n for k in range(self.K):\n [p, sigma, q] = np.linalg.svd(self.H.dot(self.S[:, :, k]).dot(self.V.T).dot(self.X[k].T),\n full_matrices=False)\n self.U[k] = q.T.dot(p.T)\n self.U[k] = self.U[k].real\n\n # calculate temporal variable y\n y = np.zeros([self.rank, self.L, self.K])\n for k in range(self.K):\n y[:, :, k] = self.U[k].T.dot(self.X[k])\n\n # get H, V, and temps by running a single iteration of CP_ALS\n if i == 0:\n [cp, rec] = pyten.method.cp_als(pyten.tenclass.Tensor(y),\n self.rank, tol=self.tol, maxiter=1, printitn=0)\n else:\n [cp, rec] = pyten.method.cp_als(pyten.tenclass.Tensor(y),\n self.rank, tol=self.tol, maxiter=1,\n init=[self.H, self.V, temps], printitn=0)\n self.H = cp.Us[0]\n self.V = cp.Us[1]\n temps = cp.Us[2].dot(np.diag(cp.lmbda))\n\n # update S\n for k in range(self.K):\n self.S[:, :, k] = np.diag(temps[k, :])\n\n # checking the stop criteria\n # error = 0\n for k in range(self.K):\n temp = self.U[k].dot(self.H).dot(self.S[:, :, k]).dot(self.V.T)\n self.sigma_new += np.linalg.norm(temp - self.X[k]) ** 2\n\n error = abs(self.sigma_new - self.sigma_old) #/ self.sigma_old\n self.errList.append(error)\n if (i + 1) % self.printitn == 0:\n print 'PARAFAC2: iterations={0}, difference={1}, fit_difference={2}'.format(i + 1, self.errList[-1],\n self.sigma_new)\n elif error < self.tol:\n print 'PARAFAC2: iterations={0}, difference={1}, fit_difference={2}'.format(i + 1, self.errList[-1],\n self.sigma_new)\n\n if error < self.tol:\n break\n else:\n self.sigma_old = self.sigma_new\n self.sigma_new = 0\n\n for k in range(self.K):\n self.fit[k] = self.U[k].dot(self.H).dot(self.S[:, :, k]).dot(self.V.T)"
] | [
"0.62931263",
"0.601181",
"0.5886399",
"0.57915413",
"0.56589055",
"0.56554955",
"0.55133504",
"0.53384787",
"0.53033984",
"0.52407485",
"0.5225457",
"0.5210151",
"0.5209773",
"0.52093935",
"0.518312",
"0.51578075",
"0.51371425",
"0.5087555",
"0.50848186",
"0.50783587",
"0.50739896",
"0.5054838",
"0.5033058",
"0.5004067",
"0.49799156",
"0.49574396",
"0.49483347",
"0.49300227",
"0.49252197",
"0.49153504"
] | 0.6446188 | 0 |